repo_id
stringlengths
5
115
size
int64
590
5.01M
file_path
stringlengths
4
212
content
stringlengths
590
5.01M
aixcc-public/challenge-001-exemplar-source
7,141
arch/sparc/lib/csum_copy.S
/* SPDX-License-Identifier: GPL-2.0 */ /* csum_copy.S: Checksum+copy code for sparc64 * * Copyright (C) 2005 David S. Miller <davem@davemloft.net> */ #include <asm/export.h> #ifdef __KERNEL__ #define GLOBAL_SPARE %g7 #else #define GLOBAL_SPARE %g5 #endif #ifndef EX_LD #define EX_LD(x) x #endif #ifndef EX_ST #define EX_ST(x) x #endif #ifndef EX_RETVAL #define EX_RETVAL(x) x #endif #ifndef LOAD #define LOAD(type,addr,dest) type [addr], dest #endif #ifndef STORE #define STORE(type,src,addr) type src, [addr] #endif #ifndef FUNC_NAME #define FUNC_NAME csum_partial_copy_nocheck #endif .register %g2, #scratch .register %g3, #scratch .text 90: /* We checked for zero length already, so there must be * at least one byte. */ be,pt %icc, 1f nop EX_LD(LOAD(ldub, %o0 + 0x00, %o4)) add %o0, 1, %o0 sub %o2, 1, %o2 EX_ST(STORE(stb, %o4, %o1 + 0x00)) add %o1, 1, %o1 1: andcc %o0, 0x2, %g0 be,pn %icc, 80f cmp %o2, 2 blu,pn %icc, 60f nop EX_LD(LOAD(lduh, %o0 + 0x00, %o5)) add %o0, 2, %o0 sub %o2, 2, %o2 EX_ST(STORE(sth, %o5, %o1 + 0x00)) add %o1, 2, %o1 ba,pt %xcc, 80f add %o5, %o4, %o4 .globl FUNC_NAME .type FUNC_NAME,#function EXPORT_SYMBOL(FUNC_NAME) FUNC_NAME: /* %o0=src, %o1=dst, %o2=len */ LOAD(prefetch, %o0 + 0x000, #n_reads) xor %o0, %o1, %g1 mov -1, %o3 clr %o4 andcc %g1, 0x3, %g0 bne,pn %icc, 95f LOAD(prefetch, %o0 + 0x040, #n_reads) brz,pn %o2, 70f andcc %o0, 0x3, %g0 /* We "remember" whether the lowest bit in the address * was set in GLOBAL_SPARE. Because if it is, we have to swap * upper and lower 8 bit fields of the sum we calculate. */ bne,pn %icc, 90b andcc %o0, 0x1, GLOBAL_SPARE 80: LOAD(prefetch, %o0 + 0x080, #n_reads) andncc %o2, 0x3f, %g3 LOAD(prefetch, %o0 + 0x0c0, #n_reads) sub %o2, %g3, %o2 brz,pn %g3, 2f LOAD(prefetch, %o0 + 0x100, #n_reads) /* So that we don't need to use the non-pairing * add-with-carry instructions we accumulate 32-bit * values into a 64-bit register. At the end of the * loop we fold it down to 32-bits and so on. 
*/ ba,pt %xcc, 1f LOAD(prefetch, %o0 + 0x140, #n_reads) .align 32 1: EX_LD(LOAD(lduw, %o0 + 0x00, %o5)) EX_LD(LOAD(lduw, %o0 + 0x04, %g1)) EX_LD(LOAD(lduw, %o0 + 0x08, %g2)) add %o4, %o5, %o4 EX_ST(STORE(stw, %o5, %o1 + 0x00)) EX_LD(LOAD(lduw, %o0 + 0x0c, %o5)) add %o4, %g1, %o4 EX_ST(STORE(stw, %g1, %o1 + 0x04)) EX_LD(LOAD(lduw, %o0 + 0x10, %g1)) add %o4, %g2, %o4 EX_ST(STORE(stw, %g2, %o1 + 0x08)) EX_LD(LOAD(lduw, %o0 + 0x14, %g2)) add %o4, %o5, %o4 EX_ST(STORE(stw, %o5, %o1 + 0x0c)) EX_LD(LOAD(lduw, %o0 + 0x18, %o5)) add %o4, %g1, %o4 EX_ST(STORE(stw, %g1, %o1 + 0x10)) EX_LD(LOAD(lduw, %o0 + 0x1c, %g1)) add %o4, %g2, %o4 EX_ST(STORE(stw, %g2, %o1 + 0x14)) EX_LD(LOAD(lduw, %o0 + 0x20, %g2)) add %o4, %o5, %o4 EX_ST(STORE(stw, %o5, %o1 + 0x18)) EX_LD(LOAD(lduw, %o0 + 0x24, %o5)) add %o4, %g1, %o4 EX_ST(STORE(stw, %g1, %o1 + 0x1c)) EX_LD(LOAD(lduw, %o0 + 0x28, %g1)) add %o4, %g2, %o4 EX_ST(STORE(stw, %g2, %o1 + 0x20)) EX_LD(LOAD(lduw, %o0 + 0x2c, %g2)) add %o4, %o5, %o4 EX_ST(STORE(stw, %o5, %o1 + 0x24)) EX_LD(LOAD(lduw, %o0 + 0x30, %o5)) add %o4, %g1, %o4 EX_ST(STORE(stw, %g1, %o1 + 0x28)) EX_LD(LOAD(lduw, %o0 + 0x34, %g1)) add %o4, %g2, %o4 EX_ST(STORE(stw, %g2, %o1 + 0x2c)) EX_LD(LOAD(lduw, %o0 + 0x38, %g2)) add %o4, %o5, %o4 EX_ST(STORE(stw, %o5, %o1 + 0x30)) EX_LD(LOAD(lduw, %o0 + 0x3c, %o5)) add %o4, %g1, %o4 EX_ST(STORE(stw, %g1, %o1 + 0x34)) LOAD(prefetch, %o0 + 0x180, #n_reads) add %o4, %g2, %o4 EX_ST(STORE(stw, %g2, %o1 + 0x38)) subcc %g3, 0x40, %g3 add %o0, 0x40, %o0 add %o4, %o5, %o4 EX_ST(STORE(stw, %o5, %o1 + 0x3c)) bne,pt %icc, 1b add %o1, 0x40, %o1 2: and %o2, 0x3c, %g3 brz,pn %g3, 2f sub %o2, %g3, %o2 1: EX_LD(LOAD(lduw, %o0 + 0x00, %o5)) subcc %g3, 0x4, %g3 add %o0, 0x4, %o0 add %o4, %o5, %o4 EX_ST(STORE(stw, %o5, %o1 + 0x00)) bne,pt %icc, 1b add %o1, 0x4, %o1 2: /* fold 64-->32 */ srlx %o4, 32, %o5 srl %o4, 0, %o4 add %o4, %o5, %o4 srlx %o4, 32, %o5 srl %o4, 0, %o4 add %o4, %o5, %o4 /* fold 32-->16 */ sethi %hi(0xffff0000), %g1 srl %o4, 16, %o5 
andn %o4, %g1, %g2 add %o5, %g2, %o4 srl %o4, 16, %o5 andn %o4, %g1, %g2 add %o5, %g2, %o4 60: /* %o4 has the 16-bit sum we have calculated so-far. */ cmp %o2, 2 blu,pt %icc, 1f nop EX_LD(LOAD(lduh, %o0 + 0x00, %o5)) sub %o2, 2, %o2 add %o0, 2, %o0 add %o4, %o5, %o4 EX_ST(STORE(sth, %o5, %o1 + 0x00)) add %o1, 0x2, %o1 1: brz,pt %o2, 1f nop EX_LD(LOAD(ldub, %o0 + 0x00, %o5)) sub %o2, 1, %o2 add %o0, 1, %o0 EX_ST(STORE(stb, %o5, %o1 + 0x00)) sllx %o5, 8, %o5 add %o1, 1, %o1 add %o4, %o5, %o4 1: /* fold 32-->16 */ sethi %hi(0xffff0000), %g1 srl %o4, 16, %o5 andn %o4, %g1, %g2 add %o5, %g2, %o4 srl %o4, 16, %o5 andn %o4, %g1, %g2 add %o5, %g2, %o4 1: brz,pt GLOBAL_SPARE, 1f nop /* We started with an odd byte, byte-swap the result. */ srl %o4, 8, %o5 and %o4, 0xff, %g1 sll %g1, 8, %g1 or %o5, %g1, %o4 1: addcc %o3, %o4, %o3 addc %g0, %o3, %o3 70: retl srl %o3, 0, %o0 95: mov 0, GLOBAL_SPARE brlez,pn %o2, 4f andcc %o0, 1, %o5 be,a,pt %icc, 1f srl %o2, 1, %g1 sub %o2, 1, %o2 EX_LD(LOAD(ldub, %o0, GLOBAL_SPARE)) add %o0, 1, %o0 EX_ST(STORE(stb, GLOBAL_SPARE, %o1)) srl %o2, 1, %g1 add %o1, 1, %o1 1: brz,a,pn %g1, 3f andcc %o2, 1, %g0 andcc %o0, 2, %g0 be,a,pt %icc, 1f srl %g1, 1, %g1 EX_LD(LOAD(lduh, %o0, %o4)) sub %o2, 2, %o2 srl %o4, 8, %g2 sub %g1, 1, %g1 EX_ST(STORE(stb, %g2, %o1)) add %o4, GLOBAL_SPARE, GLOBAL_SPARE EX_ST(STORE(stb, %o4, %o1 + 1)) add %o0, 2, %o0 srl %g1, 1, %g1 add %o1, 2, %o1 1: brz,a,pn %g1, 2f andcc %o2, 2, %g0 EX_LD(LOAD(lduw, %o0, %o4)) 5: srl %o4, 24, %g2 srl %o4, 16, %g3 EX_ST(STORE(stb, %g2, %o1)) srl %o4, 8, %g2 EX_ST(STORE(stb, %g3, %o1 + 1)) add %o0, 4, %o0 EX_ST(STORE(stb, %g2, %o1 + 2)) addcc %o4, GLOBAL_SPARE, GLOBAL_SPARE EX_ST(STORE(stb, %o4, %o1 + 3)) addc GLOBAL_SPARE, %g0, GLOBAL_SPARE add %o1, 4, %o1 subcc %g1, 1, %g1 bne,a,pt %icc, 5b EX_LD(LOAD(lduw, %o0, %o4)) sll GLOBAL_SPARE, 16, %g2 srl GLOBAL_SPARE, 16, GLOBAL_SPARE srl %g2, 16, %g2 andcc %o2, 2, %g0 add %g2, GLOBAL_SPARE, GLOBAL_SPARE 2: be,a,pt %icc, 3f andcc %o2, 1, %g0 
EX_LD(LOAD(lduh, %o0, %o4)) andcc %o2, 1, %g0 srl %o4, 8, %g2 add %o0, 2, %o0 EX_ST(STORE(stb, %g2, %o1)) add GLOBAL_SPARE, %o4, GLOBAL_SPARE EX_ST(STORE(stb, %o4, %o1 + 1)) add %o1, 2, %o1 3: be,a,pt %icc, 1f sll GLOBAL_SPARE, 16, %o4 EX_LD(LOAD(ldub, %o0, %g2)) sll %g2, 8, %o4 EX_ST(STORE(stb, %g2, %o1)) add GLOBAL_SPARE, %o4, GLOBAL_SPARE sll GLOBAL_SPARE, 16, %o4 1: addcc %o4, GLOBAL_SPARE, GLOBAL_SPARE srl GLOBAL_SPARE, 16, %o4 addc %g0, %o4, GLOBAL_SPARE brz,pt %o5, 4f srl GLOBAL_SPARE, 8, %o4 and GLOBAL_SPARE, 0xff, %g2 and %o4, 0xff, %o4 sll %g2, 8, %g2 or %g2, %o4, GLOBAL_SPARE 4: addcc %o3, GLOBAL_SPARE, %o3 addc %g0, %o3, %o0 retl srl %o0, 0, %o0 .size FUNC_NAME, .-FUNC_NAME
aixcc-public/challenge-001-exemplar-source
2,633
arch/sparc/lib/mcount.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com) * * This file implements mcount(), which is used to collect profiling data. * This can also be tweaked for kernel stack overflow detection. */ #include <linux/linkage.h> #include <asm/export.h> /* * This is the main variant and is called by C code. GCC's -pg option * automatically instruments every C function with a call to this. */ .text .align 32 .globl _mcount .type _mcount,#function EXPORT_SYMBOL(_mcount) .globl mcount .type mcount,#function _mcount: mcount: #ifdef CONFIG_FUNCTION_TRACER #ifdef CONFIG_DYNAMIC_FTRACE /* Do nothing, the retl/nop below is all we need. */ #else sethi %hi(ftrace_trace_function), %g1 sethi %hi(ftrace_stub), %g2 ldx [%g1 + %lo(ftrace_trace_function)], %g1 or %g2, %lo(ftrace_stub), %g2 cmp %g1, %g2 be,pn %icc, 1f mov %i7, %g3 save %sp, -176, %sp mov %g3, %o1 jmpl %g1, %o7 mov %i7, %o0 ret restore /* not reached */ 1: #ifdef CONFIG_FUNCTION_GRAPH_TRACER sethi %hi(ftrace_graph_return), %g1 ldx [%g1 + %lo(ftrace_graph_return)], %g3 cmp %g2, %g3 bne,pn %xcc, 5f sethi %hi(ftrace_graph_entry_stub), %g2 sethi %hi(ftrace_graph_entry), %g1 or %g2, %lo(ftrace_graph_entry_stub), %g2 ldx [%g1 + %lo(ftrace_graph_entry)], %g1 cmp %g1, %g2 be,pt %xcc, 2f nop 5: mov %i7, %g2 mov %fp, %g3 save %sp, -176, %sp mov %g2, %l0 ba,pt %xcc, ftrace_graph_caller mov %g3, %l1 #endif 2: #endif #endif retl nop .size _mcount,.-_mcount .size mcount,.-mcount #ifdef CONFIG_FUNCTION_TRACER .globl ftrace_stub .type ftrace_stub,#function ftrace_stub: retl nop .size ftrace_stub,.-ftrace_stub #ifdef CONFIG_DYNAMIC_FTRACE .globl ftrace_caller .type ftrace_caller,#function ftrace_caller: mov %i7, %g2 mov %fp, %g3 save %sp, -176, %sp mov %g2, %o1 mov %g2, %l0 mov %g3, %l1 .globl ftrace_call ftrace_call: call ftrace_stub mov %i7, %o0 #ifdef CONFIG_FUNCTION_GRAPH_TRACER .globl ftrace_graph_call ftrace_graph_call: call ftrace_stub nop #endif ret restore #ifdef 
CONFIG_FUNCTION_GRAPH_TRACER .size ftrace_graph_call,.-ftrace_graph_call #endif .size ftrace_call,.-ftrace_call .size ftrace_caller,.-ftrace_caller #endif #endif #ifdef CONFIG_FUNCTION_GRAPH_TRACER ENTRY(ftrace_graph_caller) mov %l0, %o0 mov %i7, %o1 call prepare_ftrace_return mov %l1, %o2 ret restore %o0, -8, %i7 END(ftrace_graph_caller) ENTRY(return_to_handler) save %sp, -176, %sp call ftrace_return_to_handler mov %fp, %o0 jmpl %o0 + 8, %g0 restore END(return_to_handler) #endif
aixcc-public/challenge-001-exemplar-source
4,174
arch/sparc/lib/atomic_64.S
/* SPDX-License-Identifier: GPL-2.0 */ /* atomic.S: These things are too big to do inline. * * Copyright (C) 1999, 2007 2012 David S. Miller (davem@davemloft.net) */ #include <linux/linkage.h> #include <asm/asi.h> #include <asm/backoff.h> #include <asm/export.h> .text /* Three versions of the atomic routines, one that * does not return a value and does not perform * memory barriers, and a two which return * a value, the new and old value resp. and does the * barriers. */ #define ATOMIC_OP(op) \ ENTRY(arch_atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ BACKOFF_SETUP(%o2); \ 1: lduw [%o1], %g1; \ op %g1, %o0, %g7; \ cas [%o1], %g1, %g7; \ cmp %g1, %g7; \ bne,pn %icc, BACKOFF_LABEL(2f, 1b); \ nop; \ retl; \ nop; \ 2: BACKOFF_SPIN(%o2, %o3, 1b); \ ENDPROC(arch_atomic_##op); \ EXPORT_SYMBOL(arch_atomic_##op); #define ATOMIC_OP_RETURN(op) \ ENTRY(arch_atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */\ BACKOFF_SETUP(%o2); \ 1: lduw [%o1], %g1; \ op %g1, %o0, %g7; \ cas [%o1], %g1, %g7; \ cmp %g1, %g7; \ bne,pn %icc, BACKOFF_LABEL(2f, 1b); \ op %g1, %o0, %g1; \ retl; \ sra %g1, 0, %o0; \ 2: BACKOFF_SPIN(%o2, %o3, 1b); \ ENDPROC(arch_atomic_##op##_return); \ EXPORT_SYMBOL(arch_atomic_##op##_return); #define ATOMIC_FETCH_OP(op) \ ENTRY(arch_atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ BACKOFF_SETUP(%o2); \ 1: lduw [%o1], %g1; \ op %g1, %o0, %g7; \ cas [%o1], %g1, %g7; \ cmp %g1, %g7; \ bne,pn %icc, BACKOFF_LABEL(2f, 1b); \ nop; \ retl; \ sra %g1, 0, %o0; \ 2: BACKOFF_SPIN(%o2, %o3, 1b); \ ENDPROC(arch_atomic_fetch_##op); \ EXPORT_SYMBOL(arch_atomic_fetch_##op); ATOMIC_OP(add) ATOMIC_OP_RETURN(add) ATOMIC_FETCH_OP(add) ATOMIC_OP(sub) ATOMIC_OP_RETURN(sub) ATOMIC_FETCH_OP(sub) ATOMIC_OP(and) ATOMIC_FETCH_OP(and) ATOMIC_OP(or) ATOMIC_FETCH_OP(or) ATOMIC_OP(xor) ATOMIC_FETCH_OP(xor) #undef ATOMIC_FETCH_OP #undef ATOMIC_OP_RETURN #undef ATOMIC_OP #define ATOMIC64_OP(op) \ ENTRY(arch_atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ 
BACKOFF_SETUP(%o2); \ 1: ldx [%o1], %g1; \ op %g1, %o0, %g7; \ casx [%o1], %g1, %g7; \ cmp %g1, %g7; \ bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \ nop; \ retl; \ nop; \ 2: BACKOFF_SPIN(%o2, %o3, 1b); \ ENDPROC(arch_atomic64_##op); \ EXPORT_SYMBOL(arch_atomic64_##op); #define ATOMIC64_OP_RETURN(op) \ ENTRY(arch_atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \ BACKOFF_SETUP(%o2); \ 1: ldx [%o1], %g1; \ op %g1, %o0, %g7; \ casx [%o1], %g1, %g7; \ cmp %g1, %g7; \ bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \ nop; \ retl; \ op %g1, %o0, %o0; \ 2: BACKOFF_SPIN(%o2, %o3, 1b); \ ENDPROC(arch_atomic64_##op##_return); \ EXPORT_SYMBOL(arch_atomic64_##op##_return); #define ATOMIC64_FETCH_OP(op) \ ENTRY(arch_atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ BACKOFF_SETUP(%o2); \ 1: ldx [%o1], %g1; \ op %g1, %o0, %g7; \ casx [%o1], %g1, %g7; \ cmp %g1, %g7; \ bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \ nop; \ retl; \ mov %g1, %o0; \ 2: BACKOFF_SPIN(%o2, %o3, 1b); \ ENDPROC(arch_atomic64_fetch_##op); \ EXPORT_SYMBOL(arch_atomic64_fetch_##op); ATOMIC64_OP(add) ATOMIC64_OP_RETURN(add) ATOMIC64_FETCH_OP(add) ATOMIC64_OP(sub) ATOMIC64_OP_RETURN(sub) ATOMIC64_FETCH_OP(sub) ATOMIC64_OP(and) ATOMIC64_FETCH_OP(and) ATOMIC64_OP(or) ATOMIC64_FETCH_OP(or) ATOMIC64_OP(xor) ATOMIC64_FETCH_OP(xor) #undef ATOMIC64_FETCH_OP #undef ATOMIC64_OP_RETURN #undef ATOMIC64_OP ENTRY(arch_atomic64_dec_if_positive) /* %o0 = atomic_ptr */ BACKOFF_SETUP(%o2) 1: ldx [%o0], %g1 brlez,pn %g1, 3f sub %g1, 1, %g7 casx [%o0], %g1, %g7 cmp %g1, %g7 bne,pn %xcc, BACKOFF_LABEL(2f, 1b) nop 3: retl sub %g1, 1, %o0 2: BACKOFF_SPIN(%o2, %o3, 1b) ENDPROC(arch_atomic64_dec_if_positive) EXPORT_SYMBOL(arch_atomic64_dec_if_positive)
aixcc-public/challenge-001-exemplar-source
2,332
arch/sparc/lib/NG4memset.S
/* SPDX-License-Identifier: GPL-2.0 */ /* NG4memset.S: Niagara-4 optimized memset/bzero. * * Copyright (C) 2012 David S. Miller (davem@davemloft.net) */ #include <asm/asi.h> .register %g2, #scratch .register %g3, #scratch .text .align 32 .globl NG4memset NG4memset: andcc %o1, 0xff, %o4 be,pt %icc, 1f mov %o2, %o1 sllx %o4, 8, %g1 or %g1, %o4, %o2 sllx %o2, 16, %g1 or %g1, %o2, %o2 sllx %o2, 32, %g1 ba,pt %icc, 1f or %g1, %o2, %o4 .size NG4memset,.-NG4memset .align 32 .globl NG4bzero NG4bzero: clr %o4 1: cmp %o1, 16 ble %icc, .Ltiny mov %o0, %o3 sub %g0, %o0, %g1 and %g1, 0x7, %g1 brz,pt %g1, .Laligned8 sub %o1, %g1, %o1 1: stb %o4, [%o0 + 0x00] subcc %g1, 1, %g1 bne,pt %icc, 1b add %o0, 1, %o0 .Laligned8: cmp %o1, 64 + (64 - 8) ble .Lmedium sub %g0, %o0, %g1 andcc %g1, (64 - 1), %g1 brz,pn %g1, .Laligned64 sub %o1, %g1, %o1 1: stx %o4, [%o0 + 0x00] subcc %g1, 8, %g1 bne,pt %icc, 1b add %o0, 0x8, %o0 .Laligned64: andn %o1, 64 - 1, %g1 sub %o1, %g1, %o1 brnz,pn %o4, .Lnon_bzero_loop mov 0x20, %g2 1: stxa %o4, [%o0 + %g0] ASI_BLK_INIT_QUAD_LDD_P subcc %g1, 0x40, %g1 stxa %o4, [%o0 + %g2] ASI_BLK_INIT_QUAD_LDD_P bne,pt %icc, 1b add %o0, 0x40, %o0 .Lpostloop: cmp %o1, 8 bl,pn %icc, .Ltiny membar #StoreStore|#StoreLoad .Lmedium: andn %o1, 0x7, %g1 sub %o1, %g1, %o1 1: stx %o4, [%o0 + 0x00] subcc %g1, 0x8, %g1 bne,pt %icc, 1b add %o0, 0x08, %o0 andcc %o1, 0x4, %g1 be,pt %icc, .Ltiny sub %o1, %g1, %o1 stw %o4, [%o0 + 0x00] add %o0, 0x4, %o0 .Ltiny: cmp %o1, 0 be,pn %icc, .Lexit 1: subcc %o1, 1, %o1 stb %o4, [%o0 + 0x00] bne,pt %icc, 1b add %o0, 1, %o0 .Lexit: retl mov %o3, %o0 .Lnon_bzero_loop: mov 0x08, %g3 mov 0x28, %o5 1: stxa %o4, [%o0 + %g0] ASI_BLK_INIT_QUAD_LDD_P subcc %g1, 0x40, %g1 stxa %o4, [%o0 + %g2] ASI_BLK_INIT_QUAD_LDD_P stxa %o4, [%o0 + %g3] ASI_BLK_INIT_QUAD_LDD_P stxa %o4, [%o0 + %o5] ASI_BLK_INIT_QUAD_LDD_P add %o0, 0x10, %o0 stxa %o4, [%o0 + %g0] ASI_BLK_INIT_QUAD_LDD_P stxa %o4, [%o0 + %g2] ASI_BLK_INIT_QUAD_LDD_P stxa %o4, [%o0 + %g3] 
ASI_BLK_INIT_QUAD_LDD_P stxa %o4, [%o0 + %o5] ASI_BLK_INIT_QUAD_LDD_P bne,pt %icc, 1b add %o0, 0x30, %o0 ba,a,pt %icc, .Lpostloop nop .size NG4bzero,.-NG4bzero
aixcc-public/challenge-001-exemplar-source
1,109
arch/sparc/lib/NG2copy_to_user.S
/* SPDX-License-Identifier: GPL-2.0 */ /* NG2copy_to_user.S: Niagara-2 optimized copy to userspace. * * Copyright (C) 2007 David S. Miller (davem@davemloft.net) */ #define EX_ST(x,y) \ 98: x; \ .section __ex_table,"a";\ .align 4; \ .word 98b, y; \ .text; \ .align 4; #define EX_ST_FP(x,y) \ 98: x; \ .section __ex_table,"a";\ .align 4; \ .word 98b, y##_fp; \ .text; \ .align 4; #ifndef ASI_AIUS #define ASI_AIUS 0x11 #endif #ifndef ASI_BLK_AIUS_4V #define ASI_BLK_AIUS_4V 0x17 #endif #ifndef ASI_BLK_INIT_QUAD_LDD_AIUS #define ASI_BLK_INIT_QUAD_LDD_AIUS 0x23 #endif #define FUNC_NAME NG2copy_to_user #define STORE(type,src,addr) type##a src, [addr] ASI_AIUS #define STORE_ASI ASI_BLK_INIT_QUAD_LDD_AIUS #define STORE_BLK(src,addr) stda src, [addr] ASI_BLK_AIUS_4V #define EX_RETVAL(x) 0 #ifdef __KERNEL__ /* Writing to %asi is _expensive_ so we hardcode it. * Reading %asi to check for KERNEL_DS is comparatively * cheap. */ #define PREAMBLE \ rd %asi, %g1; \ cmp %g1, ASI_AIUS; \ bne,pn %icc, raw_copy_in_user; \ nop #endif #include "NG2memcpy.S"
aixcc-public/challenge-001-exemplar-source
31,360
arch/sparc/lib/M7memcpy.S
/* * M7memcpy: Optimized SPARC M7 memcpy * * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. */ .file "M7memcpy.S" /* * memcpy(s1, s2, len) * * Copy s2 to s1, always copy n bytes. * Note: this C code does not work for overlapped copies. * * Fast assembler language version of the following C-program for memcpy * which represents the `standard' for the C-library. * * void * * memcpy(void *s, const void *s0, size_t n) * { * if (n != 0) { * char *s1 = s; * const char *s2 = s0; * do { * *s1++ = *s2++; * } while (--n != 0); * } * return (s); * } * * * SPARC T7/M7 Flow : * * if (count < SMALL_MAX) { * if count < SHORTCOPY (SHORTCOPY=3) * copy bytes; exit with dst addr * if src & dst aligned on word boundary but not long word boundary, * copy with ldw/stw; branch to finish_up * if src & dst aligned on long word boundary * copy with ldx/stx; branch to finish_up * if src & dst not aligned and length <= SHORTCHECK (SHORTCHECK=14) * copy bytes; exit with dst addr * move enough bytes to get src to word boundary * if dst now on word boundary * move_words: * copy words; branch to finish_up * if dst now on half word boundary * load words, shift half words, store words; branch to finish_up * if dst on byte 1 * load words, shift 3 bytes, store words; branch to finish_up * if dst on byte 3 * load words, shift 1 byte, store words; branch to finish_up * finish_up: * copy bytes; exit with dst addr * } else { More than SMALL_MAX bytes * move bytes until dst is on long word boundary * if( src is on long word boundary ) { * if (count < MED_MAX) { * finish_long: src/dst aligned on 8 bytes * copy with ldx/stx in 8-way unrolled loop; * copy final 0-63 bytes; exit with dst addr * } else { src/dst aligned; count > MED_MAX * align dst on 64 byte boundary; for main data movement: * prefetch src data to L2 cache; let HW prefetch move data to L1 cache * Use BIS (block initializing store) to avoid copying store cache * lines from memory. 
But pre-store first element of each cache line * ST_CHUNK lines in advance of the rest of that cache line. That * gives time for replacement cache lines to be written back without * excess STQ and Miss Buffer filling. Repeat until near the end, * then finish up storing before going to finish_long. * } * } else { src/dst not aligned on 8 bytes * if src is word aligned and count < MED_WMAX * move words in 8-way unrolled loop * move final 0-31 bytes; exit with dst addr * if count < MED_UMAX * use alignaddr/faligndata combined with ldd/std in 8-way * unrolled loop to move data. * go to unalign_done * else * setup alignaddr for faligndata instructions * align dst on 64 byte boundary; prefetch src data to L1 cache * loadx8, falign, block-store, prefetch loop * (only use block-init-store when src/dst on 8 byte boundaries.) * unalign_done: * move remaining bytes for unaligned cases. exit with dst addr. * } * */ #include <asm/visasm.h> #include <asm/asi.h> #if !defined(EX_LD) && !defined(EX_ST) #define NON_USER_COPY #endif #ifndef EX_LD #define EX_LD(x,y) x #endif #ifndef EX_LD_FP #define EX_LD_FP(x,y) x #endif #ifndef EX_ST #define EX_ST(x,y) x #endif #ifndef EX_ST_FP #define EX_ST_FP(x,y) x #endif #ifndef EX_RETVAL #define EX_RETVAL(x) x #endif #ifndef LOAD #define LOAD(type,addr,dest) type [addr], dest #endif #ifndef STORE #define STORE(type,src,addr) type src, [addr] #endif /* * ASI_BLK_INIT_QUAD_LDD_P/ASI_BLK_INIT_QUAD_LDD_S marks the cache * line as "least recently used" which means if many threads are * active, it has a high probability of being pushed out of the cache * between the first initializing store and the final stores. 
* Thus, we use ASI_ST_BLKINIT_MRU_P/ASI_ST_BLKINIT_MRU_S which * marks the cache line as "most recently used" for all * but the last cache line */ #ifndef STORE_ASI #ifndef SIMULATE_NIAGARA_ON_NON_NIAGARA #define STORE_ASI ASI_BLK_INIT_QUAD_LDD_P #else #define STORE_ASI 0x80 /* ASI_P */ #endif #endif #ifndef STORE_MRU_ASI #ifndef SIMULATE_NIAGARA_ON_NON_NIAGARA #define STORE_MRU_ASI ASI_ST_BLKINIT_MRU_P #else #define STORE_MRU_ASI 0x80 /* ASI_P */ #endif #endif #ifndef STORE_INIT #define STORE_INIT(src,addr) stxa src, [addr] STORE_ASI #endif #ifndef STORE_INIT_MRU #define STORE_INIT_MRU(src,addr) stxa src, [addr] STORE_MRU_ASI #endif #ifndef FUNC_NAME #define FUNC_NAME M7memcpy #endif #ifndef PREAMBLE #define PREAMBLE #endif #define BLOCK_SIZE 64 #define SHORTCOPY 3 #define SHORTCHECK 14 #define SHORT_LONG 64 /* max copy for short longword-aligned case */ /* must be at least 64 */ #define SMALL_MAX 128 #define MED_UMAX 1024 /* max copy for medium un-aligned case */ #define MED_WMAX 1024 /* max copy for medium word-aligned case */ #define MED_MAX 1024 /* max copy for medium longword-aligned case */ #define ST_CHUNK 24 /* ST_CHUNK - block of values for BIS Store */ #define ALIGN_PRE 24 /* distance for aligned prefetch loop */ .register %g2,#scratch .section ".text" .global FUNC_NAME .type FUNC_NAME, #function .align 16 FUNC_NAME: srlx %o2, 31, %g2 cmp %g2, 0 tne %xcc, 5 PREAMBLE mov %o0, %g1 ! save %o0 brz,pn %o2, .Lsmallx cmp %o2, 3 ble,pn %icc, .Ltiny_cp cmp %o2, 19 ble,pn %icc, .Lsmall_cp or %o0, %o1, %g2 cmp %o2, SMALL_MAX bl,pn %icc, .Lmedium_cp nop .Lmedium: neg %o0, %o5 andcc %o5, 7, %o5 ! bytes till DST 8 byte aligned brz,pt %o5, .Ldst_aligned_on_8 ! %o5 has the bytes to be written in partial store. sub %o2, %o5, %o2 sub %o1, %o0, %o1 ! %o1 gets the difference 7: ! dst aligning loop add %o1, %o0, %o4 EX_LD(LOAD(ldub, %o4, %o4), memcpy_retl_o2_plus_o5) ! 
load one byte subcc %o5, 1, %o5 EX_ST(STORE(stb, %o4, %o0), memcpy_retl_o2_plus_o5_plus_1) bgu,pt %xcc, 7b add %o0, 1, %o0 ! advance dst add %o1, %o0, %o1 ! restore %o1 .Ldst_aligned_on_8: andcc %o1, 7, %o5 brnz,pt %o5, .Lsrc_dst_unaligned_on_8 nop .Lsrc_dst_aligned_on_8: ! check if we are copying MED_MAX or more bytes set MED_MAX, %o3 cmp %o2, %o3 ! limit to store buffer size bgu,pn %xcc, .Llarge_align8_copy nop /* * Special case for handling when src and dest are both long word aligned * and total data to move is less than MED_MAX bytes */ .Lmedlong: subcc %o2, 63, %o2 ! adjust length to allow cc test ble,pn %xcc, .Lmedl63 ! skip big loop if less than 64 bytes nop .Lmedl64: EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2_plus_63) ! load subcc %o2, 64, %o2 ! decrement length count EX_ST(STORE(stx, %o4, %o0), memcpy_retl_o2_plus_63_64) ! and store EX_LD(LOAD(ldx, %o1+8, %o3), memcpy_retl_o2_plus_63_56) ! a block of 64 EX_ST(STORE(stx, %o3, %o0+8), memcpy_retl_o2_plus_63_56) EX_LD(LOAD(ldx, %o1+16, %o4), memcpy_retl_o2_plus_63_48) EX_ST(STORE(stx, %o4, %o0+16), memcpy_retl_o2_plus_63_48) EX_LD(LOAD(ldx, %o1+24, %o3), memcpy_retl_o2_plus_63_40) EX_ST(STORE(stx, %o3, %o0+24), memcpy_retl_o2_plus_63_40) EX_LD(LOAD(ldx, %o1+32, %o4), memcpy_retl_o2_plus_63_32)! load and store EX_ST(STORE(stx, %o4, %o0+32), memcpy_retl_o2_plus_63_32) EX_LD(LOAD(ldx, %o1+40, %o3), memcpy_retl_o2_plus_63_24)! a block of 64 add %o1, 64, %o1 ! increase src ptr by 64 EX_ST(STORE(stx, %o3, %o0+40), memcpy_retl_o2_plus_63_24) EX_LD(LOAD(ldx, %o1-16, %o4), memcpy_retl_o2_plus_63_16) add %o0, 64, %o0 ! increase dst ptr by 64 EX_ST(STORE(stx, %o4, %o0-16), memcpy_retl_o2_plus_63_16) EX_LD(LOAD(ldx, %o1-8, %o3), memcpy_retl_o2_plus_63_8) bgu,pt %xcc, .Lmedl64 ! repeat if at least 64 bytes left EX_ST(STORE(stx, %o3, %o0-8), memcpy_retl_o2_plus_63_8) .Lmedl63: addcc %o2, 32, %o2 ! adjust remaining count ble,pt %xcc, .Lmedl31 ! 
to skip if 31 or fewer bytes left nop EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2_plus_31) ! load sub %o2, 32, %o2 ! decrement length count EX_ST(STORE(stx, %o4, %o0), memcpy_retl_o2_plus_31_32) ! and store EX_LD(LOAD(ldx, %o1+8, %o3), memcpy_retl_o2_plus_31_24) ! a block of 32 add %o1, 32, %o1 ! increase src ptr by 32 EX_ST(STORE(stx, %o3, %o0+8), memcpy_retl_o2_plus_31_24) EX_LD(LOAD(ldx, %o1-16, %o4), memcpy_retl_o2_plus_31_16) add %o0, 32, %o0 ! increase dst ptr by 32 EX_ST(STORE(stx, %o4, %o0-16), memcpy_retl_o2_plus_31_16) EX_LD(LOAD(ldx, %o1-8, %o3), memcpy_retl_o2_plus_31_8) EX_ST(STORE(stx, %o3, %o0-8), memcpy_retl_o2_plus_31_8) .Lmedl31: addcc %o2, 16, %o2 ! adjust remaining count ble,pt %xcc, .Lmedl15 ! skip if 15 or fewer bytes left nop ! EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2_plus_15) add %o1, 16, %o1 ! increase src ptr by 16 EX_ST(STORE(stx, %o4, %o0), memcpy_retl_o2_plus_15) sub %o2, 16, %o2 ! decrease count by 16 EX_LD(LOAD(ldx, %o1-8, %o3), memcpy_retl_o2_plus_15_8) add %o0, 16, %o0 ! increase dst ptr by 16 EX_ST(STORE(stx, %o3, %o0-8), memcpy_retl_o2_plus_15_8) .Lmedl15: addcc %o2, 15, %o2 ! restore count bz,pt %xcc, .Lsmallx ! exit if finished cmp %o2, 8 blt,pt %xcc, .Lmedw7 ! skip if 7 or fewer bytes left tst %o2 EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2) ! load 8 bytes add %o1, 8, %o1 ! increase src ptr by 8 add %o0, 8, %o0 ! increase dst ptr by 8 subcc %o2, 8, %o2 ! decrease count by 8 bnz,pn %xcc, .Lmedw7 EX_ST(STORE(stx, %o4, %o0-8), memcpy_retl_o2_plus_8) ! and store 8 retl mov EX_RETVAL(%g1), %o0 ! restore %o0 .align 16 .Lsrc_dst_unaligned_on_8: ! DST is 8-byte aligned, src is not 2: andcc %o1, 0x3, %o5 ! test word alignment bnz,pt %xcc, .Lunalignsetup ! branch to skip if not word aligned nop /* * Handle all cases where src and dest are aligned on word * boundaries. Use unrolled loops for better performance. * This option wins over standard large data move when * source and destination is in cache for.Lmedium * to short data moves. 
*/ set MED_WMAX, %o3 cmp %o2, %o3 ! limit to store buffer size bge,pt %xcc, .Lunalignrejoin ! otherwise rejoin main loop nop subcc %o2, 31, %o2 ! adjust length to allow cc test ! for end of loop ble,pt %xcc, .Lmedw31 ! skip big loop if less than 16 .Lmedw32: EX_LD(LOAD(ld, %o1, %o4), memcpy_retl_o2_plus_31)! move a block of 32 sllx %o4, 32, %o5 EX_LD(LOAD(ld, %o1+4, %o4), memcpy_retl_o2_plus_31) or %o4, %o5, %o5 EX_ST(STORE(stx, %o5, %o0), memcpy_retl_o2_plus_31) subcc %o2, 32, %o2 ! decrement length count EX_LD(LOAD(ld, %o1+8, %o4), memcpy_retl_o2_plus_31_24) sllx %o4, 32, %o5 EX_LD(LOAD(ld, %o1+12, %o4), memcpy_retl_o2_plus_31_24) or %o4, %o5, %o5 EX_ST(STORE(stx, %o5, %o0+8), memcpy_retl_o2_plus_31_24) add %o1, 32, %o1 ! increase src ptr by 32 EX_LD(LOAD(ld, %o1-16, %o4), memcpy_retl_o2_plus_31_16) sllx %o4, 32, %o5 EX_LD(LOAD(ld, %o1-12, %o4), memcpy_retl_o2_plus_31_16) or %o4, %o5, %o5 EX_ST(STORE(stx, %o5, %o0+16), memcpy_retl_o2_plus_31_16) add %o0, 32, %o0 ! increase dst ptr by 32 EX_LD(LOAD(ld, %o1-8, %o4), memcpy_retl_o2_plus_31_8) sllx %o4, 32, %o5 EX_LD(LOAD(ld, %o1-4, %o4), memcpy_retl_o2_plus_31_8) or %o4, %o5, %o5 bgu,pt %xcc, .Lmedw32 ! repeat if at least 32 bytes left EX_ST(STORE(stx, %o5, %o0-8), memcpy_retl_o2_plus_31_8) .Lmedw31: addcc %o2, 31, %o2 ! restore count bz,pt %xcc, .Lsmallx ! exit if finished nop cmp %o2, 16 blt,pt %xcc, .Lmedw15 nop EX_LD(LOAD(ld, %o1, %o4), memcpy_retl_o2)! move a block of 16 bytes sllx %o4, 32, %o5 subcc %o2, 16, %o2 ! decrement length count EX_LD(LOAD(ld, %o1+4, %o4), memcpy_retl_o2_plus_16) or %o4, %o5, %o5 EX_ST(STORE(stx, %o5, %o0), memcpy_retl_o2_plus_16) add %o1, 16, %o1 ! increase src ptr by 16 EX_LD(LOAD(ld, %o1-8, %o4), memcpy_retl_o2_plus_8) add %o0, 16, %o0 ! increase dst ptr by 16 sllx %o4, 32, %o5 EX_LD(LOAD(ld, %o1-4, %o4), memcpy_retl_o2_plus_8) or %o4, %o5, %o5 EX_ST(STORE(stx, %o5, %o0-8), memcpy_retl_o2_plus_8) .Lmedw15: bz,pt %xcc, .Lsmallx ! exit if finished cmp %o2, 8 blt,pn %xcc, .Lmedw7 ! 
skip if 7 or fewer bytes left tst %o2 EX_LD(LOAD(ld, %o1, %o4), memcpy_retl_o2) ! load 4 bytes subcc %o2, 8, %o2 ! decrease count by 8 EX_ST(STORE(stw, %o4, %o0), memcpy_retl_o2_plus_8)! and store 4 bytes add %o1, 8, %o1 ! increase src ptr by 8 EX_LD(LOAD(ld, %o1-4, %o3), memcpy_retl_o2_plus_4) ! load 4 bytes add %o0, 8, %o0 ! increase dst ptr by 8 EX_ST(STORE(stw, %o3, %o0-4), memcpy_retl_o2_plus_4)! and store 4 bytes bz,pt %xcc, .Lsmallx ! exit if finished .Lmedw7: ! count is ge 1, less than 8 cmp %o2, 4 ! check for 4 bytes left blt,pn %xcc, .Lsmallleft3 ! skip if 3 or fewer bytes left nop ! EX_LD(LOAD(ld, %o1, %o4), memcpy_retl_o2) ! load 4 bytes add %o1, 4, %o1 ! increase src ptr by 4 add %o0, 4, %o0 ! increase dst ptr by 4 subcc %o2, 4, %o2 ! decrease count by 4 bnz .Lsmallleft3 EX_ST(STORE(stw, %o4, %o0-4), memcpy_retl_o2_plus_4)! and store 4 bytes retl mov EX_RETVAL(%g1), %o0 .align 16 .Llarge_align8_copy: ! Src and dst share 8 byte alignment ! align dst to 64 byte boundary andcc %o0, 0x3f, %o3 ! %o3 == 0 means dst is 64 byte aligned brz,pn %o3, .Laligned_to_64 andcc %o0, 8, %o3 ! odd long words to move? brz,pt %o3, .Laligned_to_16 nop EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2) sub %o2, 8, %o2 add %o1, 8, %o1 ! increment src ptr add %o0, 8, %o0 ! increment dst ptr EX_ST(STORE(stx, %o4, %o0-8), memcpy_retl_o2_plus_8) .Laligned_to_16: andcc %o0, 16, %o3 ! pair of long words to move? brz,pt %o3, .Laligned_to_32 nop EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2) sub %o2, 16, %o2 EX_ST(STORE(stx, %o4, %o0), memcpy_retl_o2_plus_16) add %o1, 16, %o1 ! increment src ptr EX_LD(LOAD(ldx, %o1-8, %o4), memcpy_retl_o2_plus_8) add %o0, 16, %o0 ! increment dst ptr EX_ST(STORE(stx, %o4, %o0-8), memcpy_retl_o2_plus_8) .Laligned_to_32: andcc %o0, 32, %o3 ! four long words to move? 
brz,pt %o3, .Laligned_to_64 nop EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2) sub %o2, 32, %o2 EX_ST(STORE(stx, %o4, %o0), memcpy_retl_o2_plus_32) EX_LD(LOAD(ldx, %o1+8, %o4), memcpy_retl_o2_plus_24) EX_ST(STORE(stx, %o4, %o0+8), memcpy_retl_o2_plus_24) EX_LD(LOAD(ldx, %o1+16, %o4), memcpy_retl_o2_plus_16) EX_ST(STORE(stx, %o4, %o0+16), memcpy_retl_o2_plus_16) add %o1, 32, %o1 ! increment src ptr EX_LD(LOAD(ldx, %o1-8, %o4), memcpy_retl_o2_plus_8) add %o0, 32, %o0 ! increment dst ptr EX_ST(STORE(stx, %o4, %o0-8), memcpy_retl_o2_plus_8) .Laligned_to_64: ! ! Using block init store (BIS) instructions to avoid fetching cache ! lines from memory. Use ST_CHUNK stores to first element of each cache ! line (similar to prefetching) to avoid overfilling STQ or miss buffers. ! Gives existing cache lines time to be moved out of L1/L2/L3 cache. ! Initial stores using MRU version of BIS to keep cache line in ! cache until we are ready to store final element of cache line. ! Then store last element using the LRU version of BIS. ! andn %o2, 0x3f, %o5 ! %o5 is multiple of block size and %o2, 0x3f, %o2 ! residue bytes in %o2 ! ! We use STORE_MRU_ASI for the first seven stores to each cache line ! followed by STORE_ASI (mark as LRU) for the last store. That ! mixed approach reduces the probability that the cache line is removed ! before we finish setting it, while minimizing the effects on ! other cached values during a large memcpy ! ! ST_CHUNK batches up initial BIS operations for several cache lines ! to allow multiple requests to not be blocked by overflowing the ! the store miss buffer. Then the matching stores for all those ! BIS operations are executed. ! sub %o0, 8, %o0 ! 
adjust %o0 for ASI alignment .Lalign_loop: cmp %o5, ST_CHUNK*64 blu,pt %xcc, .Lalign_loop_fin mov ST_CHUNK,%o3 .Lalign_loop_start: prefetch [%o1 + (ALIGN_PRE * BLOCK_SIZE)], 21 subcc %o3, 1, %o3 EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2_plus_o5) add %o1, 64, %o1 add %o0, 8, %o0 EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5) bgu %xcc,.Lalign_loop_start add %o0, 56, %o0 mov ST_CHUNK,%o3 sllx %o3, 6, %o4 ! ST_CHUNK*64 sub %o1, %o4, %o1 ! reset %o1 sub %o0, %o4, %o0 ! reset %o0 .Lalign_loop_rest: EX_LD(LOAD(ldx, %o1+8, %o4), memcpy_retl_o2_plus_o5) add %o0, 16, %o0 EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5) EX_LD(LOAD(ldx, %o1+16, %o4), memcpy_retl_o2_plus_o5) add %o0, 8, %o0 EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5) subcc %o3, 1, %o3 EX_LD(LOAD(ldx, %o1+24, %o4), memcpy_retl_o2_plus_o5) add %o0, 8, %o0 EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5) EX_LD(LOAD(ldx, %o1+32, %o4), memcpy_retl_o2_plus_o5) add %o0, 8, %o0 EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5) EX_LD(LOAD(ldx, %o1+40, %o4), memcpy_retl_o2_plus_o5) add %o0, 8, %o0 EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5) EX_LD(LOAD(ldx, %o1+48, %o4), memcpy_retl_o2_plus_o5) add %o1, 64, %o1 add %o0, 8, %o0 EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5) add %o0, 8, %o0 EX_LD(LOAD(ldx, %o1-8, %o4), memcpy_retl_o2_plus_o5) sub %o5, 64, %o5 bgu %xcc,.Lalign_loop_rest ! 
mark cache line as LRU EX_ST(STORE_INIT(%o4, %o0), memcpy_retl_o2_plus_o5_plus_64) cmp %o5, ST_CHUNK*64 bgu,pt %xcc, .Lalign_loop_start mov ST_CHUNK,%o3 cmp %o5, 0 beq .Lalign_done nop .Lalign_loop_fin: EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2_plus_o5) EX_ST(STORE(stx, %o4, %o0+8), memcpy_retl_o2_plus_o5) EX_LD(LOAD(ldx, %o1+8, %o4), memcpy_retl_o2_plus_o5) EX_ST(STORE(stx, %o4, %o0+8+8), memcpy_retl_o2_plus_o5) EX_LD(LOAD(ldx, %o1+16, %o4), memcpy_retl_o2_plus_o5) EX_ST(STORE(stx, %o4, %o0+8+16), memcpy_retl_o2_plus_o5) subcc %o5, 64, %o5 EX_LD(LOAD(ldx, %o1+24, %o4), memcpy_retl_o2_plus_o5_64) EX_ST(STORE(stx, %o4, %o0+8+24), memcpy_retl_o2_plus_o5_64) EX_LD(LOAD(ldx, %o1+32, %o4), memcpy_retl_o2_plus_o5_64) EX_ST(STORE(stx, %o4, %o0+8+32), memcpy_retl_o2_plus_o5_64) EX_LD(LOAD(ldx, %o1+40, %o4), memcpy_retl_o2_plus_o5_64) EX_ST(STORE(stx, %o4, %o0+8+40), memcpy_retl_o2_plus_o5_64) EX_LD(LOAD(ldx, %o1+48, %o4), memcpy_retl_o2_plus_o5_64) add %o1, 64, %o1 EX_ST(STORE(stx, %o4, %o0+8+48), memcpy_retl_o2_plus_o5_64) add %o0, 64, %o0 EX_LD(LOAD(ldx, %o1-8, %o4), memcpy_retl_o2_plus_o5_64) bgu %xcc,.Lalign_loop_fin EX_ST(STORE(stx, %o4, %o0), memcpy_retl_o2_plus_o5_64) .Lalign_done: add %o0, 8, %o0 ! restore %o0 from ASI alignment membar #StoreStore sub %o2, 63, %o2 ! adjust length to allow cc test ba .Lmedl63 ! in .Lmedl63 nop .align 16 ! Dst is on 8 byte boundary; src is not; remaining count > SMALL_MAX .Lunalignsetup: .Lunalignrejoin: mov %g1, %o3 ! save %g1 as VISEntryHalf clobbers it #ifdef NON_USER_COPY VISEntryHalfFast(.Lmedium_vis_entry_fail_cp) #else VISEntryHalf #endif mov %o3, %g1 ! restore %g1 set MED_UMAX, %o3 cmp %o2, %o3 ! check for.Lmedium unaligned limit bge,pt %xcc,.Lunalign_large prefetch [%o1 + (4 * BLOCK_SIZE)], 20 andn %o2, 0x3f, %o5 ! %o5 is multiple of block size and %o2, 0x3f, %o2 ! residue bytes in %o2 cmp %o2, 8 ! Insure we do not load beyond bgt .Lunalign_adjust ! end of source buffer andn %o1, 0x7, %o4 ! 
%o4 has long word aligned src address add %o2, 64, %o2 ! adjust to leave loop sub %o5, 64, %o5 ! early if necessary .Lunalign_adjust: alignaddr %o1, %g0, %g0 ! generate %gsr add %o1, %o5, %o1 ! advance %o1 to after blocks EX_LD_FP(LOAD(ldd, %o4, %f0), memcpy_retl_o2_plus_o5) .Lunalign_loop: EX_LD_FP(LOAD(ldd, %o4+8, %f2), memcpy_retl_o2_plus_o5) faligndata %f0, %f2, %f16 EX_LD_FP(LOAD(ldd, %o4+16, %f4), memcpy_retl_o2_plus_o5) subcc %o5, BLOCK_SIZE, %o5 EX_ST_FP(STORE(std, %f16, %o0), memcpy_retl_o2_plus_o5_plus_64) faligndata %f2, %f4, %f18 EX_LD_FP(LOAD(ldd, %o4+24, %f6), memcpy_retl_o2_plus_o5_plus_56) EX_ST_FP(STORE(std, %f18, %o0+8), memcpy_retl_o2_plus_o5_plus_56) faligndata %f4, %f6, %f20 EX_LD_FP(LOAD(ldd, %o4+32, %f8), memcpy_retl_o2_plus_o5_plus_48) EX_ST_FP(STORE(std, %f20, %o0+16), memcpy_retl_o2_plus_o5_plus_48) faligndata %f6, %f8, %f22 EX_LD_FP(LOAD(ldd, %o4+40, %f10), memcpy_retl_o2_plus_o5_plus_40) EX_ST_FP(STORE(std, %f22, %o0+24), memcpy_retl_o2_plus_o5_plus_40) faligndata %f8, %f10, %f24 EX_LD_FP(LOAD(ldd, %o4+48, %f12), memcpy_retl_o2_plus_o5_plus_32) EX_ST_FP(STORE(std, %f24, %o0+32), memcpy_retl_o2_plus_o5_plus_32) faligndata %f10, %f12, %f26 EX_LD_FP(LOAD(ldd, %o4+56, %f14), memcpy_retl_o2_plus_o5_plus_24) add %o4, BLOCK_SIZE, %o4 EX_ST_FP(STORE(std, %f26, %o0+40), memcpy_retl_o2_plus_o5_plus_24) faligndata %f12, %f14, %f28 EX_LD_FP(LOAD(ldd, %o4, %f0), memcpy_retl_o2_plus_o5_plus_16) EX_ST_FP(STORE(std, %f28, %o0+48), memcpy_retl_o2_plus_o5_plus_16) faligndata %f14, %f0, %f30 EX_ST_FP(STORE(std, %f30, %o0+56), memcpy_retl_o2_plus_o5_plus_8) add %o0, BLOCK_SIZE, %o0 bgu,pt %xcc, .Lunalign_loop prefetch [%o4 + (5 * BLOCK_SIZE)], 20 ba .Lunalign_done nop .Lunalign_large: andcc %o0, 0x3f, %o3 ! is dst 64-byte block aligned? bz %xcc, .Lunalignsrc sub %o3, 64, %o3 ! %o3 will be multiple of 8 neg %o3 ! bytes until dest is 64 byte aligned sub %o2, %o3, %o2 ! update cnt with bytes to be moved ! 
Move bytes according to source alignment andcc %o1, 0x1, %o5 bnz %xcc, .Lunalignbyte ! check for byte alignment nop andcc %o1, 2, %o5 ! check for half word alignment bnz %xcc, .Lunalignhalf nop ! Src is word aligned .Lunalignword: EX_LD_FP(LOAD(ld, %o1, %o4), memcpy_retl_o2_plus_o3) ! load 4 bytes add %o1, 8, %o1 ! increase src ptr by 8 EX_ST_FP(STORE(stw, %o4, %o0), memcpy_retl_o2_plus_o3) ! and store 4 subcc %o3, 8, %o3 ! decrease count by 8 EX_LD_FP(LOAD(ld, %o1-4, %o4), memcpy_retl_o2_plus_o3_plus_4)! load 4 add %o0, 8, %o0 ! increase dst ptr by 8 bnz %xcc, .Lunalignword EX_ST_FP(STORE(stw, %o4, %o0-4), memcpy_retl_o2_plus_o3_plus_4) ba .Lunalignsrc nop ! Src is half-word aligned .Lunalignhalf: EX_LD_FP(LOAD(lduh, %o1, %o4), memcpy_retl_o2_plus_o3) ! load 2 bytes sllx %o4, 32, %o5 ! shift left EX_LD_FP(LOAD(lduw, %o1+2, %o4), memcpy_retl_o2_plus_o3) or %o4, %o5, %o5 sllx %o5, 16, %o5 EX_LD_FP(LOAD(lduh, %o1+6, %o4), memcpy_retl_o2_plus_o3) or %o4, %o5, %o5 EX_ST_FP(STORE(stx, %o5, %o0), memcpy_retl_o2_plus_o3) add %o1, 8, %o1 subcc %o3, 8, %o3 bnz %xcc, .Lunalignhalf add %o0, 8, %o0 ba .Lunalignsrc nop ! Src is Byte aligned .Lunalignbyte: sub %o0, %o1, %o0 ! share pointer advance .Lunalignbyte_loop: EX_LD_FP(LOAD(ldub, %o1, %o4), memcpy_retl_o2_plus_o3) sllx %o4, 56, %o5 EX_LD_FP(LOAD(lduh, %o1+1, %o4), memcpy_retl_o2_plus_o3) sllx %o4, 40, %o4 or %o4, %o5, %o5 EX_LD_FP(LOAD(lduh, %o1+3, %o4), memcpy_retl_o2_plus_o3) sllx %o4, 24, %o4 or %o4, %o5, %o5 EX_LD_FP(LOAD(lduh, %o1+5, %o4), memcpy_retl_o2_plus_o3) sllx %o4, 8, %o4 or %o4, %o5, %o5 EX_LD_FP(LOAD(ldub, %o1+7, %o4), memcpy_retl_o2_plus_o3) or %o4, %o5, %o5 add %o0, %o1, %o0 EX_ST_FP(STORE(stx, %o5, %o0), memcpy_retl_o2_plus_o3) sub %o0, %o1, %o0 subcc %o3, 8, %o3 bnz %xcc, .Lunalignbyte_loop add %o1, 8, %o1 add %o0,%o1, %o0 ! restore pointer ! Destination is now block (64 byte aligned) .Lunalignsrc: andn %o2, 0x3f, %o5 ! %o5 is multiple of block size and %o2, 0x3f, %o2 ! 
residue bytes in %o2 add %o2, 64, %o2 ! Insure we do not load beyond sub %o5, 64, %o5 ! end of source buffer andn %o1, 0x7, %o4 ! %o4 has long word aligned src address alignaddr %o1, %g0, %g0 ! generate %gsr add %o1, %o5, %o1 ! advance %o1 to after blocks EX_LD_FP(LOAD(ldd, %o4, %f14), memcpy_retl_o2_plus_o5) add %o4, 8, %o4 .Lunalign_sloop: EX_LD_FP(LOAD(ldd, %o4, %f16), memcpy_retl_o2_plus_o5) faligndata %f14, %f16, %f0 EX_LD_FP(LOAD(ldd, %o4+8, %f18), memcpy_retl_o2_plus_o5) faligndata %f16, %f18, %f2 EX_LD_FP(LOAD(ldd, %o4+16, %f20), memcpy_retl_o2_plus_o5) faligndata %f18, %f20, %f4 EX_ST_FP(STORE(std, %f0, %o0), memcpy_retl_o2_plus_o5) subcc %o5, 64, %o5 EX_LD_FP(LOAD(ldd, %o4+24, %f22), memcpy_retl_o2_plus_o5_plus_56) faligndata %f20, %f22, %f6 EX_ST_FP(STORE(std, %f2, %o0+8), memcpy_retl_o2_plus_o5_plus_56) EX_LD_FP(LOAD(ldd, %o4+32, %f24), memcpy_retl_o2_plus_o5_plus_48) faligndata %f22, %f24, %f8 EX_ST_FP(STORE(std, %f4, %o0+16), memcpy_retl_o2_plus_o5_plus_48) EX_LD_FP(LOAD(ldd, %o4+40, %f26), memcpy_retl_o2_plus_o5_plus_40) faligndata %f24, %f26, %f10 EX_ST_FP(STORE(std, %f6, %o0+24), memcpy_retl_o2_plus_o5_plus_40) EX_LD_FP(LOAD(ldd, %o4+48, %f28), memcpy_retl_o2_plus_o5_plus_40) faligndata %f26, %f28, %f12 EX_ST_FP(STORE(std, %f8, %o0+32), memcpy_retl_o2_plus_o5_plus_40) add %o4, 64, %o4 EX_LD_FP(LOAD(ldd, %o4-8, %f30), memcpy_retl_o2_plus_o5_plus_40) faligndata %f28, %f30, %f14 EX_ST_FP(STORE(std, %f10, %o0+40), memcpy_retl_o2_plus_o5_plus_40) EX_ST_FP(STORE(std, %f12, %o0+48), memcpy_retl_o2_plus_o5_plus_40) add %o0, 64, %o0 EX_ST_FP(STORE(std, %f14, %o0-8), memcpy_retl_o2_plus_o5_plus_40) fsrc2 %f30, %f14 bgu,pt %xcc, .Lunalign_sloop prefetch [%o4 + (8 * BLOCK_SIZE)], 20 .Lunalign_done: ! Handle trailing bytes, 64 to 127 ! Dest long word aligned, Src not long word aligned cmp %o2, 15 bleu %xcc, .Lunalign_short andn %o2, 0x7, %o5 ! %o5 is multiple of 8 and %o2, 0x7, %o2 ! residue bytes in %o2 add %o2, 8, %o2 sub %o5, 8, %o5 ! 
insure we do not load past end of src andn %o1, 0x7, %o4 ! %o4 has long word aligned src address add %o1, %o5, %o1 ! advance %o1 to after multiple of 8 EX_LD_FP(LOAD(ldd, %o4, %f0), memcpy_retl_o2_plus_o5)! fetch partialword .Lunalign_by8: EX_LD_FP(LOAD(ldd, %o4+8, %f2), memcpy_retl_o2_plus_o5) add %o4, 8, %o4 faligndata %f0, %f2, %f16 subcc %o5, 8, %o5 EX_ST_FP(STORE(std, %f16, %o0), memcpy_retl_o2_plus_o5) fsrc2 %f2, %f0 bgu,pt %xcc, .Lunalign_by8 add %o0, 8, %o0 .Lunalign_short: #ifdef NON_USER_COPY VISExitHalfFast #else VISExitHalf #endif ba .Lsmallrest nop /* * This is a special case of nested memcpy. This can happen when kernel * calls unaligned memcpy back to back without saving FP registers. We need * traps(context switch) to save/restore FP registers. If the kernel calls * memcpy without this trap sequence we will hit FP corruption. Let's use * the normal integer load/store method in this case. */ #ifdef NON_USER_COPY .Lmedium_vis_entry_fail_cp: or %o0, %o1, %g2 #endif .Lmedium_cp: LOAD(prefetch, %o1 + 0x40, #n_reads_strong) andcc %g2, 0x7, %g0 bne,pn %xcc, .Lmedium_unaligned_cp nop .Lmedium_noprefetch_cp: andncc %o2, 0x20 - 1, %o5 be,pn %xcc, 2f sub %o2, %o5, %o2 1: EX_LD(LOAD(ldx, %o1 + 0x00, %o3), memcpy_retl_o2_plus_o5) EX_LD(LOAD(ldx, %o1 + 0x08, %g2), memcpy_retl_o2_plus_o5) EX_LD(LOAD(ldx, %o1 + 0x10, %g7), memcpy_retl_o2_plus_o5) EX_LD(LOAD(ldx, %o1 + 0x18, %o4), memcpy_retl_o2_plus_o5) add %o1, 0x20, %o1 subcc %o5, 0x20, %o5 EX_ST(STORE(stx, %o3, %o0 + 0x00), memcpy_retl_o2_plus_o5_plus_32) EX_ST(STORE(stx, %g2, %o0 + 0x08), memcpy_retl_o2_plus_o5_plus_24) EX_ST(STORE(stx, %g7, %o0 + 0x10), memcpy_retl_o2_plus_o5_plus_24) EX_ST(STORE(stx, %o4, %o0 + 0x18), memcpy_retl_o2_plus_o5_plus_8) bne,pt %xcc, 1b add %o0, 0x20, %o0 2: andcc %o2, 0x18, %o5 be,pt %xcc, 3f sub %o2, %o5, %o2 1: EX_LD(LOAD(ldx, %o1 + 0x00, %o3), memcpy_retl_o2_plus_o5) add %o1, 0x08, %o1 add %o0, 0x08, %o0 subcc %o5, 0x08, %o5 bne,pt %xcc, 1b EX_ST(STORE(stx, %o3, %o0 - 0x08), 
memcpy_retl_o2_plus_o5_plus_8) 3: brz,pt %o2, .Lexit_cp cmp %o2, 0x04 bl,pn %xcc, .Ltiny_cp nop EX_LD(LOAD(lduw, %o1 + 0x00, %o3), memcpy_retl_o2) add %o1, 0x04, %o1 add %o0, 0x04, %o0 subcc %o2, 0x04, %o2 bne,pn %xcc, .Ltiny_cp EX_ST(STORE(stw, %o3, %o0 - 0x04), memcpy_retl_o2_plus_4) ba,a,pt %xcc, .Lexit_cp .Lmedium_unaligned_cp: /* First get dest 8 byte aligned. */ sub %g0, %o0, %o3 and %o3, 0x7, %o3 brz,pt %o3, 2f sub %o2, %o3, %o2 1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2), memcpy_retl_o2_plus_g1) add %o1, 1, %o1 subcc %o3, 1, %o3 add %o0, 1, %o0 bne,pt %xcc, 1b EX_ST(STORE(stb, %g2, %o0 - 0x01), memcpy_retl_o2_plus_g1_plus_1) 2: and %o1, 0x7, %o3 brz,pn %o3, .Lmedium_noprefetch_cp sll %o3, 3, %o3 mov 64, %g2 sub %g2, %o3, %g2 andn %o1, 0x7, %o1 EX_LD(LOAD(ldx, %o1 + 0x00, %o4), memcpy_retl_o2) sllx %o4, %o3, %o4 andn %o2, 0x08 - 1, %o5 sub %o2, %o5, %o2 1: EX_LD(LOAD(ldx, %o1 + 0x08, %g3), memcpy_retl_o2_plus_o5) add %o1, 0x08, %o1 subcc %o5, 0x08, %o5 srlx %g3, %g2, %g7 or %g7, %o4, %g7 EX_ST(STORE(stx, %g7, %o0 + 0x00), memcpy_retl_o2_plus_o5_plus_8) add %o0, 0x08, %o0 bne,pt %xcc, 1b sllx %g3, %o3, %o4 srl %o3, 3, %o3 add %o1, %o3, %o1 brz,pn %o2, .Lexit_cp nop ba,pt %xcc, .Lsmall_unaligned_cp .Ltiny_cp: EX_LD(LOAD(ldub, %o1 + 0x00, %o3), memcpy_retl_o2) subcc %o2, 1, %o2 be,pn %xcc, .Lexit_cp EX_ST(STORE(stb, %o3, %o0 + 0x00), memcpy_retl_o2_plus_1) EX_LD(LOAD(ldub, %o1 + 0x01, %o3), memcpy_retl_o2) subcc %o2, 1, %o2 be,pn %xcc, .Lexit_cp EX_ST(STORE(stb, %o3, %o0 + 0x01), memcpy_retl_o2_plus_1) EX_LD(LOAD(ldub, %o1 + 0x02, %o3), memcpy_retl_o2) ba,pt %xcc, .Lexit_cp EX_ST(STORE(stb, %o3, %o0 + 0x02), memcpy_retl_o2) .Lsmall_cp: andcc %g2, 0x3, %g0 bne,pn %xcc, .Lsmall_unaligned_cp andn %o2, 0x4 - 1, %o5 sub %o2, %o5, %o2 1: EX_LD(LOAD(lduw, %o1 + 0x00, %o3), memcpy_retl_o2_plus_o5) add %o1, 0x04, %o1 subcc %o5, 0x04, %o5 add %o0, 0x04, %o0 bne,pt %xcc, 1b EX_ST(STORE(stw, %o3, %o0 - 0x04), memcpy_retl_o2_plus_o5_plus_4) brz,pt %o2, .Lexit_cp nop ba,a,pt %xcc, 
.Ltiny_cp .Lsmall_unaligned_cp: 1: EX_LD(LOAD(ldub, %o1 + 0x00, %o3), memcpy_retl_o2) add %o1, 1, %o1 add %o0, 1, %o0 subcc %o2, 1, %o2 bne,pt %xcc, 1b EX_ST(STORE(stb, %o3, %o0 - 0x01), memcpy_retl_o2_plus_1) ba,a,pt %xcc, .Lexit_cp .Lsmallrest: tst %o2 bz,pt %xcc, .Lsmallx cmp %o2, 4 blt,pn %xcc, .Lsmallleft3 nop sub %o2, 3, %o2 .Lsmallnotalign4: EX_LD(LOAD(ldub, %o1, %o3), memcpy_retl_o2_plus_3)! read byte subcc %o2, 4, %o2 ! reduce count by 4 EX_ST(STORE(stb, %o3, %o0), memcpy_retl_o2_plus_7)! write byte & repeat EX_LD(LOAD(ldub, %o1+1, %o3), memcpy_retl_o2_plus_6)! for total of 4 add %o1, 4, %o1 ! advance SRC by 4 EX_ST(STORE(stb, %o3, %o0+1), memcpy_retl_o2_plus_6) EX_LD(LOAD(ldub, %o1-2, %o3), memcpy_retl_o2_plus_5) add %o0, 4, %o0 ! advance DST by 4 EX_ST(STORE(stb, %o3, %o0-2), memcpy_retl_o2_plus_5) EX_LD(LOAD(ldub, %o1-1, %o3), memcpy_retl_o2_plus_4) bgu,pt %xcc, .Lsmallnotalign4 ! loop til 3 or fewer bytes remain EX_ST(STORE(stb, %o3, %o0-1), memcpy_retl_o2_plus_4) addcc %o2, 3, %o2 ! restore count bz,pt %xcc, .Lsmallx .Lsmallleft3: ! 1, 2, or 3 bytes remain subcc %o2, 1, %o2 EX_LD(LOAD(ldub, %o1, %o3), memcpy_retl_o2_plus_1) ! load one byte bz,pt %xcc, .Lsmallx EX_ST(STORE(stb, %o3, %o0), memcpy_retl_o2_plus_1) ! store one byte EX_LD(LOAD(ldub, %o1+1, %o3), memcpy_retl_o2) ! load second byte subcc %o2, 1, %o2 bz,pt %xcc, .Lsmallx EX_ST(STORE(stb, %o3, %o0+1), memcpy_retl_o2_plus_1)! store second byte EX_LD(LOAD(ldub, %o1+2, %o3), memcpy_retl_o2) ! load third byte EX_ST(STORE(stb, %o3, %o0+2), memcpy_retl_o2) ! store third byte .Lsmallx: retl mov EX_RETVAL(%g1), %o0 .Lsmallfin: tst %o2 bnz,pn %xcc, .Lsmallleft3 nop retl mov EX_RETVAL(%g1), %o0 ! restore %o0 .Lexit_cp: retl mov EX_RETVAL(%g1), %o0 .size FUNC_NAME, .-FUNC_NAME
aixcc-public/challenge-001-exemplar-source
1,608
arch/sparc/lib/strlen.S
/* SPDX-License-Identifier: GPL-2.0 */ /* strlen.S: Sparc optimized strlen code * Hand optimized from GNU libc's strlen * Copyright (C) 1991,1996 Free Software Foundation * Copyright (C) 1996,2008 David S. Miller (davem@davemloft.net) * Copyright (C) 1996, 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) */ #include <linux/linkage.h> #include <asm/asm.h> #include <asm/export.h> #define LO_MAGIC 0x01010101 #define HI_MAGIC 0x80808080 .text ENTRY(strlen) mov %o0, %o1 andcc %o0, 3, %g0 BRANCH32(be, pt, 9f) sethi %hi(HI_MAGIC), %o4 ldub [%o0], %o5 BRANCH_REG_ZERO(pn, %o5, 11f) add %o0, 1, %o0 andcc %o0, 3, %g0 BRANCH32(be, pn, 4f) or %o4, %lo(HI_MAGIC), %o3 ldub [%o0], %o5 BRANCH_REG_ZERO(pn, %o5, 12f) add %o0, 1, %o0 andcc %o0, 3, %g0 BRANCH32(be, pt, 5f) sethi %hi(LO_MAGIC), %o4 ldub [%o0], %o5 BRANCH_REG_ZERO(pn, %o5, 13f) add %o0, 1, %o0 BRANCH32(ba, pt, 8f) or %o4, %lo(LO_MAGIC), %o2 9: or %o4, %lo(HI_MAGIC), %o3 4: sethi %hi(LO_MAGIC), %o4 5: or %o4, %lo(LO_MAGIC), %o2 8: ld [%o0], %o5 2: sub %o5, %o2, %o4 andcc %o4, %o3, %g0 BRANCH32(be, pt, 8b) add %o0, 4, %o0 /* Check every byte. */ srl %o5, 24, %g7 andcc %g7, 0xff, %g0 BRANCH32(be, pn, 1f) add %o0, -4, %o4 srl %o5, 16, %g7 andcc %g7, 0xff, %g0 BRANCH32(be, pn, 1f) add %o4, 1, %o4 srl %o5, 8, %g7 andcc %g7, 0xff, %g0 BRANCH32(be, pn, 1f) add %o4, 1, %o4 andcc %o5, 0xff, %g0 BRANCH32_ANNUL(bne, pt, 2b) ld [%o0], %o5 add %o4, 1, %o4 1: retl sub %o4, %o1, %o0 11: retl mov 0, %o0 12: retl mov 1, %o0 13: retl mov 2, %o0 ENDPROC(strlen) EXPORT_SYMBOL(strlen)
aixcc-public/challenge-001-exemplar-source
16,228
arch/sparc/lib/xor.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/sparc64/lib/xor.S * * High speed xor_block operation for RAID4/5 utilizing the * UltraSparc Visual Instruction Set and Niagara store-init/twin-load. * * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz) * Copyright (C) 2006 David S. Miller <davem@davemloft.net> */ #include <linux/linkage.h> #include <asm/visasm.h> #include <asm/asi.h> #include <asm/dcu.h> #include <asm/spitfire.h> #include <asm/export.h> /* * Requirements: * !(((long)dest | (long)sourceN) & (64 - 1)) && * !(len & 127) && len >= 256 */ .text /* VIS versions. */ ENTRY(xor_vis_2) rd %fprs, %o5 andcc %o5, FPRS_FEF|FPRS_DU, %g0 be,pt %icc, 0f sethi %hi(VISenter), %g1 jmpl %g1 + %lo(VISenter), %g7 add %g7, 8, %g7 0: wr %g0, FPRS_FEF, %fprs rd %asi, %g1 wr %g0, ASI_BLK_P, %asi membar #LoadStore|#StoreLoad|#StoreStore sub %o0, 128, %o0 ldda [%o1] %asi, %f0 ldda [%o2] %asi, %f16 2: ldda [%o1 + 64] %asi, %f32 fxor %f0, %f16, %f16 fxor %f2, %f18, %f18 fxor %f4, %f20, %f20 fxor %f6, %f22, %f22 fxor %f8, %f24, %f24 fxor %f10, %f26, %f26 fxor %f12, %f28, %f28 fxor %f14, %f30, %f30 stda %f16, [%o1] %asi ldda [%o2 + 64] %asi, %f48 ldda [%o1 + 128] %asi, %f0 fxor %f32, %f48, %f48 fxor %f34, %f50, %f50 add %o1, 128, %o1 fxor %f36, %f52, %f52 add %o2, 128, %o2 fxor %f38, %f54, %f54 subcc %o0, 128, %o0 fxor %f40, %f56, %f56 fxor %f42, %f58, %f58 fxor %f44, %f60, %f60 fxor %f46, %f62, %f62 stda %f48, [%o1 - 64] %asi bne,pt %xcc, 2b ldda [%o2] %asi, %f16 ldda [%o1 + 64] %asi, %f32 fxor %f0, %f16, %f16 fxor %f2, %f18, %f18 fxor %f4, %f20, %f20 fxor %f6, %f22, %f22 fxor %f8, %f24, %f24 fxor %f10, %f26, %f26 fxor %f12, %f28, %f28 fxor %f14, %f30, %f30 stda %f16, [%o1] %asi ldda [%o2 + 64] %asi, %f48 membar #Sync fxor %f32, %f48, %f48 fxor %f34, %f50, %f50 fxor %f36, %f52, %f52 fxor %f38, %f54, %f54 fxor %f40, %f56, %f56 fxor %f42, %f58, %f58 fxor %f44, %f60, %f60 fxor %f46, %f62, %f62 stda %f48, [%o1 + 64] %asi membar #Sync|#StoreStore|#StoreLoad wr %g1, %g0, %asi 
retl wr %g0, 0, %fprs ENDPROC(xor_vis_2) EXPORT_SYMBOL(xor_vis_2) ENTRY(xor_vis_3) rd %fprs, %o5 andcc %o5, FPRS_FEF|FPRS_DU, %g0 be,pt %icc, 0f sethi %hi(VISenter), %g1 jmpl %g1 + %lo(VISenter), %g7 add %g7, 8, %g7 0: wr %g0, FPRS_FEF, %fprs rd %asi, %g1 wr %g0, ASI_BLK_P, %asi membar #LoadStore|#StoreLoad|#StoreStore sub %o0, 64, %o0 ldda [%o1] %asi, %f0 ldda [%o2] %asi, %f16 3: ldda [%o3] %asi, %f32 fxor %f0, %f16, %f48 fxor %f2, %f18, %f50 add %o1, 64, %o1 fxor %f4, %f20, %f52 fxor %f6, %f22, %f54 add %o2, 64, %o2 fxor %f8, %f24, %f56 fxor %f10, %f26, %f58 fxor %f12, %f28, %f60 fxor %f14, %f30, %f62 ldda [%o1] %asi, %f0 fxor %f48, %f32, %f48 fxor %f50, %f34, %f50 fxor %f52, %f36, %f52 fxor %f54, %f38, %f54 add %o3, 64, %o3 fxor %f56, %f40, %f56 fxor %f58, %f42, %f58 subcc %o0, 64, %o0 fxor %f60, %f44, %f60 fxor %f62, %f46, %f62 stda %f48, [%o1 - 64] %asi bne,pt %xcc, 3b ldda [%o2] %asi, %f16 ldda [%o3] %asi, %f32 fxor %f0, %f16, %f48 fxor %f2, %f18, %f50 fxor %f4, %f20, %f52 fxor %f6, %f22, %f54 fxor %f8, %f24, %f56 fxor %f10, %f26, %f58 fxor %f12, %f28, %f60 fxor %f14, %f30, %f62 membar #Sync fxor %f48, %f32, %f48 fxor %f50, %f34, %f50 fxor %f52, %f36, %f52 fxor %f54, %f38, %f54 fxor %f56, %f40, %f56 fxor %f58, %f42, %f58 fxor %f60, %f44, %f60 fxor %f62, %f46, %f62 stda %f48, [%o1] %asi membar #Sync|#StoreStore|#StoreLoad wr %g1, %g0, %asi retl wr %g0, 0, %fprs ENDPROC(xor_vis_3) EXPORT_SYMBOL(xor_vis_3) ENTRY(xor_vis_4) rd %fprs, %o5 andcc %o5, FPRS_FEF|FPRS_DU, %g0 be,pt %icc, 0f sethi %hi(VISenter), %g1 jmpl %g1 + %lo(VISenter), %g7 add %g7, 8, %g7 0: wr %g0, FPRS_FEF, %fprs rd %asi, %g1 wr %g0, ASI_BLK_P, %asi membar #LoadStore|#StoreLoad|#StoreStore sub %o0, 64, %o0 ldda [%o1] %asi, %f0 ldda [%o2] %asi, %f16 4: ldda [%o3] %asi, %f32 fxor %f0, %f16, %f16 fxor %f2, %f18, %f18 add %o1, 64, %o1 fxor %f4, %f20, %f20 fxor %f6, %f22, %f22 add %o2, 64, %o2 fxor %f8, %f24, %f24 fxor %f10, %f26, %f26 fxor %f12, %f28, %f28 fxor %f14, %f30, %f30 ldda [%o4] %asi, %f48 
fxor %f16, %f32, %f32 fxor %f18, %f34, %f34 fxor %f20, %f36, %f36 fxor %f22, %f38, %f38 add %o3, 64, %o3 fxor %f24, %f40, %f40 fxor %f26, %f42, %f42 fxor %f28, %f44, %f44 fxor %f30, %f46, %f46 ldda [%o1] %asi, %f0 fxor %f32, %f48, %f48 fxor %f34, %f50, %f50 fxor %f36, %f52, %f52 add %o4, 64, %o4 fxor %f38, %f54, %f54 fxor %f40, %f56, %f56 fxor %f42, %f58, %f58 subcc %o0, 64, %o0 fxor %f44, %f60, %f60 fxor %f46, %f62, %f62 stda %f48, [%o1 - 64] %asi bne,pt %xcc, 4b ldda [%o2] %asi, %f16 ldda [%o3] %asi, %f32 fxor %f0, %f16, %f16 fxor %f2, %f18, %f18 fxor %f4, %f20, %f20 fxor %f6, %f22, %f22 fxor %f8, %f24, %f24 fxor %f10, %f26, %f26 fxor %f12, %f28, %f28 fxor %f14, %f30, %f30 ldda [%o4] %asi, %f48 fxor %f16, %f32, %f32 fxor %f18, %f34, %f34 fxor %f20, %f36, %f36 fxor %f22, %f38, %f38 fxor %f24, %f40, %f40 fxor %f26, %f42, %f42 fxor %f28, %f44, %f44 fxor %f30, %f46, %f46 membar #Sync fxor %f32, %f48, %f48 fxor %f34, %f50, %f50 fxor %f36, %f52, %f52 fxor %f38, %f54, %f54 fxor %f40, %f56, %f56 fxor %f42, %f58, %f58 fxor %f44, %f60, %f60 fxor %f46, %f62, %f62 stda %f48, [%o1] %asi membar #Sync|#StoreStore|#StoreLoad wr %g1, %g0, %asi retl wr %g0, 0, %fprs ENDPROC(xor_vis_4) EXPORT_SYMBOL(xor_vis_4) ENTRY(xor_vis_5) save %sp, -192, %sp rd %fprs, %o5 andcc %o5, FPRS_FEF|FPRS_DU, %g0 be,pt %icc, 0f sethi %hi(VISenter), %g1 jmpl %g1 + %lo(VISenter), %g7 add %g7, 8, %g7 0: wr %g0, FPRS_FEF, %fprs rd %asi, %g1 wr %g0, ASI_BLK_P, %asi membar #LoadStore|#StoreLoad|#StoreStore sub %i0, 64, %i0 ldda [%i1] %asi, %f0 ldda [%i2] %asi, %f16 5: ldda [%i3] %asi, %f32 fxor %f0, %f16, %f48 fxor %f2, %f18, %f50 add %i1, 64, %i1 fxor %f4, %f20, %f52 fxor %f6, %f22, %f54 add %i2, 64, %i2 fxor %f8, %f24, %f56 fxor %f10, %f26, %f58 fxor %f12, %f28, %f60 fxor %f14, %f30, %f62 ldda [%i4] %asi, %f16 fxor %f48, %f32, %f48 fxor %f50, %f34, %f50 fxor %f52, %f36, %f52 fxor %f54, %f38, %f54 add %i3, 64, %i3 fxor %f56, %f40, %f56 fxor %f58, %f42, %f58 fxor %f60, %f44, %f60 fxor %f62, %f46, %f62 ldda 
[%i5] %asi, %f32 fxor %f48, %f16, %f48 fxor %f50, %f18, %f50 add %i4, 64, %i4 fxor %f52, %f20, %f52 fxor %f54, %f22, %f54 add %i5, 64, %i5 fxor %f56, %f24, %f56 fxor %f58, %f26, %f58 fxor %f60, %f28, %f60 fxor %f62, %f30, %f62 ldda [%i1] %asi, %f0 fxor %f48, %f32, %f48 fxor %f50, %f34, %f50 fxor %f52, %f36, %f52 fxor %f54, %f38, %f54 fxor %f56, %f40, %f56 fxor %f58, %f42, %f58 subcc %i0, 64, %i0 fxor %f60, %f44, %f60 fxor %f62, %f46, %f62 stda %f48, [%i1 - 64] %asi bne,pt %xcc, 5b ldda [%i2] %asi, %f16 ldda [%i3] %asi, %f32 fxor %f0, %f16, %f48 fxor %f2, %f18, %f50 fxor %f4, %f20, %f52 fxor %f6, %f22, %f54 fxor %f8, %f24, %f56 fxor %f10, %f26, %f58 fxor %f12, %f28, %f60 fxor %f14, %f30, %f62 ldda [%i4] %asi, %f16 fxor %f48, %f32, %f48 fxor %f50, %f34, %f50 fxor %f52, %f36, %f52 fxor %f54, %f38, %f54 fxor %f56, %f40, %f56 fxor %f58, %f42, %f58 fxor %f60, %f44, %f60 fxor %f62, %f46, %f62 ldda [%i5] %asi, %f32 fxor %f48, %f16, %f48 fxor %f50, %f18, %f50 fxor %f52, %f20, %f52 fxor %f54, %f22, %f54 fxor %f56, %f24, %f56 fxor %f58, %f26, %f58 fxor %f60, %f28, %f60 fxor %f62, %f30, %f62 membar #Sync fxor %f48, %f32, %f48 fxor %f50, %f34, %f50 fxor %f52, %f36, %f52 fxor %f54, %f38, %f54 fxor %f56, %f40, %f56 fxor %f58, %f42, %f58 fxor %f60, %f44, %f60 fxor %f62, %f46, %f62 stda %f48, [%i1] %asi membar #Sync|#StoreStore|#StoreLoad wr %g1, %g0, %asi wr %g0, 0, %fprs ret restore ENDPROC(xor_vis_5) EXPORT_SYMBOL(xor_vis_5) /* Niagara versions. 
*/ ENTRY(xor_niagara_2) /* %o0=bytes, %o1=dest, %o2=src */ save %sp, -192, %sp prefetch [%i1], #n_writes prefetch [%i2], #one_read rd %asi, %g7 wr %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi srlx %i0, 6, %g1 mov %i1, %i0 mov %i2, %i1 1: ldda [%i1 + 0x00] %asi, %i2 /* %i2/%i3 = src + 0x00 */ ldda [%i1 + 0x10] %asi, %i4 /* %i4/%i5 = src + 0x10 */ ldda [%i1 + 0x20] %asi, %g2 /* %g2/%g3 = src + 0x20 */ ldda [%i1 + 0x30] %asi, %l0 /* %l0/%l1 = src + 0x30 */ prefetch [%i1 + 0x40], #one_read ldda [%i0 + 0x00] %asi, %o0 /* %o0/%o1 = dest + 0x00 */ ldda [%i0 + 0x10] %asi, %o2 /* %o2/%o3 = dest + 0x10 */ ldda [%i0 + 0x20] %asi, %o4 /* %o4/%o5 = dest + 0x20 */ ldda [%i0 + 0x30] %asi, %l2 /* %l2/%l3 = dest + 0x30 */ prefetch [%i0 + 0x40], #n_writes xor %o0, %i2, %o0 xor %o1, %i3, %o1 stxa %o0, [%i0 + 0x00] %asi stxa %o1, [%i0 + 0x08] %asi xor %o2, %i4, %o2 xor %o3, %i5, %o3 stxa %o2, [%i0 + 0x10] %asi stxa %o3, [%i0 + 0x18] %asi xor %o4, %g2, %o4 xor %o5, %g3, %o5 stxa %o4, [%i0 + 0x20] %asi stxa %o5, [%i0 + 0x28] %asi xor %l2, %l0, %l2 xor %l3, %l1, %l3 stxa %l2, [%i0 + 0x30] %asi stxa %l3, [%i0 + 0x38] %asi add %i0, 0x40, %i0 subcc %g1, 1, %g1 bne,pt %xcc, 1b add %i1, 0x40, %i1 membar #Sync wr %g7, 0x0, %asi ret restore ENDPROC(xor_niagara_2) EXPORT_SYMBOL(xor_niagara_2) ENTRY(xor_niagara_3) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2 */ save %sp, -192, %sp prefetch [%i1], #n_writes prefetch [%i2], #one_read prefetch [%i3], #one_read rd %asi, %g7 wr %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi srlx %i0, 6, %g1 mov %i1, %i0 mov %i2, %i1 mov %i3, %l7 1: ldda [%i1 + 0x00] %asi, %i2 /* %i2/%i3 = src1 + 0x00 */ ldda [%i1 + 0x10] %asi, %i4 /* %i4/%i5 = src1 + 0x10 */ ldda [%l7 + 0x00] %asi, %g2 /* %g2/%g3 = src2 + 0x00 */ ldda [%l7 + 0x10] %asi, %l0 /* %l0/%l1 = src2 + 0x10 */ ldda [%i0 + 0x00] %asi, %o0 /* %o0/%o1 = dest + 0x00 */ ldda [%i0 + 0x10] %asi, %o2 /* %o2/%o3 = dest + 0x10 */ xor %g2, %i2, %g2 xor %g3, %i3, %g3 xor %o0, %g2, %o0 xor %o1, %g3, %o1 stxa %o0, [%i0 + 0x00] %asi stxa %o1, [%i0 
+ 0x08] %asi ldda [%i1 + 0x20] %asi, %i2 /* %i2/%i3 = src1 + 0x20 */ ldda [%l7 + 0x20] %asi, %g2 /* %g2/%g3 = src2 + 0x20 */ ldda [%i0 + 0x20] %asi, %o0 /* %o0/%o1 = dest + 0x20 */ xor %l0, %i4, %l0 xor %l1, %i5, %l1 xor %o2, %l0, %o2 xor %o3, %l1, %o3 stxa %o2, [%i0 + 0x10] %asi stxa %o3, [%i0 + 0x18] %asi ldda [%i1 + 0x30] %asi, %i4 /* %i4/%i5 = src1 + 0x30 */ ldda [%l7 + 0x30] %asi, %l0 /* %l0/%l1 = src2 + 0x30 */ ldda [%i0 + 0x30] %asi, %o2 /* %o2/%o3 = dest + 0x30 */ prefetch [%i1 + 0x40], #one_read prefetch [%l7 + 0x40], #one_read prefetch [%i0 + 0x40], #n_writes xor %g2, %i2, %g2 xor %g3, %i3, %g3 xor %o0, %g2, %o0 xor %o1, %g3, %o1 stxa %o0, [%i0 + 0x20] %asi stxa %o1, [%i0 + 0x28] %asi xor %l0, %i4, %l0 xor %l1, %i5, %l1 xor %o2, %l0, %o2 xor %o3, %l1, %o3 stxa %o2, [%i0 + 0x30] %asi stxa %o3, [%i0 + 0x38] %asi add %i0, 0x40, %i0 add %i1, 0x40, %i1 subcc %g1, 1, %g1 bne,pt %xcc, 1b add %l7, 0x40, %l7 membar #Sync wr %g7, 0x0, %asi ret restore ENDPROC(xor_niagara_3) EXPORT_SYMBOL(xor_niagara_3) ENTRY(xor_niagara_4) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3 */ save %sp, -192, %sp prefetch [%i1], #n_writes prefetch [%i2], #one_read prefetch [%i3], #one_read prefetch [%i4], #one_read rd %asi, %g7 wr %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi srlx %i0, 6, %g1 mov %i1, %i0 mov %i2, %i1 mov %i3, %l7 mov %i4, %l6 1: ldda [%i1 + 0x00] %asi, %i2 /* %i2/%i3 = src1 + 0x00 */ ldda [%l7 + 0x00] %asi, %i4 /* %i4/%i5 = src2 + 0x00 */ ldda [%l6 + 0x00] %asi, %g2 /* %g2/%g3 = src3 + 0x00 */ ldda [%i0 + 0x00] %asi, %l0 /* %l0/%l1 = dest + 0x00 */ xor %i4, %i2, %i4 xor %i5, %i3, %i5 ldda [%i1 + 0x10] %asi, %i2 /* %i2/%i3 = src1 + 0x10 */ xor %g2, %i4, %g2 xor %g3, %i5, %g3 ldda [%l7 + 0x10] %asi, %i4 /* %i4/%i5 = src2 + 0x10 */ xor %l0, %g2, %l0 xor %l1, %g3, %l1 stxa %l0, [%i0 + 0x00] %asi stxa %l1, [%i0 + 0x08] %asi ldda [%l6 + 0x10] %asi, %g2 /* %g2/%g3 = src3 + 0x10 */ ldda [%i0 + 0x10] %asi, %l0 /* %l0/%l1 = dest + 0x10 */ xor %i4, %i2, %i4 xor %i5, %i3, %i5 ldda [%i1 
+ 0x20] %asi, %i2 /* %i2/%i3 = src1 + 0x20 */ xor %g2, %i4, %g2 xor %g3, %i5, %g3 ldda [%l7 + 0x20] %asi, %i4 /* %i4/%i5 = src2 + 0x20 */ xor %l0, %g2, %l0 xor %l1, %g3, %l1 stxa %l0, [%i0 + 0x10] %asi stxa %l1, [%i0 + 0x18] %asi ldda [%l6 + 0x20] %asi, %g2 /* %g2/%g3 = src3 + 0x20 */ ldda [%i0 + 0x20] %asi, %l0 /* %l0/%l1 = dest + 0x20 */ xor %i4, %i2, %i4 xor %i5, %i3, %i5 ldda [%i1 + 0x30] %asi, %i2 /* %i2/%i3 = src1 + 0x30 */ xor %g2, %i4, %g2 xor %g3, %i5, %g3 ldda [%l7 + 0x30] %asi, %i4 /* %i4/%i5 = src2 + 0x30 */ xor %l0, %g2, %l0 xor %l1, %g3, %l1 stxa %l0, [%i0 + 0x20] %asi stxa %l1, [%i0 + 0x28] %asi ldda [%l6 + 0x30] %asi, %g2 /* %g2/%g3 = src3 + 0x30 */ ldda [%i0 + 0x30] %asi, %l0 /* %l0/%l1 = dest + 0x30 */ prefetch [%i1 + 0x40], #one_read prefetch [%l7 + 0x40], #one_read prefetch [%l6 + 0x40], #one_read prefetch [%i0 + 0x40], #n_writes xor %i4, %i2, %i4 xor %i5, %i3, %i5 xor %g2, %i4, %g2 xor %g3, %i5, %g3 xor %l0, %g2, %l0 xor %l1, %g3, %l1 stxa %l0, [%i0 + 0x30] %asi stxa %l1, [%i0 + 0x38] %asi add %i0, 0x40, %i0 add %i1, 0x40, %i1 add %l7, 0x40, %l7 subcc %g1, 1, %g1 bne,pt %xcc, 1b add %l6, 0x40, %l6 membar #Sync wr %g7, 0x0, %asi ret restore ENDPROC(xor_niagara_4) EXPORT_SYMBOL(xor_niagara_4) ENTRY(xor_niagara_5) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3, %o5=src4 */ save %sp, -192, %sp prefetch [%i1], #n_writes prefetch [%i2], #one_read prefetch [%i3], #one_read prefetch [%i4], #one_read prefetch [%i5], #one_read rd %asi, %g7 wr %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi srlx %i0, 6, %g1 mov %i1, %i0 mov %i2, %i1 mov %i3, %l7 mov %i4, %l6 mov %i5, %l5 1: ldda [%i1 + 0x00] %asi, %i2 /* %i2/%i3 = src1 + 0x00 */ ldda [%l7 + 0x00] %asi, %i4 /* %i4/%i5 = src2 + 0x00 */ ldda [%l6 + 0x00] %asi, %g2 /* %g2/%g3 = src3 + 0x00 */ ldda [%l5 + 0x00] %asi, %l0 /* %l0/%l1 = src4 + 0x00 */ ldda [%i0 + 0x00] %asi, %l2 /* %l2/%l3 = dest + 0x00 */ xor %i4, %i2, %i4 xor %i5, %i3, %i5 ldda [%i1 + 0x10] %asi, %i2 /* %i2/%i3 = src1 + 0x10 */ xor %g2, %i4, %g2 xor 
%g3, %i5, %g3 ldda [%l7 + 0x10] %asi, %i4 /* %i4/%i5 = src2 + 0x10 */ xor %l0, %g2, %l0 xor %l1, %g3, %l1 ldda [%l6 + 0x10] %asi, %g2 /* %g2/%g3 = src3 + 0x10 */ xor %l2, %l0, %l2 xor %l3, %l1, %l3 stxa %l2, [%i0 + 0x00] %asi stxa %l3, [%i0 + 0x08] %asi ldda [%l5 + 0x10] %asi, %l0 /* %l0/%l1 = src4 + 0x10 */ ldda [%i0 + 0x10] %asi, %l2 /* %l2/%l3 = dest + 0x10 */ xor %i4, %i2, %i4 xor %i5, %i3, %i5 ldda [%i1 + 0x20] %asi, %i2 /* %i2/%i3 = src1 + 0x20 */ xor %g2, %i4, %g2 xor %g3, %i5, %g3 ldda [%l7 + 0x20] %asi, %i4 /* %i4/%i5 = src2 + 0x20 */ xor %l0, %g2, %l0 xor %l1, %g3, %l1 ldda [%l6 + 0x20] %asi, %g2 /* %g2/%g3 = src3 + 0x20 */ xor %l2, %l0, %l2 xor %l3, %l1, %l3 stxa %l2, [%i0 + 0x10] %asi stxa %l3, [%i0 + 0x18] %asi ldda [%l5 + 0x20] %asi, %l0 /* %l0/%l1 = src4 + 0x20 */ ldda [%i0 + 0x20] %asi, %l2 /* %l2/%l3 = dest + 0x20 */ xor %i4, %i2, %i4 xor %i5, %i3, %i5 ldda [%i1 + 0x30] %asi, %i2 /* %i2/%i3 = src1 + 0x30 */ xor %g2, %i4, %g2 xor %g3, %i5, %g3 ldda [%l7 + 0x30] %asi, %i4 /* %i4/%i5 = src2 + 0x30 */ xor %l0, %g2, %l0 xor %l1, %g3, %l1 ldda [%l6 + 0x30] %asi, %g2 /* %g2/%g3 = src3 + 0x30 */ xor %l2, %l0, %l2 xor %l3, %l1, %l3 stxa %l2, [%i0 + 0x20] %asi stxa %l3, [%i0 + 0x28] %asi ldda [%l5 + 0x30] %asi, %l0 /* %l0/%l1 = src4 + 0x30 */ ldda [%i0 + 0x30] %asi, %l2 /* %l2/%l3 = dest + 0x30 */ prefetch [%i1 + 0x40], #one_read prefetch [%l7 + 0x40], #one_read prefetch [%l6 + 0x40], #one_read prefetch [%l5 + 0x40], #one_read prefetch [%i0 + 0x40], #n_writes xor %i4, %i2, %i4 xor %i5, %i3, %i5 xor %g2, %i4, %g2 xor %g3, %i5, %g3 xor %l0, %g2, %l0 xor %l1, %g3, %l1 xor %l2, %l0, %l2 xor %l3, %l1, %l3 stxa %l2, [%i0 + 0x30] %asi stxa %l3, [%i0 + 0x38] %asi add %i0, 0x40, %i0 add %i1, 0x40, %i1 add %l7, 0x40, %l7 add %l6, 0x40, %l6 subcc %g1, 1, %g1 bne,pt %xcc, 1b add %l5, 0x40, %l5 membar #Sync wr %g7, 0x0, %asi ret restore ENDPROC(xor_niagara_5) EXPORT_SYMBOL(xor_niagara_5)
aixcc-public/challenge-001-exemplar-source
1,059
arch/sparc/lib/fls64.S
/* fls64.S: SPARC default __fls definition. * * SPARC default __fls definition, which follows the same algorithm as * in generic __fls(). This function will be boot time patched on T4 * and onward. */ #include <linux/linkage.h> #include <asm/export.h> .text .register %g2, #scratch .register %g3, #scratch ENTRY(__fls) mov -1, %g2 sllx %g2, 32, %g2 and %o0, %g2, %g2 brnz,pt %g2, 1f mov 63, %g1 sllx %o0, 32, %o0 mov 31, %g1 1: mov -1, %g2 sllx %g2, 48, %g2 and %o0, %g2, %g2 brnz,pt %g2, 2f mov -1, %g2 sllx %o0, 16, %o0 add %g1, -16, %g1 2: mov -1, %g2 sllx %g2, 56, %g2 and %o0, %g2, %g2 brnz,pt %g2, 3f mov -1, %g2 sllx %o0, 8, %o0 add %g1, -8, %g1 3: sllx %g2, 60, %g2 and %o0, %g2, %g2 brnz,pt %g2, 4f mov -1, %g2 sllx %o0, 4, %o0 add %g1, -4, %g1 4: sllx %g2, 62, %g2 and %o0, %g2, %g2 brnz,pt %g2, 5f mov -1, %g3 sllx %o0, 2, %o0 add %g1, -2, %g1 5: mov 0, %g2 sllx %g3, 63, %g3 and %o0, %g3, %o0 movre %o0, 1, %g2 sub %g1, %g2, %g1 jmp %o7+8 sra %g1, 0, %o0 ENDPROC(__fls) EXPORT_SYMBOL(__fls)
aixcc-public/challenge-001-exemplar-source
1,211
arch/sparc/lib/M7patch.S
/* * M7patch.S: Patch generic routines with M7 variant. * * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. */ #include <linux/linkage.h> #define BRANCH_ALWAYS 0x10680000 #define NOP 0x01000000 #define NG_DO_PATCH(OLD, NEW) \ sethi %hi(NEW), %g1; \ or %g1, %lo(NEW), %g1; \ sethi %hi(OLD), %g2; \ or %g2, %lo(OLD), %g2; \ sub %g1, %g2, %g1; \ sethi %hi(BRANCH_ALWAYS), %g3; \ sll %g1, 11, %g1; \ srl %g1, 11 + 2, %g1; \ or %g3, %lo(BRANCH_ALWAYS), %g3; \ or %g3, %g1, %g3; \ stw %g3, [%g2]; \ sethi %hi(NOP), %g3; \ or %g3, %lo(NOP), %g3; \ stw %g3, [%g2 + 0x4]; \ flush %g2; ENTRY(m7_patch_copyops) NG_DO_PATCH(memcpy, M7memcpy) NG_DO_PATCH(raw_copy_from_user, M7copy_from_user) NG_DO_PATCH(raw_copy_to_user, M7copy_to_user) retl nop ENDPROC(m7_patch_copyops) ENTRY(m7_patch_bzero) NG_DO_PATCH(memset, M7memset) NG_DO_PATCH(__bzero, M7bzero) NG_DO_PATCH(__clear_user, NGclear_user) NG_DO_PATCH(tsb_init, NGtsb_init) retl nop ENDPROC(m7_patch_bzero) ENTRY(m7_patch_pageops) NG_DO_PATCH(copy_user_page, NG4copy_user_page) NG_DO_PATCH(_clear_page, M7clear_page) NG_DO_PATCH(clear_user_page, M7clear_user_page) retl nop ENDPROC(m7_patch_pageops)
aixcc-public/challenge-001-exemplar-source
3,984
arch/sparc/lib/checksum_64.S
/*
 * NOTE(review): the whole of checksum_64.S across two physical lines;
 * instruction text kept byte-identical.  csum_partial(%o0=buff, %o1=len,
 * %o2=sum): V9-optimized 16-bit ones-complement checksum.  The alignment
 * fixup consumes a leading byte and/or halfword (an odd start address is
 * remembered in %g7), the main loop adds 64 bytes of 32-bit words per
 * iteration into the 64-bit accumulator %o4 with #n_reads prefetching,
 * then the trailing <64-byte words and end cruft are added and the sum is
 * folded 64->32->16.  If the buffer began on an odd address the two byte
 * halves of the 16-bit result are swapped before the final add (with
 * carry wrap) into %o2, returned zero-extended in %o0.
 */
/* SPDX-License-Identifier: GPL-2.0 */ /* checksum.S: Sparc V9 optimized checksum code. * * Copyright(C) 1995 Linus Torvalds * Copyright(C) 1995 Miguel de Icaza * Copyright(C) 1996, 2000 David S. Miller * Copyright(C) 1997 Jakub Jelinek * * derived from: * Linux/Alpha checksum c-code * Linux/ix86 inline checksum assembly * RFC1071 Computing the Internet Checksum (esp. Jacobsons m68k code) * David Mosberger-Tang for optimized reference c-code * BSD4.4 portable checksum routine */ #include <asm/export.h> .text csum_partial_fix_alignment: /* We checked for zero length already, so there must be * at least one byte. */ be,pt %icc, 1f nop ldub [%o0 + 0x00], %o4 add %o0, 1, %o0 sub %o1, 1, %o1 1: andcc %o0, 0x2, %g0 be,pn %icc, csum_partial_post_align cmp %o1, 2 blu,pn %icc, csum_partial_end_cruft nop lduh [%o0 + 0x00], %o5 add %o0, 2, %o0 sub %o1, 2, %o1 ba,pt %xcc, csum_partial_post_align add %o5, %o4, %o4 .align 32 .globl csum_partial .type csum_partial,#function EXPORT_SYMBOL(csum_partial) csum_partial: /* %o0=buff, %o1=len, %o2=sum */ prefetch [%o0 + 0x000], #n_reads clr %o4 prefetch [%o0 + 0x040], #n_reads brz,pn %o1, csum_partial_finish andcc %o0, 0x3, %g0 /* We "remember" whether the lowest bit in the address * was set in %g7. Because if it is, we have to swap * upper and lower 8 bit fields of the sum we calculate. */ bne,pn %icc, csum_partial_fix_alignment andcc %o0, 0x1, %g7 csum_partial_post_align: prefetch [%o0 + 0x080], #n_reads andncc %o1, 0x3f, %o3 prefetch [%o0 + 0x0c0], #n_reads sub %o1, %o3, %o1 brz,pn %o3, 2f prefetch [%o0 + 0x100], #n_reads /* So that we don't need to use the non-pairing * add-with-carry instructions we accumulate 32-bit * values into a 64-bit register. At the end of the * loop we fold it down to 32-bits and so on. 
*/ prefetch [%o0 + 0x140], #n_reads 1: lduw [%o0 + 0x00], %o5 lduw [%o0 + 0x04], %g1 lduw [%o0 + 0x08], %g2 add %o4, %o5, %o4 lduw [%o0 + 0x0c], %g3 add %o4, %g1, %o4 lduw [%o0 + 0x10], %o5 add %o4, %g2, %o4 lduw [%o0 + 0x14], %g1 add %o4, %g3, %o4 lduw [%o0 + 0x18], %g2 add %o4, %o5, %o4 lduw [%o0 + 0x1c], %g3 add %o4, %g1, %o4 lduw [%o0 + 0x20], %o5 add %o4, %g2, %o4 lduw [%o0 + 0x24], %g1 add %o4, %g3, %o4 lduw [%o0 + 0x28], %g2 add %o4, %o5, %o4 lduw [%o0 + 0x2c], %g3 add %o4, %g1, %o4 lduw [%o0 + 0x30], %o5 add %o4, %g2, %o4 lduw [%o0 + 0x34], %g1 add %o4, %g3, %o4 lduw [%o0 + 0x38], %g2 add %o4, %o5, %o4 lduw [%o0 + 0x3c], %g3 add %o4, %g1, %o4 prefetch [%o0 + 0x180], #n_reads add %o4, %g2, %o4 subcc %o3, 0x40, %o3 add %o0, 0x40, %o0 bne,pt %icc, 1b add %o4, %g3, %o4 2: and %o1, 0x3c, %o3 brz,pn %o3, 2f sub %o1, %o3, %o1 1: lduw [%o0 + 0x00], %o5 subcc %o3, 0x4, %o3 add %o0, 0x4, %o0 bne,pt %icc, 1b add %o4, %o5, %o4 2: /* fold 64-->32 */ srlx %o4, 32, %o5 srl %o4, 0, %o4 add %o4, %o5, %o4 srlx %o4, 32, %o5 srl %o4, 0, %o4 add %o4, %o5, %o4 /* fold 32-->16 */ sethi %hi(0xffff0000), %g1 srl %o4, 16, %o5 andn %o4, %g1, %g2 add %o5, %g2, %o4 srl %o4, 16, %o5 andn %o4, %g1, %g2 add %o5, %g2, %o4 csum_partial_end_cruft: /* %o4 has the 16-bit sum we have calculated so-far. */ cmp %o1, 2 blu,pt %icc, 1f nop lduh [%o0 + 0x00], %o5 sub %o1, 2, %o1 add %o0, 2, %o0 add %o4, %o5, %o4 1: brz,pt %o1, 1f nop ldub [%o0 + 0x00], %o5 sub %o1, 1, %o1 add %o0, 1, %o0 sllx %o5, 8, %o5 add %o4, %o5, %o4 1: /* fold 32-->16 */ sethi %hi(0xffff0000), %g1 srl %o4, 16, %o5 andn %o4, %g1, %g2 add %o5, %g2, %o4 srl %o4, 16, %o5 andn %o4, %g1, %g2 add %o5, %g2, %o4 1: brz,pt %g7, 1f nop /* We started with an odd byte, byte-swap the result. */ srl %o4, 8, %o5 and %o4, 0xff, %g1 sll %g1, 8, %g1 or %o5, %g1, %o4 1: addcc %o2, %o4, %o2 addc %g0, %o2, %o2 csum_partial_finish: retl srl %o2, 0, %o0
aixcc-public/challenge-001-exemplar-source
4,271
arch/sparc/net/bpf_jit_asm_32.S
/*
 * NOTE(review): the whole of bpf_jit_asm_32.S across three physical lines;
 * instruction text kept byte-identical.  Out-of-line packet-load helpers
 * for the sparc32 classic-BPF JIT.  bpf_jit_load_{word,half,byte,byte_msh}
 * fast-path aligned reads inside the linear skb data at
 * r_SKB_DATA + r_OFF (bounds-checked against r_HEADLEN, byte-assembling
 * when unaligned); short or out-of-linear accesses fall back through
 * bpf_slow_path_common to skb_copy_bits() into a stack scratch slot, and
 * negative offsets (bounded below by SKF_MAX_NEG_OFF) go through
 * bpf_internal_load_pointer_neg_helper.  The *_msh variants compute
 * 4*(P[k]&0xf) into r_X per the BPF_LDX|BPF_MSH semantics.  bpf_error
 * makes the JITed program return 0 by jumping through the saved %o7.
 */
/* SPDX-License-Identifier: GPL-2.0 */ #include <asm/ptrace.h> #include "bpf_jit_32.h" #define SAVE_SZ 96 #define SCRATCH_OFF 72 #define BE_PTR(label) be label #define SIGN_EXTEND(reg) #define SKF_MAX_NEG_OFF (-0x200000) /* SKF_LL_OFF from filter.h */ .text .globl bpf_jit_load_word bpf_jit_load_word: cmp r_OFF, 0 bl bpf_slow_path_word_neg nop .globl bpf_jit_load_word_positive_offset bpf_jit_load_word_positive_offset: sub r_HEADLEN, r_OFF, r_TMP cmp r_TMP, 3 ble bpf_slow_path_word add r_SKB_DATA, r_OFF, r_TMP andcc r_TMP, 3, %g0 bne load_word_unaligned nop retl ld [r_TMP], r_A load_word_unaligned: ldub [r_TMP + 0x0], r_OFF ldub [r_TMP + 0x1], r_TMP2 sll r_OFF, 8, r_OFF or r_OFF, r_TMP2, r_OFF ldub [r_TMP + 0x2], r_TMP2 sll r_OFF, 8, r_OFF or r_OFF, r_TMP2, r_OFF ldub [r_TMP + 0x3], r_TMP2 sll r_OFF, 8, r_OFF retl or r_OFF, r_TMP2, r_A .globl bpf_jit_load_half bpf_jit_load_half: cmp r_OFF, 0 bl bpf_slow_path_half_neg nop .globl bpf_jit_load_half_positive_offset bpf_jit_load_half_positive_offset: sub r_HEADLEN, r_OFF, r_TMP cmp r_TMP, 1 ble bpf_slow_path_half add r_SKB_DATA, r_OFF, r_TMP andcc r_TMP, 1, %g0 bne load_half_unaligned nop retl lduh [r_TMP], r_A load_half_unaligned: ldub [r_TMP + 0x0], r_OFF ldub [r_TMP + 0x1], r_TMP2 sll r_OFF, 8, r_OFF retl or r_OFF, r_TMP2, r_A .globl bpf_jit_load_byte bpf_jit_load_byte: cmp r_OFF, 0 bl bpf_slow_path_byte_neg nop .globl bpf_jit_load_byte_positive_offset bpf_jit_load_byte_positive_offset: cmp r_OFF, r_HEADLEN bge bpf_slow_path_byte nop retl ldub [r_SKB_DATA + r_OFF], r_A .globl bpf_jit_load_byte_msh bpf_jit_load_byte_msh: cmp r_OFF, 0 bl bpf_slow_path_byte_msh_neg nop .globl bpf_jit_load_byte_msh_positive_offset bpf_jit_load_byte_msh_positive_offset: cmp r_OFF, r_HEADLEN bge bpf_slow_path_byte_msh nop ldub [r_SKB_DATA + r_OFF], r_OFF and r_OFF, 0xf, r_OFF retl sll r_OFF, 2, r_X #define bpf_slow_path_common(LEN) \ save %sp, -SAVE_SZ, %sp; \ mov %i0, %o0; \ mov r_OFF, %o1; \ add %fp, SCRATCH_OFF, %o2; \ call skb_copy_bits; 
\ mov (LEN), %o3; \ cmp %o0, 0; \ restore; bpf_slow_path_word: bpf_slow_path_common(4) bl bpf_error ld [%sp + SCRATCH_OFF], r_A retl nop bpf_slow_path_half: bpf_slow_path_common(2) bl bpf_error lduh [%sp + SCRATCH_OFF], r_A retl nop bpf_slow_path_byte: bpf_slow_path_common(1) bl bpf_error ldub [%sp + SCRATCH_OFF], r_A retl nop bpf_slow_path_byte_msh: bpf_slow_path_common(1) bl bpf_error ldub [%sp + SCRATCH_OFF], r_A and r_OFF, 0xf, r_OFF retl sll r_OFF, 2, r_X #define bpf_negative_common(LEN) \ save %sp, -SAVE_SZ, %sp; \ mov %i0, %o0; \ mov r_OFF, %o1; \ SIGN_EXTEND(%o1); \ call bpf_internal_load_pointer_neg_helper; \ mov (LEN), %o2; \ mov %o0, r_TMP; \ cmp %o0, 0; \ BE_PTR(bpf_error); \ restore; bpf_slow_path_word_neg: sethi %hi(SKF_MAX_NEG_OFF), r_TMP cmp r_OFF, r_TMP bl bpf_error nop .globl bpf_jit_load_word_negative_offset bpf_jit_load_word_negative_offset: bpf_negative_common(4) andcc r_TMP, 3, %g0 bne load_word_unaligned nop retl ld [r_TMP], r_A bpf_slow_path_half_neg: sethi %hi(SKF_MAX_NEG_OFF), r_TMP cmp r_OFF, r_TMP bl bpf_error nop .globl bpf_jit_load_half_negative_offset bpf_jit_load_half_negative_offset: bpf_negative_common(2) andcc r_TMP, 1, %g0 bne load_half_unaligned nop retl lduh [r_TMP], r_A bpf_slow_path_byte_neg: sethi %hi(SKF_MAX_NEG_OFF), r_TMP cmp r_OFF, r_TMP bl bpf_error nop .globl bpf_jit_load_byte_negative_offset bpf_jit_load_byte_negative_offset: bpf_negative_common(1) retl ldub [r_TMP], r_A bpf_slow_path_byte_msh_neg: sethi %hi(SKF_MAX_NEG_OFF), r_TMP cmp r_OFF, r_TMP bl bpf_error nop .globl bpf_jit_load_byte_msh_negative_offset bpf_jit_load_byte_msh_negative_offset: bpf_negative_common(1) ldub [r_TMP], r_OFF and r_OFF, 0xf, r_OFF retl sll r_OFF, 2, r_X bpf_error: /* Make the JIT program return zero. The JIT epilogue * stores away the original %o7 into r_saved_O7. The * normal leaf function return is to use "retl" which * would evalute to "jmpl %o7 + 8, %g0" but we want to * use the saved value thus the sequence you see here. 
*/ jmpl r_saved_O7 + 8, %g0 clr %o0
aixcc-public/challenge-001-exemplar-source
9,770
arch/sparc/mm/hypersparc.S
/*
 * NOTE(review): the whole of hypersparc.S across six physical lines;
 * instruction text kept byte-identical.  HyperSparc (sun4m/SRMMU) cache
 * and TLB maintenance.  flush_cache_{all,mm,range,page} walk the
 * virtually-tagged cache with ASI_M_FLUSH_* stores in 8-line-unrolled
 * loops (range/page flushes temporarily switch SRMMU_CTX_REG and probe
 * each page with ASI_M_FLUSH_PROBE before flushing it, restoring the old
 * context afterwards).  flush_tlb_{all,mm,range,page} issue demap stores
 * through ASI_M_FLUSH_PROBE at the 0x400/0x300/0x200 demap addresses.
 * hypersparc_bzero_1page / hypersparc_copy_1page use the ASI_M_BFILL /
 * ASI_M_BCOPY block-store ASIs, and hypersparc_setup_blockops copies
 * their bodies over the generic bzero_1page/__copy_1page at init time
 * (hence the strict instruction-count limits noted inline).
 */
/* SPDX-License-Identifier: GPL-2.0 */ /* * hypersparc.S: High speed Hypersparc mmu/cache operations. * * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) */ #include <asm/ptrace.h> #include <asm/psr.h> #include <asm/asm-offsets.h> #include <asm/asi.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/pgtsrmmu.h> #include <linux/init.h> .text .align 4 .globl hypersparc_flush_cache_all, hypersparc_flush_cache_mm .globl hypersparc_flush_cache_range, hypersparc_flush_cache_page .globl hypersparc_flush_page_to_ram .globl hypersparc_flush_page_for_dma, hypersparc_flush_sig_insns .globl hypersparc_flush_tlb_all, hypersparc_flush_tlb_mm .globl hypersparc_flush_tlb_range, hypersparc_flush_tlb_page hypersparc_flush_cache_all: WINDOW_FLUSH(%g4, %g5) sethi %hi(vac_cache_size), %g4 ld [%g4 + %lo(vac_cache_size)], %g5 sethi %hi(vac_line_size), %g1 ld [%g1 + %lo(vac_line_size)], %g2 1: subcc %g5, %g2, %g5 ! hyper_flush_unconditional_combined bne 1b sta %g0, [%g5] ASI_M_FLUSH_CTX retl sta %g0, [%g0] ASI_M_FLUSH_IWHOLE ! hyper_flush_whole_icache /* We expand the window flush to get maximum performance. */ hypersparc_flush_cache_mm: #ifndef CONFIG_SMP ld [%o0 + AOFF_mm_context], %g1 cmp %g1, -1 be hypersparc_flush_cache_mm_out #endif WINDOW_FLUSH(%g4, %g5) sethi %hi(vac_line_size), %g1 ld [%g1 + %lo(vac_line_size)], %o1 sethi %hi(vac_cache_size), %g2 ld [%g2 + %lo(vac_cache_size)], %o0 add %o1, %o1, %g1 add %o1, %g1, %g2 add %o1, %g2, %g3 add %o1, %g3, %g4 add %o1, %g4, %g5 add %o1, %g5, %o4 add %o1, %o4, %o5 /* BLAMMO! */ 1: subcc %o0, %o5, %o0 ! hyper_flush_cache_user sta %g0, [%o0 + %g0] ASI_M_FLUSH_USER sta %g0, [%o0 + %o1] ASI_M_FLUSH_USER sta %g0, [%o0 + %g1] ASI_M_FLUSH_USER sta %g0, [%o0 + %g2] ASI_M_FLUSH_USER sta %g0, [%o0 + %g3] ASI_M_FLUSH_USER sta %g0, [%o0 + %g4] ASI_M_FLUSH_USER sta %g0, [%o0 + %g5] ASI_M_FLUSH_USER bne 1b sta %g0, [%o0 + %o4] ASI_M_FLUSH_USER hypersparc_flush_cache_mm_out: retl nop /* The things we do for performance... 
*/ hypersparc_flush_cache_range: ld [%o0 + VMA_VM_MM], %o0 #ifndef CONFIG_SMP ld [%o0 + AOFF_mm_context], %g1 cmp %g1, -1 be hypersparc_flush_cache_range_out #endif WINDOW_FLUSH(%g4, %g5) sethi %hi(vac_line_size), %g1 ld [%g1 + %lo(vac_line_size)], %o4 sethi %hi(vac_cache_size), %g2 ld [%g2 + %lo(vac_cache_size)], %o3 /* Here comes the fun part... */ add %o2, (PAGE_SIZE - 1), %o2 andn %o1, (PAGE_SIZE - 1), %o1 add %o4, %o4, %o5 andn %o2, (PAGE_SIZE - 1), %o2 add %o4, %o5, %g1 sub %o2, %o1, %g4 add %o4, %g1, %g2 sll %o3, 2, %g5 add %o4, %g2, %g3 cmp %g4, %g5 add %o4, %g3, %g4 blu 0f add %o4, %g4, %g5 add %o4, %g5, %g7 /* Flush entire user space, believe it or not this is quicker * than page at a time flushings for range > (cache_size<<2). */ 1: subcc %o3, %g7, %o3 sta %g0, [%o3 + %g0] ASI_M_FLUSH_USER sta %g0, [%o3 + %o4] ASI_M_FLUSH_USER sta %g0, [%o3 + %o5] ASI_M_FLUSH_USER sta %g0, [%o3 + %g1] ASI_M_FLUSH_USER sta %g0, [%o3 + %g2] ASI_M_FLUSH_USER sta %g0, [%o3 + %g3] ASI_M_FLUSH_USER sta %g0, [%o3 + %g4] ASI_M_FLUSH_USER bne 1b sta %g0, [%o3 + %g5] ASI_M_FLUSH_USER retl nop /* Below our threshold, flush one page at a time. 
*/ 0: ld [%o0 + AOFF_mm_context], %o0 mov SRMMU_CTX_REG, %g7 lda [%g7] ASI_M_MMUREGS, %o3 sta %o0, [%g7] ASI_M_MMUREGS add %o2, -PAGE_SIZE, %o0 1: or %o0, 0x400, %g7 lda [%g7] ASI_M_FLUSH_PROBE, %g7 orcc %g7, 0, %g0 be,a 3f mov %o0, %o2 add %o4, %g5, %g7 2: sub %o2, %g7, %o2 sta %g0, [%o2 + %g0] ASI_M_FLUSH_PAGE sta %g0, [%o2 + %o4] ASI_M_FLUSH_PAGE sta %g0, [%o2 + %o5] ASI_M_FLUSH_PAGE sta %g0, [%o2 + %g1] ASI_M_FLUSH_PAGE sta %g0, [%o2 + %g2] ASI_M_FLUSH_PAGE sta %g0, [%o2 + %g3] ASI_M_FLUSH_PAGE andcc %o2, 0xffc, %g0 sta %g0, [%o2 + %g4] ASI_M_FLUSH_PAGE bne 2b sta %g0, [%o2 + %g5] ASI_M_FLUSH_PAGE 3: cmp %o2, %o1 bne 1b add %o2, -PAGE_SIZE, %o0 mov SRMMU_FAULT_STATUS, %g5 lda [%g5] ASI_M_MMUREGS, %g0 mov SRMMU_CTX_REG, %g7 sta %o3, [%g7] ASI_M_MMUREGS hypersparc_flush_cache_range_out: retl nop /* HyperSparc requires a valid mapping where we are about to flush * in order to check for a physical tag match during the flush. */ /* Verified, my ass... */ hypersparc_flush_cache_page: ld [%o0 + VMA_VM_MM], %o0 ld [%o0 + AOFF_mm_context], %g2 #ifndef CONFIG_SMP cmp %g2, -1 be hypersparc_flush_cache_page_out #endif WINDOW_FLUSH(%g4, %g5) sethi %hi(vac_line_size), %g1 ld [%g1 + %lo(vac_line_size)], %o4 mov SRMMU_CTX_REG, %o3 andn %o1, (PAGE_SIZE - 1), %o1 lda [%o3] ASI_M_MMUREGS, %o2 sta %g2, [%o3] ASI_M_MMUREGS or %o1, 0x400, %o5 lda [%o5] ASI_M_FLUSH_PROBE, %g1 orcc %g0, %g1, %g0 be 2f add %o4, %o4, %o5 sub %o1, -PAGE_SIZE, %o1 add %o4, %o5, %g1 add %o4, %g1, %g2 add %o4, %g2, %g3 add %o4, %g3, %g4 add %o4, %g4, %g5 add %o4, %g5, %g7 /* BLAMMO! 
*/ 1: sub %o1, %g7, %o1 sta %g0, [%o1 + %g0] ASI_M_FLUSH_PAGE sta %g0, [%o1 + %o4] ASI_M_FLUSH_PAGE sta %g0, [%o1 + %o5] ASI_M_FLUSH_PAGE sta %g0, [%o1 + %g1] ASI_M_FLUSH_PAGE sta %g0, [%o1 + %g2] ASI_M_FLUSH_PAGE sta %g0, [%o1 + %g3] ASI_M_FLUSH_PAGE andcc %o1, 0xffc, %g0 sta %g0, [%o1 + %g4] ASI_M_FLUSH_PAGE bne 1b sta %g0, [%o1 + %g5] ASI_M_FLUSH_PAGE 2: mov SRMMU_FAULT_STATUS, %g7 mov SRMMU_CTX_REG, %g4 lda [%g7] ASI_M_MMUREGS, %g0 sta %o2, [%g4] ASI_M_MMUREGS hypersparc_flush_cache_page_out: retl nop hypersparc_flush_sig_insns: flush %o1 retl flush %o1 + 4 /* HyperSparc is copy-back. */ hypersparc_flush_page_to_ram: sethi %hi(vac_line_size), %g1 ld [%g1 + %lo(vac_line_size)], %o4 andn %o0, (PAGE_SIZE - 1), %o0 add %o4, %o4, %o5 or %o0, 0x400, %g7 lda [%g7] ASI_M_FLUSH_PROBE, %g5 add %o4, %o5, %g1 orcc %g5, 0, %g0 be 2f add %o4, %g1, %g2 add %o4, %g2, %g3 sub %o0, -PAGE_SIZE, %o0 add %o4, %g3, %g4 add %o4, %g4, %g5 add %o4, %g5, %g7 /* BLAMMO! */ 1: sub %o0, %g7, %o0 sta %g0, [%o0 + %g0] ASI_M_FLUSH_PAGE sta %g0, [%o0 + %o4] ASI_M_FLUSH_PAGE sta %g0, [%o0 + %o5] ASI_M_FLUSH_PAGE sta %g0, [%o0 + %g1] ASI_M_FLUSH_PAGE sta %g0, [%o0 + %g2] ASI_M_FLUSH_PAGE sta %g0, [%o0 + %g3] ASI_M_FLUSH_PAGE andcc %o0, 0xffc, %g0 sta %g0, [%o0 + %g4] ASI_M_FLUSH_PAGE bne 1b sta %g0, [%o0 + %g5] ASI_M_FLUSH_PAGE 2: mov SRMMU_FAULT_STATUS, %g1 retl lda [%g1] ASI_M_MMUREGS, %g0 /* HyperSparc is IO cache coherent. */ hypersparc_flush_page_for_dma: retl nop /* It was noted that at boot time a TLB flush all in a delay slot * can deliver an illegal instruction to the processor if the timing * is just right... 
*/ hypersparc_flush_tlb_all: mov 0x400, %g1 sta %g0, [%g1] ASI_M_FLUSH_PROBE retl nop hypersparc_flush_tlb_mm: mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o1 lda [%g1] ASI_M_MMUREGS, %g5 #ifndef CONFIG_SMP cmp %o1, -1 be hypersparc_flush_tlb_mm_out #endif mov 0x300, %g2 sta %o1, [%g1] ASI_M_MMUREGS sta %g0, [%g2] ASI_M_FLUSH_PROBE hypersparc_flush_tlb_mm_out: retl sta %g5, [%g1] ASI_M_MMUREGS hypersparc_flush_tlb_range: ld [%o0 + VMA_VM_MM], %o0 mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o3 lda [%g1] ASI_M_MMUREGS, %g5 #ifndef CONFIG_SMP cmp %o3, -1 be hypersparc_flush_tlb_range_out #endif sethi %hi(~((1 << PGDIR_SHIFT) - 1)), %o4 sta %o3, [%g1] ASI_M_MMUREGS and %o1, %o4, %o1 add %o1, 0x200, %o1 sta %g0, [%o1] ASI_M_FLUSH_PROBE 1: sub %o1, %o4, %o1 cmp %o1, %o2 blu,a 1b sta %g0, [%o1] ASI_M_FLUSH_PROBE hypersparc_flush_tlb_range_out: retl sta %g5, [%g1] ASI_M_MMUREGS hypersparc_flush_tlb_page: ld [%o0 + VMA_VM_MM], %o0 mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o3 andn %o1, (PAGE_SIZE - 1), %o1 #ifndef CONFIG_SMP cmp %o3, -1 be hypersparc_flush_tlb_page_out #endif lda [%g1] ASI_M_MMUREGS, %g5 sta %o3, [%g1] ASI_M_MMUREGS sta %g0, [%o1] ASI_M_FLUSH_PROBE hypersparc_flush_tlb_page_out: retl sta %g5, [%g1] ASI_M_MMUREGS __INIT /* High speed page clear/copy. */ hypersparc_bzero_1page: /* NOTE: This routine has to be shorter than 40insns --jj */ clr %g1 mov 32, %g2 mov 64, %g3 mov 96, %g4 mov 128, %g5 mov 160, %g7 mov 192, %o2 mov 224, %o3 mov 16, %o1 1: stda %g0, [%o0 + %g0] ASI_M_BFILL stda %g0, [%o0 + %g2] ASI_M_BFILL stda %g0, [%o0 + %g3] ASI_M_BFILL stda %g0, [%o0 + %g4] ASI_M_BFILL stda %g0, [%o0 + %g5] ASI_M_BFILL stda %g0, [%o0 + %g7] ASI_M_BFILL stda %g0, [%o0 + %o2] ASI_M_BFILL stda %g0, [%o0 + %o3] ASI_M_BFILL subcc %o1, 1, %o1 bne 1b add %o0, 256, %o0 retl nop hypersparc_copy_1page: /* NOTE: This routine has to be shorter than 70insns --jj */ sub %o1, %o0, %o2 ! 
difference mov 16, %g1 1: sta %o0, [%o0 + %o2] ASI_M_BCOPY add %o0, 32, %o0 sta %o0, [%o0 + %o2] ASI_M_BCOPY add %o0, 32, %o0 sta %o0, [%o0 + %o2] ASI_M_BCOPY add %o0, 32, %o0 sta %o0, [%o0 + %o2] ASI_M_BCOPY add %o0, 32, %o0 sta %o0, [%o0 + %o2] ASI_M_BCOPY add %o0, 32, %o0 sta %o0, [%o0 + %o2] ASI_M_BCOPY add %o0, 32, %o0 sta %o0, [%o0 + %o2] ASI_M_BCOPY add %o0, 32, %o0 sta %o0, [%o0 + %o2] ASI_M_BCOPY subcc %g1, 1, %g1 bne 1b add %o0, 32, %o0 retl nop .globl hypersparc_setup_blockops hypersparc_setup_blockops: sethi %hi(bzero_1page), %o0 or %o0, %lo(bzero_1page), %o0 sethi %hi(hypersparc_bzero_1page), %o1 or %o1, %lo(hypersparc_bzero_1page), %o1 sethi %hi(hypersparc_copy_1page), %o2 or %o2, %lo(hypersparc_copy_1page), %o2 ld [%o1], %o4 1: add %o1, 4, %o1 st %o4, [%o0] add %o0, 4, %o0 cmp %o1, %o2 bne 1b ld [%o1], %o4 sethi %hi(__copy_1page), %o0 or %o0, %lo(__copy_1page), %o0 sethi %hi(hypersparc_setup_blockops), %o2 or %o2, %lo(hypersparc_setup_blockops), %o2 ld [%o1], %o4 1: add %o1, 4, %o1 st %o4, [%o0] add %o0, 4, %o0 cmp %o1, %o2 bne 1b ld [%o1], %o4 sta %g0, [%g0] ASI_M_FLUSH_IWHOLE retl nop
aixcc-public/challenge-001-exemplar-source
25,181
arch/sparc/mm/ultra.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * ultra.S: Don't expand these all over the place... * * Copyright (C) 1997, 2000, 2008 David S. Miller (davem@davemloft.net) */ #include <linux/pgtable.h> #include <asm/asi.h> #include <asm/page.h> #include <asm/spitfire.h> #include <asm/mmu_context.h> #include <asm/mmu.h> #include <asm/pil.h> #include <asm/head.h> #include <asm/thread_info.h> #include <asm/cacheflush.h> #include <asm/hypervisor.h> #include <asm/cpudata.h> /* Basically, most of the Spitfire vs. Cheetah madness * has to do with the fact that Cheetah does not support * IMMU flushes out of the secondary context. Someone needs * to throw a south lake birthday party for the folks * in Microelectronics who refused to fix this shit. */ /* This file is meant to be read efficiently by the CPU, not humans. * Staraj sie tego nikomu nie pierdolnac... */ .text .align 32 .globl __flush_tlb_mm __flush_tlb_mm: /* 19 insns */ /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */ ldxa [%o1] ASI_DMMU, %g2 cmp %g2, %o0 bne,pn %icc, __spitfire_flush_tlb_mm_slow mov 0x50, %g3 stxa %g0, [%g3] ASI_DMMU_DEMAP stxa %g0, [%g3] ASI_IMMU_DEMAP sethi %hi(KERNBASE), %g3 flush %g3 retl nop nop nop nop nop nop nop nop nop nop .align 32 .globl __flush_tlb_page __flush_tlb_page: /* 22 insns */ /* %o0 = context, %o1 = vaddr */ rdpr %pstate, %g7 andn %g7, PSTATE_IE, %g2 wrpr %g2, %pstate mov SECONDARY_CONTEXT, %o4 ldxa [%o4] ASI_DMMU, %g2 stxa %o0, [%o4] ASI_DMMU andcc %o1, 1, %g0 andn %o1, 1, %o3 be,pn %icc, 1f or %o3, 0x10, %o3 stxa %g0, [%o3] ASI_IMMU_DEMAP 1: stxa %g0, [%o3] ASI_DMMU_DEMAP membar #Sync stxa %g2, [%o4] ASI_DMMU sethi %hi(KERNBASE), %o4 flush %o4 retl wrpr %g7, 0x0, %pstate nop nop nop nop .align 32 .globl __flush_tlb_pending __flush_tlb_pending: /* 27 insns */ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ rdpr %pstate, %g7 sllx %o1, 3, %o1 andn %g7, PSTATE_IE, %g2 wrpr %g2, %pstate mov SECONDARY_CONTEXT, %o4 ldxa [%o4] ASI_DMMU, %g2 stxa %o0, [%o4] ASI_DMMU 1: sub %o1, 
(1 << 3), %o1 ldx [%o2 + %o1], %o3 andcc %o3, 1, %g0 andn %o3, 1, %o3 be,pn %icc, 2f or %o3, 0x10, %o3 stxa %g0, [%o3] ASI_IMMU_DEMAP 2: stxa %g0, [%o3] ASI_DMMU_DEMAP membar #Sync brnz,pt %o1, 1b nop stxa %g2, [%o4] ASI_DMMU sethi %hi(KERNBASE), %o4 flush %o4 retl wrpr %g7, 0x0, %pstate nop nop nop nop .align 32 .globl __flush_tlb_kernel_range __flush_tlb_kernel_range: /* 31 insns */ /* %o0=start, %o1=end */ cmp %o0, %o1 be,pn %xcc, 2f sub %o1, %o0, %o3 srlx %o3, 18, %o4 brnz,pn %o4, __spitfire_flush_tlb_kernel_range_slow sethi %hi(PAGE_SIZE), %o4 sub %o3, %o4, %o3 or %o0, 0x20, %o0 ! Nucleus 1: stxa %g0, [%o0 + %o3] ASI_DMMU_DEMAP stxa %g0, [%o0 + %o3] ASI_IMMU_DEMAP membar #Sync brnz,pt %o3, 1b sub %o3, %o4, %o3 2: sethi %hi(KERNBASE), %o3 flush %o3 retl nop nop nop nop nop nop nop nop nop nop nop nop nop nop nop __spitfire_flush_tlb_kernel_range_slow: mov 63 * 8, %o4 1: ldxa [%o4] ASI_ITLB_DATA_ACCESS, %o3 andcc %o3, 0x40, %g0 /* _PAGE_L_4U */ bne,pn %xcc, 2f mov TLB_TAG_ACCESS, %o3 stxa %g0, [%o3] ASI_IMMU stxa %g0, [%o4] ASI_ITLB_DATA_ACCESS membar #Sync 2: ldxa [%o4] ASI_DTLB_DATA_ACCESS, %o3 andcc %o3, 0x40, %g0 bne,pn %xcc, 2f mov TLB_TAG_ACCESS, %o3 stxa %g0, [%o3] ASI_DMMU stxa %g0, [%o4] ASI_DTLB_DATA_ACCESS membar #Sync 2: sub %o4, 8, %o4 brgez,pt %o4, 1b nop retl nop __spitfire_flush_tlb_mm_slow: rdpr %pstate, %g1 wrpr %g1, PSTATE_IE, %pstate stxa %o0, [%o1] ASI_DMMU stxa %g0, [%g3] ASI_DMMU_DEMAP stxa %g0, [%g3] ASI_IMMU_DEMAP flush %g6 stxa %g2, [%o1] ASI_DMMU sethi %hi(KERNBASE), %o1 flush %o1 retl wrpr %g1, 0, %pstate /* * The following code flushes one page_size worth. 
*/ .section .kprobes.text, "ax" .align 32 .globl __flush_icache_page __flush_icache_page: /* %o0 = phys_page */ srlx %o0, PAGE_SHIFT, %o0 sethi %hi(PAGE_OFFSET), %g1 sllx %o0, PAGE_SHIFT, %o0 sethi %hi(PAGE_SIZE), %g2 ldx [%g1 + %lo(PAGE_OFFSET)], %g1 add %o0, %g1, %o0 1: subcc %g2, 32, %g2 bne,pt %icc, 1b flush %o0 + %g2 retl nop #ifdef DCACHE_ALIASING_POSSIBLE #if (PAGE_SHIFT != 13) #error only page shift of 13 is supported by dcache flush #endif #define DTAG_MASK 0x3 /* This routine is Spitfire specific so the hardcoded * D-cache size and line-size are OK. */ .align 64 .globl __flush_dcache_page __flush_dcache_page: /* %o0=kaddr, %o1=flush_icache */ sethi %hi(PAGE_OFFSET), %g1 ldx [%g1 + %lo(PAGE_OFFSET)], %g1 sub %o0, %g1, %o0 ! physical address srlx %o0, 11, %o0 ! make D-cache TAG sethi %hi(1 << 14), %o2 ! D-cache size sub %o2, (1 << 5), %o2 ! D-cache line size 1: ldxa [%o2] ASI_DCACHE_TAG, %o3 ! load D-cache TAG andcc %o3, DTAG_MASK, %g0 ! Valid? be,pn %xcc, 2f ! Nope, branch andn %o3, DTAG_MASK, %o3 ! Clear valid bits cmp %o3, %o0 ! TAG match? bne,pt %xcc, 2f ! Nope, branch nop stxa %g0, [%o2] ASI_DCACHE_TAG ! Invalidate TAG membar #Sync 2: brnz,pt %o2, 1b sub %o2, (1 << 5), %o2 ! D-cache line size /* The I-cache does not snoop local stores so we * better flush that too when necessary. */ brnz,pt %o1, __flush_icache_page sllx %o0, 11, %o0 retl nop #endif /* DCACHE_ALIASING_POSSIBLE */ .previous /* Cheetah specific versions, patched at boot time. 
*/ __cheetah_flush_tlb_mm: /* 19 insns */ rdpr %pstate, %g7 andn %g7, PSTATE_IE, %g2 wrpr %g2, 0x0, %pstate wrpr %g0, 1, %tl mov PRIMARY_CONTEXT, %o2 mov 0x40, %g3 ldxa [%o2] ASI_DMMU, %g2 srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o1 sllx %o1, CTX_PGSZ1_NUC_SHIFT, %o1 or %o0, %o1, %o0 /* Preserve nucleus page size fields */ stxa %o0, [%o2] ASI_DMMU stxa %g0, [%g3] ASI_DMMU_DEMAP stxa %g0, [%g3] ASI_IMMU_DEMAP stxa %g2, [%o2] ASI_DMMU sethi %hi(KERNBASE), %o2 flush %o2 wrpr %g0, 0, %tl retl wrpr %g7, 0x0, %pstate __cheetah_flush_tlb_page: /* 22 insns */ /* %o0 = context, %o1 = vaddr */ rdpr %pstate, %g7 andn %g7, PSTATE_IE, %g2 wrpr %g2, 0x0, %pstate wrpr %g0, 1, %tl mov PRIMARY_CONTEXT, %o4 ldxa [%o4] ASI_DMMU, %g2 srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o3 sllx %o3, CTX_PGSZ1_NUC_SHIFT, %o3 or %o0, %o3, %o0 /* Preserve nucleus page size fields */ stxa %o0, [%o4] ASI_DMMU andcc %o1, 1, %g0 be,pn %icc, 1f andn %o1, 1, %o3 stxa %g0, [%o3] ASI_IMMU_DEMAP 1: stxa %g0, [%o3] ASI_DMMU_DEMAP membar #Sync stxa %g2, [%o4] ASI_DMMU sethi %hi(KERNBASE), %o4 flush %o4 wrpr %g0, 0, %tl retl wrpr %g7, 0x0, %pstate __cheetah_flush_tlb_pending: /* 27 insns */ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ rdpr %pstate, %g7 sllx %o1, 3, %o1 andn %g7, PSTATE_IE, %g2 wrpr %g2, 0x0, %pstate wrpr %g0, 1, %tl mov PRIMARY_CONTEXT, %o4 ldxa [%o4] ASI_DMMU, %g2 srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o3 sllx %o3, CTX_PGSZ1_NUC_SHIFT, %o3 or %o0, %o3, %o0 /* Preserve nucleus page size fields */ stxa %o0, [%o4] ASI_DMMU 1: sub %o1, (1 << 3), %o1 ldx [%o2 + %o1], %o3 andcc %o3, 1, %g0 be,pn %icc, 2f andn %o3, 1, %o3 stxa %g0, [%o3] ASI_IMMU_DEMAP 2: stxa %g0, [%o3] ASI_DMMU_DEMAP membar #Sync brnz,pt %o1, 1b nop stxa %g2, [%o4] ASI_DMMU sethi %hi(KERNBASE), %o4 flush %o4 wrpr %g0, 0, %tl retl wrpr %g7, 0x0, %pstate __cheetah_flush_tlb_kernel_range: /* 31 insns */ /* %o0=start, %o1=end */ cmp %o0, %o1 be,pn %xcc, 2f sub %o1, %o0, %o3 srlx %o3, 18, %o4 brnz,pn %o4, 3f sethi %hi(PAGE_SIZE), %o4 sub %o3, %o4, %o3 or %o0, 
0x20, %o0 ! Nucleus 1: stxa %g0, [%o0 + %o3] ASI_DMMU_DEMAP stxa %g0, [%o0 + %o3] ASI_IMMU_DEMAP membar #Sync brnz,pt %o3, 1b sub %o3, %o4, %o3 2: sethi %hi(KERNBASE), %o3 flush %o3 retl nop 3: mov 0x80, %o4 stxa %g0, [%o4] ASI_DMMU_DEMAP membar #Sync stxa %g0, [%o4] ASI_IMMU_DEMAP membar #Sync retl nop nop nop nop nop nop nop nop #ifdef DCACHE_ALIASING_POSSIBLE __cheetah_flush_dcache_page: /* 11 insns */ sethi %hi(PAGE_OFFSET), %g1 ldx [%g1 + %lo(PAGE_OFFSET)], %g1 sub %o0, %g1, %o0 sethi %hi(PAGE_SIZE), %o4 1: subcc %o4, (1 << 5), %o4 stxa %g0, [%o0 + %o4] ASI_DCACHE_INVALIDATE membar #Sync bne,pt %icc, 1b nop retl /* I-cache flush never needed on Cheetah, see callers. */ nop #endif /* DCACHE_ALIASING_POSSIBLE */ /* Hypervisor specific versions, patched at boot time. */ __hypervisor_tlb_tl0_error: save %sp, -192, %sp mov %i0, %o0 call hypervisor_tlbop_error mov %i1, %o1 ret restore __hypervisor_flush_tlb_mm: /* 19 insns */ mov %o0, %o2 /* ARG2: mmu context */ mov 0, %o0 /* ARG0: CPU lists unimplemented */ mov 0, %o1 /* ARG1: CPU lists unimplemented */ mov HV_MMU_ALL, %o3 /* ARG3: flags */ mov HV_FAST_MMU_DEMAP_CTX, %o5 ta HV_FAST_TRAP brnz,pn %o0, 1f mov HV_FAST_MMU_DEMAP_CTX, %o1 retl nop 1: sethi %hi(__hypervisor_tlb_tl0_error), %o5 jmpl %o5 + %lo(__hypervisor_tlb_tl0_error), %g0 nop nop nop nop nop nop nop __hypervisor_flush_tlb_page: /* 22 insns */ /* %o0 = context, %o1 = vaddr */ mov %o0, %g2 mov %o1, %o0 /* ARG0: vaddr + IMMU-bit */ mov %g2, %o1 /* ARG1: mmu context */ mov HV_MMU_ALL, %o2 /* ARG2: flags */ srlx %o0, PAGE_SHIFT, %o0 sllx %o0, PAGE_SHIFT, %o0 ta HV_MMU_UNMAP_ADDR_TRAP brnz,pn %o0, 1f mov HV_MMU_UNMAP_ADDR_TRAP, %o1 retl nop 1: sethi %hi(__hypervisor_tlb_tl0_error), %o2 jmpl %o2 + %lo(__hypervisor_tlb_tl0_error), %g0 nop nop nop nop nop nop nop nop nop __hypervisor_flush_tlb_pending: /* 27 insns */ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ sllx %o1, 3, %g1 mov %o2, %g2 mov %o0, %g3 1: sub %g1, (1 << 3), %g1 ldx [%g2 + %g1], %o0 /* ARG0: 
vaddr + IMMU-bit */ mov %g3, %o1 /* ARG1: mmu context */ mov HV_MMU_ALL, %o2 /* ARG2: flags */ srlx %o0, PAGE_SHIFT, %o0 sllx %o0, PAGE_SHIFT, %o0 ta HV_MMU_UNMAP_ADDR_TRAP brnz,pn %o0, 1f mov HV_MMU_UNMAP_ADDR_TRAP, %o1 brnz,pt %g1, 1b nop retl nop 1: sethi %hi(__hypervisor_tlb_tl0_error), %o2 jmpl %o2 + %lo(__hypervisor_tlb_tl0_error), %g0 nop nop nop nop nop nop nop nop nop __hypervisor_flush_tlb_kernel_range: /* 31 insns */ /* %o0=start, %o1=end */ cmp %o0, %o1 be,pn %xcc, 2f sub %o1, %o0, %g2 srlx %g2, 18, %g3 brnz,pn %g3, 4f mov %o0, %g1 sethi %hi(PAGE_SIZE), %g3 sub %g2, %g3, %g2 1: add %g1, %g2, %o0 /* ARG0: virtual address */ mov 0, %o1 /* ARG1: mmu context */ mov HV_MMU_ALL, %o2 /* ARG2: flags */ ta HV_MMU_UNMAP_ADDR_TRAP brnz,pn %o0, 3f mov HV_MMU_UNMAP_ADDR_TRAP, %o1 brnz,pt %g2, 1b sub %g2, %g3, %g2 2: retl nop 3: sethi %hi(__hypervisor_tlb_tl0_error), %o2 jmpl %o2 + %lo(__hypervisor_tlb_tl0_error), %g0 nop 4: mov 0, %o0 /* ARG0: CPU lists unimplemented */ mov 0, %o1 /* ARG1: CPU lists unimplemented */ mov 0, %o2 /* ARG2: mmu context == nucleus */ mov HV_MMU_ALL, %o3 /* ARG3: flags */ mov HV_FAST_MMU_DEMAP_CTX, %o5 ta HV_FAST_TRAP brnz,pn %o0, 3b mov HV_FAST_MMU_DEMAP_CTX, %o1 retl nop #ifdef DCACHE_ALIASING_POSSIBLE /* XXX Niagara and friends have an 8K cache, so no aliasing is * XXX possible, but nothing explicit in the Hypervisor API * XXX guarantees this. */ __hypervisor_flush_dcache_page: /* 2 insns */ retl nop #endif tlb_patch_one: 1: lduw [%o1], %g1 stw %g1, [%o0] flush %o0 subcc %o2, 1, %o2 add %o1, 4, %o1 bne,pt %icc, 1b add %o0, 4, %o0 retl nop #ifdef CONFIG_SMP /* These are all called by the slaves of a cross call, at * trap level 1, with interrupts fully disabled. 
* * Register usage: * %g5 mm->context (all tlb flushes) * %g1 address arg 1 (tlb page and range flushes) * %g7 address arg 2 (tlb range flush only) * * %g6 scratch 1 * %g2 scratch 2 * %g3 scratch 3 * %g4 scratch 4 */ .align 32 .globl xcall_flush_tlb_mm xcall_flush_tlb_mm: /* 24 insns */ mov PRIMARY_CONTEXT, %g2 ldxa [%g2] ASI_DMMU, %g3 srlx %g3, CTX_PGSZ1_NUC_SHIFT, %g4 sllx %g4, CTX_PGSZ1_NUC_SHIFT, %g4 or %g5, %g4, %g5 /* Preserve nucleus page size fields */ stxa %g5, [%g2] ASI_DMMU mov 0x40, %g4 stxa %g0, [%g4] ASI_DMMU_DEMAP stxa %g0, [%g4] ASI_IMMU_DEMAP stxa %g3, [%g2] ASI_DMMU retry nop nop nop nop nop nop nop nop nop nop nop nop nop .globl xcall_flush_tlb_page xcall_flush_tlb_page: /* 20 insns */ /* %g5=context, %g1=vaddr */ mov PRIMARY_CONTEXT, %g4 ldxa [%g4] ASI_DMMU, %g2 srlx %g2, CTX_PGSZ1_NUC_SHIFT, %g4 sllx %g4, CTX_PGSZ1_NUC_SHIFT, %g4 or %g5, %g4, %g5 mov PRIMARY_CONTEXT, %g4 stxa %g5, [%g4] ASI_DMMU andcc %g1, 0x1, %g0 be,pn %icc, 2f andn %g1, 0x1, %g5 stxa %g0, [%g5] ASI_IMMU_DEMAP 2: stxa %g0, [%g5] ASI_DMMU_DEMAP membar #Sync stxa %g2, [%g4] ASI_DMMU retry nop nop nop nop nop .globl xcall_flush_tlb_kernel_range xcall_flush_tlb_kernel_range: /* 44 insns */ sethi %hi(PAGE_SIZE - 1), %g2 or %g2, %lo(PAGE_SIZE - 1), %g2 andn %g1, %g2, %g1 andn %g7, %g2, %g7 sub %g7, %g1, %g3 srlx %g3, 18, %g2 brnz,pn %g2, 2f sethi %hi(PAGE_SIZE), %g2 sub %g3, %g2, %g3 or %g1, 0x20, %g1 ! 
Nucleus 1: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP stxa %g0, [%g1 + %g3] ASI_IMMU_DEMAP membar #Sync brnz,pt %g3, 1b sub %g3, %g2, %g3 retry 2: mov 63 * 8, %g1 1: ldxa [%g1] ASI_ITLB_DATA_ACCESS, %g2 andcc %g2, 0x40, %g0 /* _PAGE_L_4U */ bne,pn %xcc, 2f mov TLB_TAG_ACCESS, %g2 stxa %g0, [%g2] ASI_IMMU stxa %g0, [%g1] ASI_ITLB_DATA_ACCESS membar #Sync 2: ldxa [%g1] ASI_DTLB_DATA_ACCESS, %g2 andcc %g2, 0x40, %g0 bne,pn %xcc, 2f mov TLB_TAG_ACCESS, %g2 stxa %g0, [%g2] ASI_DMMU stxa %g0, [%g1] ASI_DTLB_DATA_ACCESS membar #Sync 2: sub %g1, 8, %g1 brgez,pt %g1, 1b nop retry nop nop nop nop nop nop nop nop nop /* This runs in a very controlled environment, so we do * not need to worry about BH races etc. */ .globl xcall_sync_tick xcall_sync_tick: 661: rdpr %pstate, %g2 wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate .section .sun4v_2insn_patch, "ax" .word 661b nop nop .previous rdpr %pil, %g2 wrpr %g0, PIL_NORMAL_MAX, %pil sethi %hi(109f), %g7 b,pt %xcc, etrap_irq 109: or %g7, %lo(109b), %g7 #ifdef CONFIG_TRACE_IRQFLAGS call trace_hardirqs_off nop #endif call smp_synchronize_tick_client nop b rtrap_xcall ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1 .globl xcall_fetch_glob_regs xcall_fetch_glob_regs: sethi %hi(global_cpu_snapshot), %g1 or %g1, %lo(global_cpu_snapshot), %g1 __GET_CPUID(%g2) sllx %g2, 6, %g3 add %g1, %g3, %g1 rdpr %tstate, %g7 stx %g7, [%g1 + GR_SNAP_TSTATE] rdpr %tpc, %g7 stx %g7, [%g1 + GR_SNAP_TPC] rdpr %tnpc, %g7 stx %g7, [%g1 + GR_SNAP_TNPC] stx %o7, [%g1 + GR_SNAP_O7] stx %i7, [%g1 + GR_SNAP_I7] /* Don't try this at home kids... 
*/ rdpr %cwp, %g3 sub %g3, 1, %g7 wrpr %g7, %cwp mov %i7, %g7 wrpr %g3, %cwp stx %g7, [%g1 + GR_SNAP_RPC] sethi %hi(trap_block), %g7 or %g7, %lo(trap_block), %g7 sllx %g2, TRAP_BLOCK_SZ_SHIFT, %g2 add %g7, %g2, %g7 ldx [%g7 + TRAP_PER_CPU_THREAD], %g3 stx %g3, [%g1 + GR_SNAP_THREAD] retry .globl xcall_fetch_glob_pmu xcall_fetch_glob_pmu: sethi %hi(global_cpu_snapshot), %g1 or %g1, %lo(global_cpu_snapshot), %g1 __GET_CPUID(%g2) sllx %g2, 6, %g3 add %g1, %g3, %g1 rd %pic, %g7 stx %g7, [%g1 + (4 * 8)] rd %pcr, %g7 stx %g7, [%g1 + (0 * 8)] retry .globl xcall_fetch_glob_pmu_n4 xcall_fetch_glob_pmu_n4: sethi %hi(global_cpu_snapshot), %g1 or %g1, %lo(global_cpu_snapshot), %g1 __GET_CPUID(%g2) sllx %g2, 6, %g3 add %g1, %g3, %g1 ldxa [%g0] ASI_PIC, %g7 stx %g7, [%g1 + (4 * 8)] mov 0x08, %g3 ldxa [%g3] ASI_PIC, %g7 stx %g7, [%g1 + (5 * 8)] mov 0x10, %g3 ldxa [%g3] ASI_PIC, %g7 stx %g7, [%g1 + (6 * 8)] mov 0x18, %g3 ldxa [%g3] ASI_PIC, %g7 stx %g7, [%g1 + (7 * 8)] mov %o0, %g2 mov %o1, %g3 mov %o5, %g7 mov HV_FAST_VT_GET_PERFREG, %o5 mov 3, %o0 ta HV_FAST_TRAP stx %o1, [%g1 + (3 * 8)] mov HV_FAST_VT_GET_PERFREG, %o5 mov 2, %o0 ta HV_FAST_TRAP stx %o1, [%g1 + (2 * 8)] mov HV_FAST_VT_GET_PERFREG, %o5 mov 1, %o0 ta HV_FAST_TRAP stx %o1, [%g1 + (1 * 8)] mov HV_FAST_VT_GET_PERFREG, %o5 mov 0, %o0 ta HV_FAST_TRAP stx %o1, [%g1 + (0 * 8)] mov %g2, %o0 mov %g3, %o1 mov %g7, %o5 retry __cheetah_xcall_flush_tlb_kernel_range: /* 44 insns */ sethi %hi(PAGE_SIZE - 1), %g2 or %g2, %lo(PAGE_SIZE - 1), %g2 andn %g1, %g2, %g1 andn %g7, %g2, %g7 sub %g7, %g1, %g3 srlx %g3, 18, %g2 brnz,pn %g2, 2f sethi %hi(PAGE_SIZE), %g2 sub %g3, %g2, %g3 or %g1, 0x20, %g1 ! 
Nucleus 1: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP stxa %g0, [%g1 + %g3] ASI_IMMU_DEMAP membar #Sync brnz,pt %g3, 1b sub %g3, %g2, %g3 retry 2: mov 0x80, %g2 stxa %g0, [%g2] ASI_DMMU_DEMAP membar #Sync stxa %g0, [%g2] ASI_IMMU_DEMAP membar #Sync retry nop nop nop nop nop nop nop nop nop nop nop nop nop nop nop nop nop nop nop nop nop nop #ifdef DCACHE_ALIASING_POSSIBLE .align 32 .globl xcall_flush_dcache_page_cheetah xcall_flush_dcache_page_cheetah: /* %g1 == physical page address */ sethi %hi(PAGE_SIZE), %g3 1: subcc %g3, (1 << 5), %g3 stxa %g0, [%g1 + %g3] ASI_DCACHE_INVALIDATE membar #Sync bne,pt %icc, 1b nop retry nop #endif /* DCACHE_ALIASING_POSSIBLE */ .globl xcall_flush_dcache_page_spitfire xcall_flush_dcache_page_spitfire: /* %g1 == physical page address %g7 == kernel page virtual address %g5 == (page->mapping != NULL) */ #ifdef DCACHE_ALIASING_POSSIBLE srlx %g1, (13 - 2), %g1 ! Form tag comparitor sethi %hi(L1DCACHE_SIZE), %g3 ! D$ size == 16K sub %g3, (1 << 5), %g3 ! D$ linesize == 32 1: ldxa [%g3] ASI_DCACHE_TAG, %g2 andcc %g2, 0x3, %g0 be,pn %xcc, 2f andn %g2, 0x3, %g2 cmp %g2, %g1 bne,pt %xcc, 2f nop stxa %g0, [%g3] ASI_DCACHE_TAG membar #Sync 2: cmp %g3, 0 bne,pt %xcc, 1b sub %g3, (1 << 5), %g3 brz,pn %g5, 2f #endif /* DCACHE_ALIASING_POSSIBLE */ sethi %hi(PAGE_SIZE), %g3 1: flush %g7 subcc %g3, (1 << 5), %g3 bne,pt %icc, 1b add %g7, (1 << 5), %g7 2: retry nop nop /* %g5: error * %g6: tlb op */ __hypervisor_tlb_xcall_error: mov %g5, %g4 mov %g6, %g5 ba,pt %xcc, etrap rd %pc, %g7 mov %l4, %o0 call hypervisor_tlbop_error_xcall mov %l5, %o1 ba,a,pt %xcc, rtrap .globl __hypervisor_xcall_flush_tlb_mm __hypervisor_xcall_flush_tlb_mm: /* 24 insns */ /* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */ mov %o0, %g2 mov %o1, %g3 mov %o2, %g4 mov %o3, %g1 mov %o5, %g7 clr %o0 /* ARG0: CPU lists unimplemented */ clr %o1 /* ARG1: CPU lists unimplemented */ mov %g5, %o2 /* ARG2: mmu context */ mov HV_MMU_ALL, %o3 /* ARG3: flags */ mov HV_FAST_MMU_DEMAP_CTX, %o5 ta 
HV_FAST_TRAP mov HV_FAST_MMU_DEMAP_CTX, %g6 brnz,pn %o0, 1f mov %o0, %g5 mov %g2, %o0 mov %g3, %o1 mov %g4, %o2 mov %g1, %o3 mov %g7, %o5 membar #Sync retry 1: sethi %hi(__hypervisor_tlb_xcall_error), %g4 jmpl %g4 + %lo(__hypervisor_tlb_xcall_error), %g0 nop .globl __hypervisor_xcall_flush_tlb_page __hypervisor_xcall_flush_tlb_page: /* 20 insns */ /* %g5=ctx, %g1=vaddr */ mov %o0, %g2 mov %o1, %g3 mov %o2, %g4 mov %g1, %o0 /* ARG0: virtual address */ mov %g5, %o1 /* ARG1: mmu context */ mov HV_MMU_ALL, %o2 /* ARG2: flags */ srlx %o0, PAGE_SHIFT, %o0 sllx %o0, PAGE_SHIFT, %o0 ta HV_MMU_UNMAP_ADDR_TRAP mov HV_MMU_UNMAP_ADDR_TRAP, %g6 brnz,a,pn %o0, 1f mov %o0, %g5 mov %g2, %o0 mov %g3, %o1 mov %g4, %o2 membar #Sync retry 1: sethi %hi(__hypervisor_tlb_xcall_error), %g4 jmpl %g4 + %lo(__hypervisor_tlb_xcall_error), %g0 nop .globl __hypervisor_xcall_flush_tlb_kernel_range __hypervisor_xcall_flush_tlb_kernel_range: /* 44 insns */ /* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */ sethi %hi(PAGE_SIZE - 1), %g2 or %g2, %lo(PAGE_SIZE - 1), %g2 andn %g1, %g2, %g1 andn %g7, %g2, %g7 sub %g7, %g1, %g3 srlx %g3, 18, %g7 add %g2, 1, %g2 sub %g3, %g2, %g3 mov %o0, %g2 mov %o1, %g4 brnz,pn %g7, 2f mov %o2, %g7 1: add %g1, %g3, %o0 /* ARG0: virtual address */ mov 0, %o1 /* ARG1: mmu context */ mov HV_MMU_ALL, %o2 /* ARG2: flags */ ta HV_MMU_UNMAP_ADDR_TRAP mov HV_MMU_UNMAP_ADDR_TRAP, %g6 brnz,pn %o0, 1f mov %o0, %g5 sethi %hi(PAGE_SIZE), %o2 brnz,pt %g3, 1b sub %g3, %o2, %g3 5: mov %g2, %o0 mov %g4, %o1 mov %g7, %o2 membar #Sync retry 1: sethi %hi(__hypervisor_tlb_xcall_error), %g4 jmpl %g4 + %lo(__hypervisor_tlb_xcall_error), %g0 nop 2: mov %o3, %g1 mov %o5, %g3 mov 0, %o0 /* ARG0: CPU lists unimplemented */ mov 0, %o1 /* ARG1: CPU lists unimplemented */ mov 0, %o2 /* ARG2: mmu context == nucleus */ mov HV_MMU_ALL, %o3 /* ARG3: flags */ mov HV_FAST_MMU_DEMAP_CTX, %o5 ta HV_FAST_TRAP mov %g1, %o3 brz,pt %o0, 5b mov %g3, %o5 mov HV_FAST_MMU_DEMAP_CTX, %g6 ba,pt %xcc, 1b clr %g5 /* 
These just get rescheduled to PIL vectors. */ .globl xcall_call_function xcall_call_function: wr %g0, (1 << PIL_SMP_CALL_FUNC), %set_softint retry .globl xcall_call_function_single xcall_call_function_single: wr %g0, (1 << PIL_SMP_CALL_FUNC_SNGL), %set_softint retry .globl xcall_receive_signal xcall_receive_signal: wr %g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint retry .globl xcall_capture xcall_capture: wr %g0, (1 << PIL_SMP_CAPTURE), %set_softint retry #ifdef CONFIG_KGDB .globl xcall_kgdb_capture xcall_kgdb_capture: wr %g0, (1 << PIL_KGDB_CAPTURE), %set_softint retry #endif #endif /* CONFIG_SMP */ .globl cheetah_patch_cachetlbops cheetah_patch_cachetlbops: save %sp, -128, %sp sethi %hi(__flush_tlb_mm), %o0 or %o0, %lo(__flush_tlb_mm), %o0 sethi %hi(__cheetah_flush_tlb_mm), %o1 or %o1, %lo(__cheetah_flush_tlb_mm), %o1 call tlb_patch_one mov 19, %o2 sethi %hi(__flush_tlb_page), %o0 or %o0, %lo(__flush_tlb_page), %o0 sethi %hi(__cheetah_flush_tlb_page), %o1 or %o1, %lo(__cheetah_flush_tlb_page), %o1 call tlb_patch_one mov 22, %o2 sethi %hi(__flush_tlb_pending), %o0 or %o0, %lo(__flush_tlb_pending), %o0 sethi %hi(__cheetah_flush_tlb_pending), %o1 or %o1, %lo(__cheetah_flush_tlb_pending), %o1 call tlb_patch_one mov 27, %o2 sethi %hi(__flush_tlb_kernel_range), %o0 or %o0, %lo(__flush_tlb_kernel_range), %o0 sethi %hi(__cheetah_flush_tlb_kernel_range), %o1 or %o1, %lo(__cheetah_flush_tlb_kernel_range), %o1 call tlb_patch_one mov 31, %o2 #ifdef DCACHE_ALIASING_POSSIBLE sethi %hi(__flush_dcache_page), %o0 or %o0, %lo(__flush_dcache_page), %o0 sethi %hi(__cheetah_flush_dcache_page), %o1 or %o1, %lo(__cheetah_flush_dcache_page), %o1 call tlb_patch_one mov 11, %o2 #endif /* DCACHE_ALIASING_POSSIBLE */ #ifdef CONFIG_SMP sethi %hi(xcall_flush_tlb_kernel_range), %o0 or %o0, %lo(xcall_flush_tlb_kernel_range), %o0 sethi %hi(__cheetah_xcall_flush_tlb_kernel_range), %o1 or %o1, %lo(__cheetah_xcall_flush_tlb_kernel_range), %o1 call tlb_patch_one mov 44, %o2 #endif /* CONFIG_SMP */ 
ret restore .globl hypervisor_patch_cachetlbops hypervisor_patch_cachetlbops: save %sp, -128, %sp sethi %hi(__flush_tlb_mm), %o0 or %o0, %lo(__flush_tlb_mm), %o0 sethi %hi(__hypervisor_flush_tlb_mm), %o1 or %o1, %lo(__hypervisor_flush_tlb_mm), %o1 call tlb_patch_one mov 19, %o2 sethi %hi(__flush_tlb_page), %o0 or %o0, %lo(__flush_tlb_page), %o0 sethi %hi(__hypervisor_flush_tlb_page), %o1 or %o1, %lo(__hypervisor_flush_tlb_page), %o1 call tlb_patch_one mov 22, %o2 sethi %hi(__flush_tlb_pending), %o0 or %o0, %lo(__flush_tlb_pending), %o0 sethi %hi(__hypervisor_flush_tlb_pending), %o1 or %o1, %lo(__hypervisor_flush_tlb_pending), %o1 call tlb_patch_one mov 27, %o2 sethi %hi(__flush_tlb_kernel_range), %o0 or %o0, %lo(__flush_tlb_kernel_range), %o0 sethi %hi(__hypervisor_flush_tlb_kernel_range), %o1 or %o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1 call tlb_patch_one mov 31, %o2 #ifdef DCACHE_ALIASING_POSSIBLE sethi %hi(__flush_dcache_page), %o0 or %o0, %lo(__flush_dcache_page), %o0 sethi %hi(__hypervisor_flush_dcache_page), %o1 or %o1, %lo(__hypervisor_flush_dcache_page), %o1 call tlb_patch_one mov 2, %o2 #endif /* DCACHE_ALIASING_POSSIBLE */ #ifdef CONFIG_SMP sethi %hi(xcall_flush_tlb_mm), %o0 or %o0, %lo(xcall_flush_tlb_mm), %o0 sethi %hi(__hypervisor_xcall_flush_tlb_mm), %o1 or %o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1 call tlb_patch_one mov 24, %o2 sethi %hi(xcall_flush_tlb_page), %o0 or %o0, %lo(xcall_flush_tlb_page), %o0 sethi %hi(__hypervisor_xcall_flush_tlb_page), %o1 or %o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1 call tlb_patch_one mov 20, %o2 sethi %hi(xcall_flush_tlb_kernel_range), %o0 or %o0, %lo(xcall_flush_tlb_kernel_range), %o0 sethi %hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1 or %o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1 call tlb_patch_one mov 44, %o2 #endif /* CONFIG_SMP */ ret restore
aixcc-public/challenge-001-exemplar-source
6,160
arch/sparc/mm/viking.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * viking.S: High speed Viking cache/mmu operations * * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be) * Copyright (C) 1997,1998,1999 Jakub Jelinek (jj@ultra.linux.cz) * Copyright (C) 1999 Pavel Semerad (semerad@ss1000.ms.mff.cuni.cz) */ #include <asm/ptrace.h> #include <asm/psr.h> #include <asm/asm-offsets.h> #include <asm/asi.h> #include <asm/mxcc.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/pgtsrmmu.h> #include <asm/viking.h> #ifdef CONFIG_SMP .data .align 4 sun4dsmp_flush_tlb_spin: .word 0 #endif .text .align 4 .globl viking_flush_cache_all, viking_flush_cache_mm .globl viking_flush_cache_range, viking_flush_cache_page .globl viking_flush_page, viking_mxcc_flush_page .globl viking_flush_page_for_dma, viking_flush_page_to_ram .globl viking_flush_sig_insns .globl viking_flush_tlb_all, viking_flush_tlb_mm .globl viking_flush_tlb_range, viking_flush_tlb_page viking_flush_page: sethi %hi(PAGE_OFFSET), %g2 sub %o0, %g2, %g3 srl %g3, 12, %g1 ! ppage >> 12 clr %o1 ! set counter, 0 - 127 sethi %hi(PAGE_OFFSET + PAGE_SIZE - 0x80000000), %o3 sethi %hi(0x80000000), %o4 sethi %hi(VIKING_PTAG_VALID), %o5 sethi %hi(2*PAGE_SIZE), %o0 sethi %hi(PAGE_SIZE), %g7 clr %o2 ! block counter, 0 - 3 5: sll %o1, 5, %g4 or %g4, %o4, %g4 ! 0x80000000 | (set << 5) sll %o2, 26, %g5 ! block << 26 6: or %g5, %g4, %g5 ldda [%g5] ASI_M_DATAC_TAG, %g2 cmp %g3, %g1 ! ptag == ppage? bne 7f inc %o2 andcc %g2, %o5, %g0 ! ptag VALID? be 7f add %g4, %o3, %g2 ! (PAGE_OFFSET + PAGE_SIZE) | (set << 5) ld [%g2], %g3 ld [%g2 + %g7], %g3 add %g2, %o0, %g2 ld [%g2], %g3 ld [%g2 + %g7], %g3 add %g2, %o0, %g2 ld [%g2], %g3 ld [%g2 + %g7], %g3 add %g2, %o0, %g2 ld [%g2], %g3 b 8f ld [%g2 + %g7], %g3 7: cmp %o2, 3 ble 6b sll %o2, 26, %g5 ! block << 26 8: inc %o1 cmp %o1, 0x7f ble 5b clr %o2 9: retl nop viking_mxcc_flush_page: sethi %hi(PAGE_OFFSET), %g2 sub %o0, %g2, %g3 sub %g3, -PAGE_SIZE, %g3 ! ppage + PAGE_SIZE sethi %hi(MXCC_SRCSTREAM), %o3 ! 
assume %hi(MXCC_SRCSTREAM) == %hi(MXCC_DESTSTREAM) mov 0x10, %g2 ! set cacheable bit or %o3, %lo(MXCC_SRCSTREAM), %o2 or %o3, %lo(MXCC_DESSTREAM), %o3 sub %g3, MXCC_STREAM_SIZE, %g3 6: stda %g2, [%o2] ASI_M_MXCC stda %g2, [%o3] ASI_M_MXCC andncc %g3, PAGE_MASK, %g0 bne 6b sub %g3, MXCC_STREAM_SIZE, %g3 9: retl nop viking_flush_cache_page: viking_flush_cache_range: #ifndef CONFIG_SMP ld [%o0 + VMA_VM_MM], %o0 #endif viking_flush_cache_mm: #ifndef CONFIG_SMP ld [%o0 + AOFF_mm_context], %g1 cmp %g1, -1 bne viking_flush_cache_all nop b,a viking_flush_cache_out #endif viking_flush_cache_all: WINDOW_FLUSH(%g4, %g5) viking_flush_cache_out: retl nop viking_flush_tlb_all: mov 0x400, %g1 retl sta %g0, [%g1] ASI_M_FLUSH_PROBE viking_flush_tlb_mm: mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o1 lda [%g1] ASI_M_MMUREGS, %g5 #ifndef CONFIG_SMP cmp %o1, -1 be 1f #endif mov 0x300, %g2 sta %o1, [%g1] ASI_M_MMUREGS sta %g0, [%g2] ASI_M_FLUSH_PROBE retl sta %g5, [%g1] ASI_M_MMUREGS #ifndef CONFIG_SMP 1: retl nop #endif viking_flush_tlb_range: ld [%o0 + VMA_VM_MM], %o0 mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o3 lda [%g1] ASI_M_MMUREGS, %g5 #ifndef CONFIG_SMP cmp %o3, -1 be 2f #endif sethi %hi(~((1 << PGDIR_SHIFT) - 1)), %o4 sta %o3, [%g1] ASI_M_MMUREGS and %o1, %o4, %o1 add %o1, 0x200, %o1 sta %g0, [%o1] ASI_M_FLUSH_PROBE 1: sub %o1, %o4, %o1 cmp %o1, %o2 blu,a 1b sta %g0, [%o1] ASI_M_FLUSH_PROBE retl sta %g5, [%g1] ASI_M_MMUREGS #ifndef CONFIG_SMP 2: retl nop #endif viking_flush_tlb_page: ld [%o0 + VMA_VM_MM], %o0 mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o3 lda [%g1] ASI_M_MMUREGS, %g5 #ifndef CONFIG_SMP cmp %o3, -1 be 1f #endif and %o1, PAGE_MASK, %o1 sta %o3, [%g1] ASI_M_MMUREGS sta %g0, [%o1] ASI_M_FLUSH_PROBE retl sta %g5, [%g1] ASI_M_MMUREGS #ifndef CONFIG_SMP 1: retl nop #endif viking_flush_page_to_ram: viking_flush_page_for_dma: viking_flush_sig_insns: retl nop #ifdef CONFIG_SMP .globl sun4dsmp_flush_tlb_all, sun4dsmp_flush_tlb_mm .globl 
sun4dsmp_flush_tlb_range, sun4dsmp_flush_tlb_page sun4dsmp_flush_tlb_all: sethi %hi(sun4dsmp_flush_tlb_spin), %g3 1: ldstub [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5 tst %g5 bne 2f mov 0x400, %g1 sta %g0, [%g1] ASI_M_FLUSH_PROBE retl stb %g0, [%g3 + %lo(sun4dsmp_flush_tlb_spin)] 2: tst %g5 bne,a 2b ldub [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5 b,a 1b sun4dsmp_flush_tlb_mm: sethi %hi(sun4dsmp_flush_tlb_spin), %g3 1: ldstub [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5 tst %g5 bne 2f mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o1 lda [%g1] ASI_M_MMUREGS, %g5 mov 0x300, %g2 sta %o1, [%g1] ASI_M_MMUREGS sta %g0, [%g2] ASI_M_FLUSH_PROBE sta %g5, [%g1] ASI_M_MMUREGS retl stb %g0, [%g3 + %lo(sun4dsmp_flush_tlb_spin)] 2: tst %g5 bne,a 2b ldub [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5 b,a 1b sun4dsmp_flush_tlb_range: sethi %hi(sun4dsmp_flush_tlb_spin), %g3 1: ldstub [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5 tst %g5 bne 3f mov SRMMU_CTX_REG, %g1 ld [%o0 + VMA_VM_MM], %o0 ld [%o0 + AOFF_mm_context], %o3 lda [%g1] ASI_M_MMUREGS, %g5 sethi %hi(~((1 << PGDIR_SHIFT) - 1)), %o4 sta %o3, [%g1] ASI_M_MMUREGS and %o1, %o4, %o1 add %o1, 0x200, %o1 sta %g0, [%o1] ASI_M_FLUSH_PROBE 2: sub %o1, %o4, %o1 cmp %o1, %o2 blu,a 2b sta %g0, [%o1] ASI_M_FLUSH_PROBE sta %g5, [%g1] ASI_M_MMUREGS retl stb %g0, [%g3 + %lo(sun4dsmp_flush_tlb_spin)] 3: tst %g5 bne,a 3b ldub [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5 b,a 1b sun4dsmp_flush_tlb_page: sethi %hi(sun4dsmp_flush_tlb_spin), %g3 1: ldstub [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5 tst %g5 bne 2f mov SRMMU_CTX_REG, %g1 ld [%o0 + VMA_VM_MM], %o0 ld [%o0 + AOFF_mm_context], %o3 lda [%g1] ASI_M_MMUREGS, %g5 and %o1, PAGE_MASK, %o1 sta %o3, [%g1] ASI_M_MMUREGS sta %g0, [%o1] ASI_M_FLUSH_PROBE sta %g5, [%g1] ASI_M_MMUREGS retl stb %g0, [%g3 + %lo(sun4dsmp_flush_tlb_spin)] 2: tst %g5 bne,a 2b ldub [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5 b,a 1b nop #endif
aixcc-public/challenge-001-exemplar-source
3,219
arch/sparc/mm/tsunami.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * tsunami.S: High speed MicroSparc-I mmu/cache operations. * * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) */ #include <asm/ptrace.h> #include <asm/asm-offsets.h> #include <asm/psr.h> #include <asm/asi.h> #include <asm/page.h> #include <asm/pgtsrmmu.h> .text .align 4 .globl tsunami_flush_cache_all, tsunami_flush_cache_mm .globl tsunami_flush_cache_range, tsunami_flush_cache_page .globl tsunami_flush_page_to_ram, tsunami_flush_page_for_dma .globl tsunami_flush_sig_insns .globl tsunami_flush_tlb_all, tsunami_flush_tlb_mm .globl tsunami_flush_tlb_range, tsunami_flush_tlb_page /* Sliiick... */ tsunami_flush_cache_page: tsunami_flush_cache_range: ld [%o0 + VMA_VM_MM], %o0 tsunami_flush_cache_mm: ld [%o0 + AOFF_mm_context], %g2 cmp %g2, -1 be tsunami_flush_cache_out tsunami_flush_cache_all: WINDOW_FLUSH(%g4, %g5) tsunami_flush_page_for_dma: sta %g0, [%g0] ASI_M_IC_FLCLEAR sta %g0, [%g0] ASI_M_DC_FLCLEAR tsunami_flush_cache_out: tsunami_flush_page_to_ram: retl nop tsunami_flush_sig_insns: flush %o1 retl flush %o1 + 4 /* More slick stuff... */ tsunami_flush_tlb_range: ld [%o0 + VMA_VM_MM], %o0 tsunami_flush_tlb_mm: ld [%o0 + AOFF_mm_context], %g2 cmp %g2, -1 be tsunami_flush_tlb_out tsunami_flush_tlb_all: mov 0x400, %o1 sta %g0, [%o1] ASI_M_FLUSH_PROBE nop nop nop nop nop tsunami_flush_tlb_out: retl nop /* This one can be done in a fine grained manner... 
*/ tsunami_flush_tlb_page: ld [%o0 + VMA_VM_MM], %o0 mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o3 andn %o1, (PAGE_SIZE - 1), %o1 cmp %o3, -1 be tsunami_flush_tlb_page_out lda [%g1] ASI_M_MMUREGS, %g5 sta %o3, [%g1] ASI_M_MMUREGS sta %g0, [%o1] ASI_M_FLUSH_PROBE nop nop nop nop nop tsunami_flush_tlb_page_out: retl sta %g5, [%g1] ASI_M_MMUREGS #define MIRROR_BLOCK(dst, src, offset, t0, t1, t2, t3) \ ldd [src + offset + 0x18], t0; \ std t0, [dst + offset + 0x18]; \ ldd [src + offset + 0x10], t2; \ std t2, [dst + offset + 0x10]; \ ldd [src + offset + 0x08], t0; \ std t0, [dst + offset + 0x08]; \ ldd [src + offset + 0x00], t2; \ std t2, [dst + offset + 0x00]; tsunami_copy_1page: /* NOTE: This routine has to be shorter than 70insns --jj */ or %g0, (PAGE_SIZE >> 8), %g1 1: MIRROR_BLOCK(%o0, %o1, 0x00, %o2, %o3, %o4, %o5) MIRROR_BLOCK(%o0, %o1, 0x20, %o2, %o3, %o4, %o5) MIRROR_BLOCK(%o0, %o1, 0x40, %o2, %o3, %o4, %o5) MIRROR_BLOCK(%o0, %o1, 0x60, %o2, %o3, %o4, %o5) MIRROR_BLOCK(%o0, %o1, 0x80, %o2, %o3, %o4, %o5) MIRROR_BLOCK(%o0, %o1, 0xa0, %o2, %o3, %o4, %o5) MIRROR_BLOCK(%o0, %o1, 0xc0, %o2, %o3, %o4, %o5) MIRROR_BLOCK(%o0, %o1, 0xe0, %o2, %o3, %o4, %o5) subcc %g1, 1, %g1 add %o0, 0x100, %o0 bne 1b add %o1, 0x100, %o1 .globl tsunami_setup_blockops tsunami_setup_blockops: sethi %hi(__copy_1page), %o0 or %o0, %lo(__copy_1page), %o0 sethi %hi(tsunami_copy_1page), %o1 or %o1, %lo(tsunami_copy_1page), %o1 sethi %hi(tsunami_setup_blockops), %o2 or %o2, %lo(tsunami_setup_blockops), %o2 ld [%o1], %o4 1: add %o1, 4, %o1 st %o4, [%o0] add %o0, 4, %o0 cmp %o1, %o2 bne 1b ld [%o1], %o4 sta %g0, [%g0] ASI_M_IC_FLCLEAR sta %g0, [%g0] ASI_M_DC_FLCLEAR retl nop
aixcc-public/challenge-001-exemplar-source
5,349
arch/sparc/mm/swift.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * swift.S: MicroSparc-II mmu/cache operations. * * Copyright (C) 1999 David S. Miller (davem@redhat.com) */ #include <asm/psr.h> #include <asm/asi.h> #include <asm/page.h> #include <asm/pgtsrmmu.h> #include <asm/asm-offsets.h> .text .align 4 #if 1 /* XXX screw this, I can't get the VAC flushes working * XXX reliably... -DaveM */ .globl swift_flush_cache_all, swift_flush_cache_mm .globl swift_flush_cache_range, swift_flush_cache_page .globl swift_flush_page_for_dma .globl swift_flush_page_to_ram swift_flush_cache_all: swift_flush_cache_mm: swift_flush_cache_range: swift_flush_cache_page: swift_flush_page_for_dma: swift_flush_page_to_ram: sethi %hi(0x2000), %o0 1: subcc %o0, 0x10, %o0 add %o0, %o0, %o1 sta %g0, [%o0] ASI_M_DATAC_TAG bne 1b sta %g0, [%o1] ASI_M_TXTC_TAG retl nop #else .globl swift_flush_cache_all swift_flush_cache_all: WINDOW_FLUSH(%g4, %g5) /* Just clear out all the tags. */ sethi %hi(16 * 1024), %o0 1: subcc %o0, 16, %o0 sta %g0, [%o0] ASI_M_TXTC_TAG bne 1b sta %g0, [%o0] ASI_M_DATAC_TAG retl nop .globl swift_flush_cache_mm swift_flush_cache_mm: ld [%o0 + AOFF_mm_context], %g2 cmp %g2, -1 be swift_flush_cache_mm_out WINDOW_FLUSH(%g4, %g5) rd %psr, %g1 andn %g1, PSR_ET, %g3 wr %g3, 0x0, %psr nop nop mov SRMMU_CTX_REG, %g7 lda [%g7] ASI_M_MMUREGS, %g5 sta %g2, [%g7] ASI_M_MMUREGS #if 1 sethi %hi(0x2000), %o0 1: subcc %o0, 0x10, %o0 sta %g0, [%o0] ASI_M_FLUSH_CTX bne 1b nop #else clr %o0 or %g0, 2048, %g7 or %g0, 2048, %o1 add %o1, 2048, %o2 add %o2, 2048, %o3 mov 16, %o4 add %o4, 2048, %o5 add %o5, 2048, %g2 add %g2, 2048, %g3 1: sta %g0, [%o0 ] ASI_M_FLUSH_CTX sta %g0, [%o0 + %o1] ASI_M_FLUSH_CTX sta %g0, [%o0 + %o2] ASI_M_FLUSH_CTX sta %g0, [%o0 + %o3] ASI_M_FLUSH_CTX sta %g0, [%o0 + %o4] ASI_M_FLUSH_CTX sta %g0, [%o0 + %o5] ASI_M_FLUSH_CTX sta %g0, [%o0 + %g2] ASI_M_FLUSH_CTX sta %g0, [%o0 + %g3] ASI_M_FLUSH_CTX subcc %g7, 32, %g7 bne 1b add %o0, 32, %o0 #endif mov SRMMU_CTX_REG, %g7 sta %g5, [%g7] 
ASI_M_MMUREGS wr %g1, 0x0, %psr nop nop swift_flush_cache_mm_out: retl nop .globl swift_flush_cache_range swift_flush_cache_range: ld [%o0 + VMA_VM_MM], %o0 sub %o2, %o1, %o2 sethi %hi(4096), %o3 cmp %o2, %o3 bgu swift_flush_cache_mm nop b 70f nop .globl swift_flush_cache_page swift_flush_cache_page: ld [%o0 + VMA_VM_MM], %o0 70: ld [%o0 + AOFF_mm_context], %g2 cmp %g2, -1 be swift_flush_cache_page_out WINDOW_FLUSH(%g4, %g5) rd %psr, %g1 andn %g1, PSR_ET, %g3 wr %g3, 0x0, %psr nop nop mov SRMMU_CTX_REG, %g7 lda [%g7] ASI_M_MMUREGS, %g5 sta %g2, [%g7] ASI_M_MMUREGS andn %o1, (PAGE_SIZE - 1), %o1 #if 1 sethi %hi(0x1000), %o0 1: subcc %o0, 0x10, %o0 sta %g0, [%o1 + %o0] ASI_M_FLUSH_PAGE bne 1b nop #else or %g0, 512, %g7 or %g0, 512, %o0 add %o0, 512, %o2 add %o2, 512, %o3 add %o3, 512, %o4 add %o4, 512, %o5 add %o5, 512, %g3 add %g3, 512, %g4 1: sta %g0, [%o1 ] ASI_M_FLUSH_PAGE sta %g0, [%o1 + %o0] ASI_M_FLUSH_PAGE sta %g0, [%o1 + %o2] ASI_M_FLUSH_PAGE sta %g0, [%o1 + %o3] ASI_M_FLUSH_PAGE sta %g0, [%o1 + %o4] ASI_M_FLUSH_PAGE sta %g0, [%o1 + %o5] ASI_M_FLUSH_PAGE sta %g0, [%o1 + %g3] ASI_M_FLUSH_PAGE sta %g0, [%o1 + %g4] ASI_M_FLUSH_PAGE subcc %g7, 16, %g7 bne 1b add %o1, 16, %o1 #endif mov SRMMU_CTX_REG, %g7 sta %g5, [%g7] ASI_M_MMUREGS wr %g1, 0x0, %psr nop nop swift_flush_cache_page_out: retl nop /* Swift is write-thru, however it is not * I/O nor TLB-walk coherent. Also it has * caches which are virtually indexed and tagged. 
*/ .globl swift_flush_page_for_dma .globl swift_flush_page_to_ram swift_flush_page_for_dma: swift_flush_page_to_ram: andn %o0, (PAGE_SIZE - 1), %o1 #if 1 sethi %hi(0x1000), %o0 1: subcc %o0, 0x10, %o0 sta %g0, [%o1 + %o0] ASI_M_FLUSH_PAGE bne 1b nop #else or %g0, 512, %g7 or %g0, 512, %o0 add %o0, 512, %o2 add %o2, 512, %o3 add %o3, 512, %o4 add %o4, 512, %o5 add %o5, 512, %g3 add %g3, 512, %g4 1: sta %g0, [%o1 ] ASI_M_FLUSH_PAGE sta %g0, [%o1 + %o0] ASI_M_FLUSH_PAGE sta %g0, [%o1 + %o2] ASI_M_FLUSH_PAGE sta %g0, [%o1 + %o3] ASI_M_FLUSH_PAGE sta %g0, [%o1 + %o4] ASI_M_FLUSH_PAGE sta %g0, [%o1 + %o5] ASI_M_FLUSH_PAGE sta %g0, [%o1 + %g3] ASI_M_FLUSH_PAGE sta %g0, [%o1 + %g4] ASI_M_FLUSH_PAGE subcc %g7, 16, %g7 bne 1b add %o1, 16, %o1 #endif retl nop #endif .globl swift_flush_sig_insns swift_flush_sig_insns: flush %o1 retl flush %o1 + 4 .globl swift_flush_tlb_mm .globl swift_flush_tlb_range .globl swift_flush_tlb_all swift_flush_tlb_range: ld [%o0 + VMA_VM_MM], %o0 swift_flush_tlb_mm: ld [%o0 + AOFF_mm_context], %g2 cmp %g2, -1 be swift_flush_tlb_all_out swift_flush_tlb_all: mov 0x400, %o1 sta %g0, [%o1] ASI_M_FLUSH_PROBE swift_flush_tlb_all_out: retl nop .globl swift_flush_tlb_page swift_flush_tlb_page: ld [%o0 + VMA_VM_MM], %o0 mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o3 andn %o1, (PAGE_SIZE - 1), %o1 cmp %o3, -1 be swift_flush_tlb_page_out nop #if 1 mov 0x400, %o1 sta %g0, [%o1] ASI_M_FLUSH_PROBE #else lda [%g1] ASI_M_MMUREGS, %g5 sta %o3, [%g1] ASI_M_MMUREGS sta %g0, [%o1] ASI_M_FLUSH_PAGE /* rem. virt. cache. prot. */ sta %g0, [%o1] ASI_M_FLUSH_PROBE sta %g5, [%g1] ASI_M_MMUREGS #endif swift_flush_tlb_page_out: retl nop
aixcc-public/challenge-001-exemplar-source
1,937
arch/sparc/mm/srmmu_access.S
/* SPDX-License-Identifier: GPL-2.0 */ /* Assembler variants of srmmu access functions. * Implemented in assembler to allow run-time patching. * LEON uses a different ASI for MMUREGS than SUN. * * The leon_1insn_patch infrastructure is used * for the run-time patching. */ #include <linux/linkage.h> #include <asm/asmmacro.h> #include <asm/pgtsrmmu.h> #include <asm/asi.h> /* unsigned int srmmu_get_mmureg(void) */ ENTRY(srmmu_get_mmureg) LEON_PI(lda [%g0] ASI_LEON_MMUREGS, %o0) SUN_PI_(lda [%g0] ASI_M_MMUREGS, %o0) retl nop ENDPROC(srmmu_get_mmureg) /* void srmmu_set_mmureg(unsigned long regval) */ ENTRY(srmmu_set_mmureg) LEON_PI(sta %o0, [%g0] ASI_LEON_MMUREGS) SUN_PI_(sta %o0, [%g0] ASI_M_MMUREGS) retl nop ENDPROC(srmmu_set_mmureg) /* void srmmu_set_ctable_ptr(unsigned long paddr) */ ENTRY(srmmu_set_ctable_ptr) /* paddr = ((paddr >> 4) & SRMMU_CTX_PMASK); */ srl %o0, 4, %g1 and %g1, SRMMU_CTX_PMASK, %g1 mov SRMMU_CTXTBL_PTR, %g2 LEON_PI(sta %g1, [%g2] ASI_LEON_MMUREGS) SUN_PI_(sta %g1, [%g2] ASI_M_MMUREGS) retl nop ENDPROC(srmmu_set_ctable_ptr) /* void srmmu_set_context(int context) */ ENTRY(srmmu_set_context) mov SRMMU_CTX_REG, %g1 LEON_PI(sta %o0, [%g1] ASI_LEON_MMUREGS) SUN_PI_(sta %o0, [%g1] ASI_M_MMUREGS) retl nop ENDPROC(srmmu_set_context) /* int srmmu_get_context(void) */ ENTRY(srmmu_get_context) mov SRMMU_CTX_REG, %o0 LEON_PI(lda [%o0] ASI_LEON_MMUREGS, %o0) SUN_PI_(lda [%o0] ASI_M_MMUREGS, %o0) retl nop ENDPROC(srmmu_get_context) /* unsigned int srmmu_get_fstatus(void) */ ENTRY(srmmu_get_fstatus) mov SRMMU_FAULT_STATUS, %o0 LEON_PI(lda [%o0] ASI_LEON_MMUREGS, %o0) SUN_PI_(lda [%o0] ASI_M_MMUREGS, %o0) retl nop ENDPROC(srmmu_get_fstatus) /* unsigned int srmmu_get_faddr(void) */ ENTRY(srmmu_get_faddr) mov SRMMU_FAULT_ADDR, %o0 LEON_PI(lda [%o0] ASI_LEON_MMUREGS, %o0) SUN_PI_(lda [%o0] ASI_M_MMUREGS, %o0) retl nop ENDPROC(srmmu_get_faddr)
ajeet17181/mplayer-android
1,852
loader/wrapper.S
#include "config.h" #define GLUE(a, b) a ## b #define JOIN(a, b) GLUE(a, b) #define MANGLE(s) JOIN(EXTERN_ASM, s) .data .globl MANGLE(caller_return) MANGLE(caller_return): .long 0 .globl MANGLE(report_entry) MANGLE(report_entry): .long MANGLE(null_call) .globl MANGLE(report_ret) MANGLE(report_ret): .long MANGLE(null_call) .global MANGLE(wrapper_target) MANGLE(wrapper_target): .long MANGLE(null_call) .text .globl MANGLE(null_call) .type MANGLE(null_call), @function .balign 16,0x90 MANGLE(null_call): ret .globl MANGLE(wrapper) .type MANGLE(wrapper), @function .balign 16,0x90 MANGLE(wrapper): pusha # store registers (EAX, ECX, EDX, EBX, ESP, EBP, ESI, EDI) pushf # store flags push %ebp # set up a stack frame movl %esp, %ebp leal 4(%ebp), %eax # push flags addr push %eax leal 8(%ebp), %eax # push registers addr push %eax leal 40(%ebp), %edx movl (%ebp), %eax subl %edx, %eax push %eax push %edx call *MANGLE(report_entry) # report entry test %eax, %eax jnz .Ldone leave # restore %esp, %ebp popf # restore flags popa # restore registers popl MANGLE(caller_return) # switch return addresses pushl $.Lwrapper_return jmp *MANGLE(wrapper_target) # wrapper_target should return at .Lwrapper_return .balign 16, 0x90 .Lwrapper_return: pushl MANGLE(caller_return) # restore the original return address pusha # more for reference sake here pushf push %ebp # set up a stack frame movl %esp, %ebp leal 4(%ebp), %eax # push flags addr push %eax leal 8(%ebp), %eax # push registers addr push %eax leal 40(%ebp), %edx # push stack top address (relative to our entry) movl (%ebp), %eax subl %edx, %eax # calculate difference between entry and previous frame push %eax push %edx call *MANGLE(report_ret) # report the return information (same args) .Ldone: leave popf popa ret
aixcc-public/challenge-001-exemplar-source
28,153
arch/ia64/kernel/head.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Here is where the ball gets rolling as far as the kernel is concerned. * When control is transferred to _start, the bootload has already * loaded us to the correct address. All that's left to do here is * to set up the kernel's global pointer and jump to the kernel * entry point. * * Copyright (C) 1998-2001, 2003, 2005 Hewlett-Packard Co * David Mosberger-Tang <davidm@hpl.hp.com> * Stephane Eranian <eranian@hpl.hp.com> * Copyright (C) 1999 VA Linux Systems * Copyright (C) 1999 Walt Drummond <drummond@valinux.com> * Copyright (C) 1999 Intel Corp. * Copyright (C) 1999 Asit Mallick <Asit.K.Mallick@intel.com> * Copyright (C) 1999 Don Dugger <Don.Dugger@intel.com> * Copyright (C) 2002 Fenghua Yu <fenghua.yu@intel.com> * -Optimize __ia64_save_fpu() and __ia64_load_fpu() for Itanium 2. * Copyright (C) 2004 Ashok Raj <ashok.raj@intel.com> * Support for CPU Hotplug */ #include <linux/pgtable.h> #include <asm/asmmacro.h> #include <asm/fpu.h> #include <asm/kregs.h> #include <asm/mmu_context.h> #include <asm/asm-offsets.h> #include <asm/pal.h> #include <asm/processor.h> #include <asm/ptrace.h> #include <asm/mca_asm.h> #include <linux/init.h> #include <linux/linkage.h> #include <asm/export.h> #ifdef CONFIG_HOTPLUG_CPU #define SAL_PSR_BITS_TO_SET \ (IA64_PSR_AC | IA64_PSR_BN | IA64_PSR_MFH | IA64_PSR_MFL) #define SAVE_FROM_REG(src, ptr, dest) \ mov dest=src;; \ st8 [ptr]=dest,0x08 #define RESTORE_REG(reg, ptr, _tmp) \ ld8 _tmp=[ptr],0x08;; \ mov reg=_tmp #define SAVE_BREAK_REGS(ptr, _idx, _breg, _dest)\ mov ar.lc=IA64_NUM_DBG_REGS-1;; \ mov _idx=0;; \ 1: \ SAVE_FROM_REG(_breg[_idx], ptr, _dest);; \ add _idx=1,_idx;; \ br.cloop.sptk.many 1b #define RESTORE_BREAK_REGS(ptr, _idx, _breg, _tmp, _lbl)\ mov ar.lc=IA64_NUM_DBG_REGS-1;; \ mov _idx=0;; \ _lbl: RESTORE_REG(_breg[_idx], ptr, _tmp);; \ add _idx=1, _idx;; \ br.cloop.sptk.many _lbl #define SAVE_ONE_RR(num, _reg, _tmp) \ movl _tmp=(num<<61);; \ mov _reg=rr[_tmp] #define 
SAVE_REGION_REGS(_tmp, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7) \ SAVE_ONE_RR(0,_r0, _tmp);; \ SAVE_ONE_RR(1,_r1, _tmp);; \ SAVE_ONE_RR(2,_r2, _tmp);; \ SAVE_ONE_RR(3,_r3, _tmp);; \ SAVE_ONE_RR(4,_r4, _tmp);; \ SAVE_ONE_RR(5,_r5, _tmp);; \ SAVE_ONE_RR(6,_r6, _tmp);; \ SAVE_ONE_RR(7,_r7, _tmp);; #define STORE_REGION_REGS(ptr, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7) \ st8 [ptr]=_r0, 8;; \ st8 [ptr]=_r1, 8;; \ st8 [ptr]=_r2, 8;; \ st8 [ptr]=_r3, 8;; \ st8 [ptr]=_r4, 8;; \ st8 [ptr]=_r5, 8;; \ st8 [ptr]=_r6, 8;; \ st8 [ptr]=_r7, 8;; #define RESTORE_REGION_REGS(ptr, _idx1, _idx2, _tmp) \ mov ar.lc=0x08-1;; \ movl _idx1=0x00;; \ RestRR: \ dep.z _idx2=_idx1,61,3;; \ ld8 _tmp=[ptr],8;; \ mov rr[_idx2]=_tmp;; \ srlz.d;; \ add _idx1=1,_idx1;; \ br.cloop.sptk.few RestRR #define SET_AREA_FOR_BOOTING_CPU(reg1, reg2) \ movl reg1=sal_state_for_booting_cpu;; \ ld8 reg2=[reg1];; /* * Adjust region registers saved before starting to save * break regs and rest of the states that need to be preserved. */ #define SAL_TO_OS_BOOT_HANDOFF_STATE_SAVE(_reg1,_reg2,_pred) \ SAVE_FROM_REG(b0,_reg1,_reg2);; \ SAVE_FROM_REG(b1,_reg1,_reg2);; \ SAVE_FROM_REG(b2,_reg1,_reg2);; \ SAVE_FROM_REG(b3,_reg1,_reg2);; \ SAVE_FROM_REG(b4,_reg1,_reg2);; \ SAVE_FROM_REG(b5,_reg1,_reg2);; \ st8 [_reg1]=r1,0x08;; \ st8 [_reg1]=r12,0x08;; \ st8 [_reg1]=r13,0x08;; \ SAVE_FROM_REG(ar.fpsr,_reg1,_reg2);; \ SAVE_FROM_REG(ar.pfs,_reg1,_reg2);; \ SAVE_FROM_REG(ar.rnat,_reg1,_reg2);; \ SAVE_FROM_REG(ar.unat,_reg1,_reg2);; \ SAVE_FROM_REG(ar.bspstore,_reg1,_reg2);; \ SAVE_FROM_REG(cr.dcr,_reg1,_reg2);; \ SAVE_FROM_REG(cr.iva,_reg1,_reg2);; \ SAVE_FROM_REG(cr.pta,_reg1,_reg2);; \ SAVE_FROM_REG(cr.itv,_reg1,_reg2);; \ SAVE_FROM_REG(cr.pmv,_reg1,_reg2);; \ SAVE_FROM_REG(cr.cmcv,_reg1,_reg2);; \ SAVE_FROM_REG(cr.lrr0,_reg1,_reg2);; \ SAVE_FROM_REG(cr.lrr1,_reg1,_reg2);; \ st8 [_reg1]=r4,0x08;; \ st8 [_reg1]=r5,0x08;; \ st8 [_reg1]=r6,0x08;; \ st8 [_reg1]=r7,0x08;; \ st8 [_reg1]=_pred,0x08;; \ SAVE_FROM_REG(ar.lc, _reg1, 
_reg2);; \ stf.spill.nta [_reg1]=f2,16;; \ stf.spill.nta [_reg1]=f3,16;; \ stf.spill.nta [_reg1]=f4,16;; \ stf.spill.nta [_reg1]=f5,16;; \ stf.spill.nta [_reg1]=f16,16;; \ stf.spill.nta [_reg1]=f17,16;; \ stf.spill.nta [_reg1]=f18,16;; \ stf.spill.nta [_reg1]=f19,16;; \ stf.spill.nta [_reg1]=f20,16;; \ stf.spill.nta [_reg1]=f21,16;; \ stf.spill.nta [_reg1]=f22,16;; \ stf.spill.nta [_reg1]=f23,16;; \ stf.spill.nta [_reg1]=f24,16;; \ stf.spill.nta [_reg1]=f25,16;; \ stf.spill.nta [_reg1]=f26,16;; \ stf.spill.nta [_reg1]=f27,16;; \ stf.spill.nta [_reg1]=f28,16;; \ stf.spill.nta [_reg1]=f29,16;; \ stf.spill.nta [_reg1]=f30,16;; \ stf.spill.nta [_reg1]=f31,16;; #else #define SET_AREA_FOR_BOOTING_CPU(a1, a2) #define SAL_TO_OS_BOOT_HANDOFF_STATE_SAVE(a1,a2, a3) #define SAVE_REGION_REGS(_tmp, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7) #define STORE_REGION_REGS(ptr, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7) #endif #define SET_ONE_RR(num, pgsize, _tmp1, _tmp2, vhpt) \ movl _tmp1=(num << 61);; \ mov _tmp2=((ia64_rid(IA64_REGION_ID_KERNEL, (num<<61)) << 8) | (pgsize << 2) | vhpt);; \ mov rr[_tmp1]=_tmp2 __PAGE_ALIGNED_DATA .global empty_zero_page EXPORT_DATA_SYMBOL_GPL(empty_zero_page) empty_zero_page: .skip PAGE_SIZE .global swapper_pg_dir swapper_pg_dir: .skip PAGE_SIZE .rodata halt_msg: stringz "Halting kernel\n" __REF .global start_ap /* * Start the kernel. When the bootloader passes control to _start(), r28 * points to the address of the boot parameter area. Execution reaches * here in physical mode. 
*/ GLOBAL_ENTRY(_start) start_ap: .prologue .save rp, r0 // terminate unwind chain with a NULL rp .body rsm psr.i | psr.ic ;; srlz.i ;; { flushrs // must be first insn in group srlz.i } ;; /* * Save the region registers, predicate before they get clobbered */ SAVE_REGION_REGS(r2, r8,r9,r10,r11,r12,r13,r14,r15); mov r25=pr;; /* * Initialize kernel region registers: * rr[0]: VHPT enabled, page size = PAGE_SHIFT * rr[1]: VHPT enabled, page size = PAGE_SHIFT * rr[2]: VHPT enabled, page size = PAGE_SHIFT * rr[3]: VHPT enabled, page size = PAGE_SHIFT * rr[4]: VHPT enabled, page size = PAGE_SHIFT * rr[5]: VHPT enabled, page size = PAGE_SHIFT * rr[6]: VHPT disabled, page size = IA64_GRANULE_SHIFT * rr[7]: VHPT disabled, page size = IA64_GRANULE_SHIFT * We initialize all of them to prevent inadvertently assuming * something about the state of address translation early in boot. */ SET_ONE_RR(0, PAGE_SHIFT, r2, r16, 1);; SET_ONE_RR(1, PAGE_SHIFT, r2, r16, 1);; SET_ONE_RR(2, PAGE_SHIFT, r2, r16, 1);; SET_ONE_RR(3, PAGE_SHIFT, r2, r16, 1);; SET_ONE_RR(4, PAGE_SHIFT, r2, r16, 1);; SET_ONE_RR(5, PAGE_SHIFT, r2, r16, 1);; SET_ONE_RR(6, IA64_GRANULE_SHIFT, r2, r16, 0);; SET_ONE_RR(7, IA64_GRANULE_SHIFT, r2, r16, 0);; /* * Now pin mappings into the TLB for kernel text and data */ mov r18=KERNEL_TR_PAGE_SHIFT<<2 movl r17=KERNEL_START ;; mov cr.itir=r18 mov cr.ifa=r17 mov r16=IA64_TR_KERNEL mov r3=ip movl r18=PAGE_KERNEL ;; dep r2=0,r3,0,KERNEL_TR_PAGE_SHIFT ;; or r18=r2,r18 ;; srlz.i ;; itr.i itr[r16]=r18 ;; itr.d dtr[r16]=r18 ;; srlz.i /* * Switch into virtual mode: */ movl r16=(IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN \ |IA64_PSR_DI) ;; mov cr.ipsr=r16 movl r17=1f ;; mov cr.iip=r17 mov cr.ifs=r0 ;; rfi ;; 1: // now we are in virtual mode SET_AREA_FOR_BOOTING_CPU(r2, r16); STORE_REGION_REGS(r16, r8,r9,r10,r11,r12,r13,r14,r15); SAL_TO_OS_BOOT_HANDOFF_STATE_SAVE(r16,r17,r25) ;; // set IVT entry point---can't access I/O ports without it movl r3=ia64_ivt 
;; mov cr.iva=r3 movl r2=FPSR_DEFAULT ;; srlz.i movl gp=__gp mov ar.fpsr=r2 ;; #define isAP p2 // are we an Application Processor? #define isBP p3 // are we the Bootstrap Processor? #ifdef CONFIG_SMP /* * Find the init_task for the currently booting CPU. At poweron, and in * UP mode, task_for_booting_cpu is NULL. */ movl r3=task_for_booting_cpu ;; ld8 r3=[r3] movl r2=init_task ;; cmp.eq isBP,isAP=r3,r0 ;; (isAP) mov r2=r3 #else movl r2=init_task cmp.eq isBP,isAP=r0,r0 #endif ;; tpa r3=r2 // r3 == phys addr of task struct mov r16=-1 (isBP) br.cond.dpnt .load_current // BP stack is on region 5 --- no need to map it // load mapping for stack (virtaddr in r2, physaddr in r3) rsm psr.ic movl r17=PAGE_KERNEL ;; srlz.d dep r18=0,r3,0,12 ;; or r18=r17,r18 dep r2=-1,r3,61,3 // IMVA of task ;; mov r17=rr[r2] shr.u r16=r3,IA64_GRANULE_SHIFT ;; dep r17=0,r17,8,24 ;; mov cr.itir=r17 mov cr.ifa=r2 mov r19=IA64_TR_CURRENT_STACK ;; itr.d dtr[r19]=r18 ;; ssm psr.ic srlz.d ;; .load_current: // load the "current" pointer (r13) and ar.k6 with the current task mov IA64_KR(CURRENT)=r2 // virtual address mov IA64_KR(CURRENT_STACK)=r16 mov r13=r2 /* * Reserve space at the top of the stack for "struct pt_regs". Kernel * threads don't store interesting values in that structure, but the space * still needs to be there because time-critical stuff such as the context * switching can be implemented more efficiently (for example, __switch_to() * always sets the psr.dfh bit of the task it is switching to). 
*/ addl r12=IA64_STK_OFFSET-IA64_PT_REGS_SIZE-16,r2 addl r2=IA64_RBS_OFFSET,r2 // initialize the RSE mov ar.rsc=0 // place RSE in enforced lazy mode ;; loadrs // clear the dirty partition movl r19=__phys_per_cpu_start mov r18=PERCPU_PAGE_SIZE ;; #ifndef CONFIG_SMP add r19=r19,r18 ;; #else (isAP) br.few 2f movl r20=__cpu0_per_cpu ;; shr.u r18=r18,3 1: ld8 r21=[r19],8;; st8[r20]=r21,8 adds r18=-1,r18;; cmp4.lt p7,p6=0,r18 (p7) br.cond.dptk.few 1b mov r19=r20 ;; 2: #endif tpa r19=r19 ;; .pred.rel.mutex isBP,isAP (isBP) mov IA64_KR(PER_CPU_DATA)=r19 // per-CPU base for cpu0 (isAP) mov IA64_KR(PER_CPU_DATA)=r0 // clear physical per-CPU base ;; mov ar.bspstore=r2 // establish the new RSE stack ;; mov ar.rsc=0x3 // place RSE in eager mode (isBP) dep r28=-1,r28,61,3 // make address virtual (isBP) movl r2=ia64_boot_param ;; (isBP) st8 [r2]=r28 // save the address of the boot param area passed by the bootloader #ifdef CONFIG_SMP (isAP) br.call.sptk.many rp=start_secondary .ret0: (isAP) br.cond.sptk self #endif // This is executed by the bootstrap processor (bsp) only: br.call.sptk.many rp=start_kernel .ret2: addl r3=@ltoff(halt_msg),gp ;; alloc r2=ar.pfs,8,0,2,0 ;; ld8 out0=[r3] br.call.sptk.many b0=console_print self: hint @pause br.sptk.many self // endless loop END(_start) .text GLOBAL_ENTRY(ia64_save_debug_regs) alloc r16=ar.pfs,1,0,0,0 mov r20=ar.lc // preserve ar.lc mov ar.lc=IA64_NUM_DBG_REGS-1 mov r18=0 add r19=IA64_NUM_DBG_REGS*8,in0 ;; 1: mov r16=dbr[r18] #ifdef CONFIG_ITANIUM ;; srlz.d #endif mov r17=ibr[r18] add r18=1,r18 ;; st8.nta [in0]=r16,8 st8.nta [r19]=r17,8 br.cloop.sptk.many 1b ;; mov ar.lc=r20 // restore ar.lc br.ret.sptk.many rp END(ia64_save_debug_regs) GLOBAL_ENTRY(ia64_load_debug_regs) alloc r16=ar.pfs,1,0,0,0 lfetch.nta [in0] mov r20=ar.lc // preserve ar.lc add r19=IA64_NUM_DBG_REGS*8,in0 mov ar.lc=IA64_NUM_DBG_REGS-1 mov r18=-1 ;; 1: ld8.nta r16=[in0],8 ld8.nta r17=[r19],8 add r18=1,r18 ;; mov dbr[r18]=r16 #ifdef CONFIG_ITANIUM ;; srlz.d // Errata 
132 (NoFix status) #endif mov ibr[r18]=r17 br.cloop.sptk.many 1b ;; mov ar.lc=r20 // restore ar.lc br.ret.sptk.many rp END(ia64_load_debug_regs) GLOBAL_ENTRY(__ia64_save_fpu) alloc r2=ar.pfs,1,4,0,0 adds loc0=96*16-16,in0 adds loc1=96*16-16-128,in0 ;; stf.spill.nta [loc0]=f127,-256 stf.spill.nta [loc1]=f119,-256 ;; stf.spill.nta [loc0]=f111,-256 stf.spill.nta [loc1]=f103,-256 ;; stf.spill.nta [loc0]=f95,-256 stf.spill.nta [loc1]=f87,-256 ;; stf.spill.nta [loc0]=f79,-256 stf.spill.nta [loc1]=f71,-256 ;; stf.spill.nta [loc0]=f63,-256 stf.spill.nta [loc1]=f55,-256 adds loc2=96*16-32,in0 ;; stf.spill.nta [loc0]=f47,-256 stf.spill.nta [loc1]=f39,-256 adds loc3=96*16-32-128,in0 ;; stf.spill.nta [loc2]=f126,-256 stf.spill.nta [loc3]=f118,-256 ;; stf.spill.nta [loc2]=f110,-256 stf.spill.nta [loc3]=f102,-256 ;; stf.spill.nta [loc2]=f94,-256 stf.spill.nta [loc3]=f86,-256 ;; stf.spill.nta [loc2]=f78,-256 stf.spill.nta [loc3]=f70,-256 ;; stf.spill.nta [loc2]=f62,-256 stf.spill.nta [loc3]=f54,-256 adds loc0=96*16-48,in0 ;; stf.spill.nta [loc2]=f46,-256 stf.spill.nta [loc3]=f38,-256 adds loc1=96*16-48-128,in0 ;; stf.spill.nta [loc0]=f125,-256 stf.spill.nta [loc1]=f117,-256 ;; stf.spill.nta [loc0]=f109,-256 stf.spill.nta [loc1]=f101,-256 ;; stf.spill.nta [loc0]=f93,-256 stf.spill.nta [loc1]=f85,-256 ;; stf.spill.nta [loc0]=f77,-256 stf.spill.nta [loc1]=f69,-256 ;; stf.spill.nta [loc0]=f61,-256 stf.spill.nta [loc1]=f53,-256 adds loc2=96*16-64,in0 ;; stf.spill.nta [loc0]=f45,-256 stf.spill.nta [loc1]=f37,-256 adds loc3=96*16-64-128,in0 ;; stf.spill.nta [loc2]=f124,-256 stf.spill.nta [loc3]=f116,-256 ;; stf.spill.nta [loc2]=f108,-256 stf.spill.nta [loc3]=f100,-256 ;; stf.spill.nta [loc2]=f92,-256 stf.spill.nta [loc3]=f84,-256 ;; stf.spill.nta [loc2]=f76,-256 stf.spill.nta [loc3]=f68,-256 ;; stf.spill.nta [loc2]=f60,-256 stf.spill.nta [loc3]=f52,-256 adds loc0=96*16-80,in0 ;; stf.spill.nta [loc2]=f44,-256 stf.spill.nta [loc3]=f36,-256 adds loc1=96*16-80-128,in0 ;; stf.spill.nta 
[loc0]=f123,-256 stf.spill.nta [loc1]=f115,-256 ;; stf.spill.nta [loc0]=f107,-256 stf.spill.nta [loc1]=f99,-256 ;; stf.spill.nta [loc0]=f91,-256 stf.spill.nta [loc1]=f83,-256 ;; stf.spill.nta [loc0]=f75,-256 stf.spill.nta [loc1]=f67,-256 ;; stf.spill.nta [loc0]=f59,-256 stf.spill.nta [loc1]=f51,-256 adds loc2=96*16-96,in0 ;; stf.spill.nta [loc0]=f43,-256 stf.spill.nta [loc1]=f35,-256 adds loc3=96*16-96-128,in0 ;; stf.spill.nta [loc2]=f122,-256 stf.spill.nta [loc3]=f114,-256 ;; stf.spill.nta [loc2]=f106,-256 stf.spill.nta [loc3]=f98,-256 ;; stf.spill.nta [loc2]=f90,-256 stf.spill.nta [loc3]=f82,-256 ;; stf.spill.nta [loc2]=f74,-256 stf.spill.nta [loc3]=f66,-256 ;; stf.spill.nta [loc2]=f58,-256 stf.spill.nta [loc3]=f50,-256 adds loc0=96*16-112,in0 ;; stf.spill.nta [loc2]=f42,-256 stf.spill.nta [loc3]=f34,-256 adds loc1=96*16-112-128,in0 ;; stf.spill.nta [loc0]=f121,-256 stf.spill.nta [loc1]=f113,-256 ;; stf.spill.nta [loc0]=f105,-256 stf.spill.nta [loc1]=f97,-256 ;; stf.spill.nta [loc0]=f89,-256 stf.spill.nta [loc1]=f81,-256 ;; stf.spill.nta [loc0]=f73,-256 stf.spill.nta [loc1]=f65,-256 ;; stf.spill.nta [loc0]=f57,-256 stf.spill.nta [loc1]=f49,-256 adds loc2=96*16-128,in0 ;; stf.spill.nta [loc0]=f41,-256 stf.spill.nta [loc1]=f33,-256 adds loc3=96*16-128-128,in0 ;; stf.spill.nta [loc2]=f120,-256 stf.spill.nta [loc3]=f112,-256 ;; stf.spill.nta [loc2]=f104,-256 stf.spill.nta [loc3]=f96,-256 ;; stf.spill.nta [loc2]=f88,-256 stf.spill.nta [loc3]=f80,-256 ;; stf.spill.nta [loc2]=f72,-256 stf.spill.nta [loc3]=f64,-256 ;; stf.spill.nta [loc2]=f56,-256 stf.spill.nta [loc3]=f48,-256 ;; stf.spill.nta [loc2]=f40 stf.spill.nta [loc3]=f32 br.ret.sptk.many rp END(__ia64_save_fpu) GLOBAL_ENTRY(__ia64_load_fpu) alloc r2=ar.pfs,1,2,0,0 adds r3=128,in0 adds r14=256,in0 adds r15=384,in0 mov loc0=512 mov loc1=-1024+16 ;; ldf.fill.nta f32=[in0],loc0 ldf.fill.nta f40=[ r3],loc0 ldf.fill.nta f48=[r14],loc0 ldf.fill.nta f56=[r15],loc0 ;; ldf.fill.nta f64=[in0],loc0 ldf.fill.nta f72=[ 
r3],loc0 ldf.fill.nta f80=[r14],loc0 ldf.fill.nta f88=[r15],loc0 ;; ldf.fill.nta f96=[in0],loc1 ldf.fill.nta f104=[ r3],loc1 ldf.fill.nta f112=[r14],loc1 ldf.fill.nta f120=[r15],loc1 ;; ldf.fill.nta f33=[in0],loc0 ldf.fill.nta f41=[ r3],loc0 ldf.fill.nta f49=[r14],loc0 ldf.fill.nta f57=[r15],loc0 ;; ldf.fill.nta f65=[in0],loc0 ldf.fill.nta f73=[ r3],loc0 ldf.fill.nta f81=[r14],loc0 ldf.fill.nta f89=[r15],loc0 ;; ldf.fill.nta f97=[in0],loc1 ldf.fill.nta f105=[ r3],loc1 ldf.fill.nta f113=[r14],loc1 ldf.fill.nta f121=[r15],loc1 ;; ldf.fill.nta f34=[in0],loc0 ldf.fill.nta f42=[ r3],loc0 ldf.fill.nta f50=[r14],loc0 ldf.fill.nta f58=[r15],loc0 ;; ldf.fill.nta f66=[in0],loc0 ldf.fill.nta f74=[ r3],loc0 ldf.fill.nta f82=[r14],loc0 ldf.fill.nta f90=[r15],loc0 ;; ldf.fill.nta f98=[in0],loc1 ldf.fill.nta f106=[ r3],loc1 ldf.fill.nta f114=[r14],loc1 ldf.fill.nta f122=[r15],loc1 ;; ldf.fill.nta f35=[in0],loc0 ldf.fill.nta f43=[ r3],loc0 ldf.fill.nta f51=[r14],loc0 ldf.fill.nta f59=[r15],loc0 ;; ldf.fill.nta f67=[in0],loc0 ldf.fill.nta f75=[ r3],loc0 ldf.fill.nta f83=[r14],loc0 ldf.fill.nta f91=[r15],loc0 ;; ldf.fill.nta f99=[in0],loc1 ldf.fill.nta f107=[ r3],loc1 ldf.fill.nta f115=[r14],loc1 ldf.fill.nta f123=[r15],loc1 ;; ldf.fill.nta f36=[in0],loc0 ldf.fill.nta f44=[ r3],loc0 ldf.fill.nta f52=[r14],loc0 ldf.fill.nta f60=[r15],loc0 ;; ldf.fill.nta f68=[in0],loc0 ldf.fill.nta f76=[ r3],loc0 ldf.fill.nta f84=[r14],loc0 ldf.fill.nta f92=[r15],loc0 ;; ldf.fill.nta f100=[in0],loc1 ldf.fill.nta f108=[ r3],loc1 ldf.fill.nta f116=[r14],loc1 ldf.fill.nta f124=[r15],loc1 ;; ldf.fill.nta f37=[in0],loc0 ldf.fill.nta f45=[ r3],loc0 ldf.fill.nta f53=[r14],loc0 ldf.fill.nta f61=[r15],loc0 ;; ldf.fill.nta f69=[in0],loc0 ldf.fill.nta f77=[ r3],loc0 ldf.fill.nta f85=[r14],loc0 ldf.fill.nta f93=[r15],loc0 ;; ldf.fill.nta f101=[in0],loc1 ldf.fill.nta f109=[ r3],loc1 ldf.fill.nta f117=[r14],loc1 ldf.fill.nta f125=[r15],loc1 ;; ldf.fill.nta f38 =[in0],loc0 ldf.fill.nta f46 =[ r3],loc0 ldf.fill.nta 
f54 =[r14],loc0 ldf.fill.nta f62 =[r15],loc0 ;; ldf.fill.nta f70 =[in0],loc0 ldf.fill.nta f78 =[ r3],loc0 ldf.fill.nta f86 =[r14],loc0 ldf.fill.nta f94 =[r15],loc0 ;; ldf.fill.nta f102=[in0],loc1 ldf.fill.nta f110=[ r3],loc1 ldf.fill.nta f118=[r14],loc1 ldf.fill.nta f126=[r15],loc1 ;; ldf.fill.nta f39 =[in0],loc0 ldf.fill.nta f47 =[ r3],loc0 ldf.fill.nta f55 =[r14],loc0 ldf.fill.nta f63 =[r15],loc0 ;; ldf.fill.nta f71 =[in0],loc0 ldf.fill.nta f79 =[ r3],loc0 ldf.fill.nta f87 =[r14],loc0 ldf.fill.nta f95 =[r15],loc0 ;; ldf.fill.nta f103=[in0] ldf.fill.nta f111=[ r3] ldf.fill.nta f119=[r14] ldf.fill.nta f127=[r15] br.ret.sptk.many rp END(__ia64_load_fpu) GLOBAL_ENTRY(__ia64_init_fpu) stf.spill [sp]=f0 // M3 mov f32=f0 // F nop.b 0 ldfps f33,f34=[sp] // M0 ldfps f35,f36=[sp] // M1 mov f37=f0 // F ;; setf.s f38=r0 // M2 setf.s f39=r0 // M3 mov f40=f0 // F ldfps f41,f42=[sp] // M0 ldfps f43,f44=[sp] // M1 mov f45=f0 // F setf.s f46=r0 // M2 setf.s f47=r0 // M3 mov f48=f0 // F ldfps f49,f50=[sp] // M0 ldfps f51,f52=[sp] // M1 mov f53=f0 // F setf.s f54=r0 // M2 setf.s f55=r0 // M3 mov f56=f0 // F ldfps f57,f58=[sp] // M0 ldfps f59,f60=[sp] // M1 mov f61=f0 // F setf.s f62=r0 // M2 setf.s f63=r0 // M3 mov f64=f0 // F ldfps f65,f66=[sp] // M0 ldfps f67,f68=[sp] // M1 mov f69=f0 // F setf.s f70=r0 // M2 setf.s f71=r0 // M3 mov f72=f0 // F ldfps f73,f74=[sp] // M0 ldfps f75,f76=[sp] // M1 mov f77=f0 // F setf.s f78=r0 // M2 setf.s f79=r0 // M3 mov f80=f0 // F ldfps f81,f82=[sp] // M0 ldfps f83,f84=[sp] // M1 mov f85=f0 // F setf.s f86=r0 // M2 setf.s f87=r0 // M3 mov f88=f0 // F /* * When the instructions are cached, it would be faster to initialize * the remaining registers with simply mov instructions (F-unit). * This gets the time down to ~29 cycles. However, this would use up * 33 bundles, whereas continuing with the above pattern yields * 10 bundles and ~30 cycles. 
*/ ldfps f89,f90=[sp] // M0 ldfps f91,f92=[sp] // M1 mov f93=f0 // F setf.s f94=r0 // M2 setf.s f95=r0 // M3 mov f96=f0 // F ldfps f97,f98=[sp] // M0 ldfps f99,f100=[sp] // M1 mov f101=f0 // F setf.s f102=r0 // M2 setf.s f103=r0 // M3 mov f104=f0 // F ldfps f105,f106=[sp] // M0 ldfps f107,f108=[sp] // M1 mov f109=f0 // F setf.s f110=r0 // M2 setf.s f111=r0 // M3 mov f112=f0 // F ldfps f113,f114=[sp] // M0 ldfps f115,f116=[sp] // M1 mov f117=f0 // F setf.s f118=r0 // M2 setf.s f119=r0 // M3 mov f120=f0 // F ldfps f121,f122=[sp] // M0 ldfps f123,f124=[sp] // M1 mov f125=f0 // F setf.s f126=r0 // M2 setf.s f127=r0 // M3 br.ret.sptk.many rp // F END(__ia64_init_fpu) /* * Switch execution mode from virtual to physical * * Inputs: * r16 = new psr to establish * Output: * r19 = old virtual address of ar.bsp * r20 = old virtual address of sp * * Note: RSE must already be in enforced lazy mode */ GLOBAL_ENTRY(ia64_switch_mode_phys) { rsm psr.i | psr.ic // disable interrupts and interrupt collection mov r15=ip } ;; { flushrs // must be first insn in group srlz.i } ;; mov cr.ipsr=r16 // set new PSR add r3=1f-ia64_switch_mode_phys,r15 mov r19=ar.bsp mov r20=sp mov r14=rp // get return address into a general register ;; // going to physical mode, use tpa to translate virt->phys tpa r17=r19 tpa r3=r3 tpa sp=sp tpa r14=r14 ;; mov r18=ar.rnat // save ar.rnat mov ar.bspstore=r17 // this steps on ar.rnat mov cr.iip=r3 mov cr.ifs=r0 ;; mov ar.rnat=r18 // restore ar.rnat rfi // must be last insn in group ;; 1: mov rp=r14 br.ret.sptk.many rp END(ia64_switch_mode_phys) /* * Switch execution mode from physical to virtual * * Inputs: * r16 = new psr to establish * r19 = new bspstore to establish * r20 = new sp to establish * * Note: RSE must already be in enforced lazy mode */ GLOBAL_ENTRY(ia64_switch_mode_virt) { rsm psr.i | psr.ic // disable interrupts and interrupt collection mov r15=ip } ;; { flushrs // must be first insn in group srlz.i } ;; mov cr.ipsr=r16 // set new PSR add 
r3=1f-ia64_switch_mode_virt,r15 mov r14=rp // get return address into a general register ;; // going to virtual // - for code addresses, set upper bits of addr to KERNEL_START // - for stack addresses, copy from input argument movl r18=KERNEL_START dep r3=0,r3,KERNEL_TR_PAGE_SHIFT,64-KERNEL_TR_PAGE_SHIFT dep r14=0,r14,KERNEL_TR_PAGE_SHIFT,64-KERNEL_TR_PAGE_SHIFT mov sp=r20 ;; or r3=r3,r18 or r14=r14,r18 ;; mov r18=ar.rnat // save ar.rnat mov ar.bspstore=r19 // this steps on ar.rnat mov cr.iip=r3 mov cr.ifs=r0 ;; mov ar.rnat=r18 // restore ar.rnat rfi // must be last insn in group ;; 1: mov rp=r14 br.ret.sptk.many rp END(ia64_switch_mode_virt) GLOBAL_ENTRY(ia64_delay_loop) .prologue { nop 0 // work around GAS unwind info generation bug... .save ar.lc,r2 mov r2=ar.lc .body ;; mov ar.lc=r32 } ;; // force loop to be 32-byte aligned (GAS bug means we cannot use .align // inside function body without corrupting unwind info). { nop 0 } 1: br.cloop.sptk.few 1b ;; mov ar.lc=r2 br.ret.sptk.many rp END(ia64_delay_loop) /* * Return a CPU-local timestamp in nano-seconds. This timestamp is * NOT synchronized across CPUs its return value must never be * compared against the values returned on another CPU. The usage in * kernel/sched/core.c ensures that. * * The return-value of sched_clock() is NOT supposed to wrap-around. * If it did, it would cause some scheduling hiccups (at the worst). * Fortunately, with a 64-bit cycle-counter ticking at 100GHz, even * that would happen only once every 5+ years. * * The code below basically calculates: * * (ia64_get_itc() * local_cpu_data->nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT * * except that the multiplication and the shift are done with 128-bit * intermediate precision so that we can produce a full 64-bit result. 
*/ GLOBAL_ENTRY(ia64_native_sched_clock) addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0 mov.m r9=ar.itc // fetch cycle-counter (35 cyc) ;; ldf8 f8=[r8] ;; setf.sig f9=r9 // certain to stall, so issue it _after_ ldf8... ;; xmpy.lu f10=f9,f8 // calculate low 64 bits of 128-bit product (4 cyc) xmpy.hu f11=f9,f8 // calculate high 64 bits of 128-bit product ;; getf.sig r8=f10 // (5 cyc) getf.sig r9=f11 ;; shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT br.ret.sptk.many rp END(ia64_native_sched_clock) #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE GLOBAL_ENTRY(cycle_to_nsec) alloc r16=ar.pfs,1,0,0,0 addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0 ;; ldf8 f8=[r8] ;; setf.sig f9=r32 ;; xmpy.lu f10=f9,f8 // calculate low 64 bits of 128-bit product (4 cyc) xmpy.hu f11=f9,f8 // calculate high 64 bits of 128-bit product ;; getf.sig r8=f10 // (5 cyc) getf.sig r9=f11 ;; shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT br.ret.sptk.many rp END(cycle_to_nsec) #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ #ifdef CONFIG_IA64_BRL_EMU /* * Assembly routines used by brl_emu.c to set preserved register state. 
*/ #define SET_REG(reg) \ GLOBAL_ENTRY(ia64_set_##reg); \ alloc r16=ar.pfs,1,0,0,0; \ mov reg=r32; \ ;; \ br.ret.sptk.many rp; \ END(ia64_set_##reg) SET_REG(b1); SET_REG(b2); SET_REG(b3); SET_REG(b4); SET_REG(b5); #endif /* CONFIG_IA64_BRL_EMU */ #ifdef CONFIG_SMP #ifdef CONFIG_HOTPLUG_CPU GLOBAL_ENTRY(ia64_jump_to_sal) alloc r16=ar.pfs,1,0,0,0;; rsm psr.i | psr.ic { flushrs srlz.i } tpa r25=in0 movl r18=tlb_purge_done;; DATA_VA_TO_PA(r18);; mov b1=r18 // Return location movl r18=ia64_do_tlb_purge;; DATA_VA_TO_PA(r18);; mov b2=r18 // doing tlb_flush work mov ar.rsc=0 // Put RSE in enforced lazy, LE mode movl r17=1f;; DATA_VA_TO_PA(r17);; mov cr.iip=r17 movl r16=SAL_PSR_BITS_TO_SET;; mov cr.ipsr=r16 mov cr.ifs=r0;; rfi;; // note: this unmask MCA/INIT (psr.mc) 1: /* * Invalidate all TLB data/inst */ br.sptk.many b2;; // jump to tlb purge code tlb_purge_done: RESTORE_REGION_REGS(r25, r17,r18,r19);; RESTORE_REG(b0, r25, r17);; RESTORE_REG(b1, r25, r17);; RESTORE_REG(b2, r25, r17);; RESTORE_REG(b3, r25, r17);; RESTORE_REG(b4, r25, r17);; RESTORE_REG(b5, r25, r17);; ld8 r1=[r25],0x08;; ld8 r12=[r25],0x08;; ld8 r13=[r25],0x08;; RESTORE_REG(ar.fpsr, r25, r17);; RESTORE_REG(ar.pfs, r25, r17);; RESTORE_REG(ar.rnat, r25, r17);; RESTORE_REG(ar.unat, r25, r17);; RESTORE_REG(ar.bspstore, r25, r17);; RESTORE_REG(cr.dcr, r25, r17);; RESTORE_REG(cr.iva, r25, r17);; RESTORE_REG(cr.pta, r25, r17);; srlz.d;; // required not to violate RAW dependency RESTORE_REG(cr.itv, r25, r17);; RESTORE_REG(cr.pmv, r25, r17);; RESTORE_REG(cr.cmcv, r25, r17);; RESTORE_REG(cr.lrr0, r25, r17);; RESTORE_REG(cr.lrr1, r25, r17);; ld8 r4=[r25],0x08;; ld8 r5=[r25],0x08;; ld8 r6=[r25],0x08;; ld8 r7=[r25],0x08;; ld8 r17=[r25],0x08;; mov pr=r17,-1;; RESTORE_REG(ar.lc, r25, r17);; /* * Now Restore floating point regs */ ldf.fill.nta f2=[r25],16;; ldf.fill.nta f3=[r25],16;; ldf.fill.nta f4=[r25],16;; ldf.fill.nta f5=[r25],16;; ldf.fill.nta f16=[r25],16;; ldf.fill.nta f17=[r25],16;; ldf.fill.nta f18=[r25],16;; 
ldf.fill.nta f19=[r25],16;; ldf.fill.nta f20=[r25],16;; ldf.fill.nta f21=[r25],16;; ldf.fill.nta f22=[r25],16;; ldf.fill.nta f23=[r25],16;; ldf.fill.nta f24=[r25],16;; ldf.fill.nta f25=[r25],16;; ldf.fill.nta f26=[r25],16;; ldf.fill.nta f27=[r25],16;; ldf.fill.nta f28=[r25],16;; ldf.fill.nta f29=[r25],16;; ldf.fill.nta f30=[r25],16;; ldf.fill.nta f31=[r25],16;; /* * Now that we have done all the register restores * we are now ready for the big DIVE to SAL Land */ ssm psr.ic;; srlz.d;; br.ret.sptk.many b0;; END(ia64_jump_to_sal) #endif /* CONFIG_HOTPLUG_CPU */ #endif /* CONFIG_SMP */
aixcc-public/challenge-001-exemplar-source
7,880
arch/ia64/kernel/pal.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * PAL Firmware support * IA-64 Processor Programmers Reference Vol 2 * * Copyright (C) 1999 Don Dugger <don.dugger@intel.com> * Copyright (C) 1999 Walt Drummond <drummond@valinux.com> * Copyright (C) 1999-2001, 2003 Hewlett-Packard Co * David Mosberger <davidm@hpl.hp.com> * Stephane Eranian <eranian@hpl.hp.com> * * 05/22/2000 eranian Added support for stacked register calls * 05/24/2000 eranian Added support for physical mode static calls */ #include <asm/asmmacro.h> #include <asm/processor.h> #include <asm/export.h> .data pal_entry_point: data8 ia64_pal_default_handler .text /* * Set the PAL entry point address. This could be written in C code, but we * do it here to keep it all in one module (besides, it's so trivial that it's * not a big deal). * * in0 Address of the PAL entry point (text address, NOT a function * descriptor). */ GLOBAL_ENTRY(ia64_pal_handler_init) alloc r3=ar.pfs,1,0,0,0 movl r2=pal_entry_point ;; st8 [r2]=in0 br.ret.sptk.many rp END(ia64_pal_handler_init) /* * Default PAL call handler. This needs to be coded in assembly because it * uses the static calling convention, i.e., the RSE may not be used and * calls are done via "br.cond" (not "br.call"). */ GLOBAL_ENTRY(ia64_pal_default_handler) mov r8=-1 br.cond.sptk.many rp END(ia64_pal_default_handler) /* * Make a PAL call using the static calling convention. 
* * in0 Index of PAL service * in1 - in3 Remaining PAL arguments */ GLOBAL_ENTRY(ia64_pal_call_static) .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(4) alloc loc1 = ar.pfs,4,5,0,0 movl loc2 = pal_entry_point 1: { mov r28 = in0 mov r29 = in1 mov r8 = ip } ;; ld8 loc2 = [loc2] // loc2 <- entry point adds r8 = 1f-1b,r8 mov loc4=ar.rsc // save RSE configuration ;; mov ar.rsc=0 // put RSE in enforced lazy, LE mode mov loc3 = psr mov loc0 = rp .body mov r30 = in2 mov r31 = in3 mov b7 = loc2 rsm psr.i ;; mov rp = r8 br.cond.sptk.many b7 1: mov psr.l = loc3 mov ar.rsc = loc4 // restore RSE configuration mov ar.pfs = loc1 mov rp = loc0 ;; srlz.d // serialize restoration of psr.l br.ret.sptk.many b0 END(ia64_pal_call_static) EXPORT_SYMBOL(ia64_pal_call_static) /* * Make a PAL call using the stacked registers calling convention. * * Inputs: * in0 Index of PAL service * in2 - in3 Remaining PAL arguments */ GLOBAL_ENTRY(ia64_pal_call_stacked) .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(4) alloc loc1 = ar.pfs,4,4,4,0 movl loc2 = pal_entry_point mov r28 = in0 // Index MUST be copied to r28 mov out0 = in0 // AND in0 of PAL function mov loc0 = rp .body ;; ld8 loc2 = [loc2] // loc2 <- entry point mov out1 = in1 mov out2 = in2 mov out3 = in3 mov loc3 = psr ;; rsm psr.i mov b7 = loc2 ;; br.call.sptk.many rp=b7 // now make the call .ret0: mov psr.l = loc3 mov ar.pfs = loc1 mov rp = loc0 ;; srlz.d // serialize restoration of psr.l br.ret.sptk.many b0 END(ia64_pal_call_stacked) EXPORT_SYMBOL(ia64_pal_call_stacked) /* * Make a physical mode PAL call using the static registers calling convention. * * Inputs: * in0 Index of PAL service * in2 - in3 Remaining PAL arguments * * PSR_LP, PSR_TB, PSR_ID, PSR_DA are never set by the kernel. * So we don't need to clear them. 
*/ #define PAL_PSR_BITS_TO_CLEAR \ (IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_DB | IA64_PSR_RT |\ IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | \ IA64_PSR_DFL | IA64_PSR_DFH) #define PAL_PSR_BITS_TO_SET \ (IA64_PSR_BN) GLOBAL_ENTRY(ia64_pal_call_phys_static) .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(4) alloc loc1 = ar.pfs,4,7,0,0 movl loc2 = pal_entry_point 1: { mov r28 = in0 // copy procedure index mov r8 = ip // save ip to compute branch mov loc0 = rp // save rp } .body ;; ld8 loc2 = [loc2] // loc2 <- entry point mov r29 = in1 // first argument mov r30 = in2 // copy arg2 mov r31 = in3 // copy arg3 ;; mov loc3 = psr // save psr adds r8 = 1f-1b,r8 // calculate return address for call ;; mov loc4=ar.rsc // save RSE configuration dep.z loc2=loc2,0,61 // convert pal entry point to physical tpa r8=r8 // convert rp to physical ;; mov b7 = loc2 // install target to branch reg mov ar.rsc=0 // put RSE in enforced lazy, LE mode movl r16=PAL_PSR_BITS_TO_CLEAR movl r17=PAL_PSR_BITS_TO_SET ;; or loc3=loc3,r17 // add in psr the bits to set ;; andcm r16=loc3,r16 // removes bits to clear from psr br.call.sptk.many rp=ia64_switch_mode_phys mov rp = r8 // install return address (physical) mov loc5 = r19 mov loc6 = r20 br.cond.sptk.many b7 1: mov ar.rsc=0 // put RSE in enforced lazy, LE mode mov r16=loc3 // r16= original psr mov r19=loc5 mov r20=loc6 br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode mov psr.l = loc3 // restore init PSR mov ar.pfs = loc1 mov rp = loc0 ;; mov ar.rsc=loc4 // restore RSE configuration srlz.d // serialize restoration of psr.l br.ret.sptk.many b0 END(ia64_pal_call_phys_static) EXPORT_SYMBOL(ia64_pal_call_phys_static) /* * Make a PAL call using the stacked registers in physical mode. 
* * Inputs: * in0 Index of PAL service * in2 - in3 Remaining PAL arguments */ GLOBAL_ENTRY(ia64_pal_call_phys_stacked) .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(5) alloc loc1 = ar.pfs,5,7,4,0 movl loc2 = pal_entry_point 1: { mov r28 = in0 // copy procedure index mov loc0 = rp // save rp } .body ;; ld8 loc2 = [loc2] // loc2 <- entry point mov loc3 = psr // save psr ;; mov loc4=ar.rsc // save RSE configuration dep.z loc2=loc2,0,61 // convert pal entry point to physical ;; mov ar.rsc=0 // put RSE in enforced lazy, LE mode movl r16=PAL_PSR_BITS_TO_CLEAR movl r17=PAL_PSR_BITS_TO_SET ;; or loc3=loc3,r17 // add in psr the bits to set mov b7 = loc2 // install target to branch reg ;; andcm r16=loc3,r16 // removes bits to clear from psr br.call.sptk.many rp=ia64_switch_mode_phys mov out0 = in0 // first argument mov out1 = in1 // copy arg2 mov out2 = in2 // copy arg3 mov out3 = in3 // copy arg3 mov loc5 = r19 mov loc6 = r20 br.call.sptk.many rp=b7 // now make the call mov ar.rsc=0 // put RSE in enforced lazy, LE mode mov r16=loc3 // r16= original psr mov r19=loc5 mov r20=loc6 br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode mov psr.l = loc3 // restore init PSR mov ar.pfs = loc1 mov rp = loc0 ;; mov ar.rsc=loc4 // restore RSE configuration srlz.d // serialize restoration of psr.l br.ret.sptk.many b0 END(ia64_pal_call_phys_stacked) EXPORT_SYMBOL(ia64_pal_call_phys_stacked) /* * Save scratch fp scratch regs which aren't saved in pt_regs already * (fp10-fp15). * * NOTE: We need to do this since firmware (SAL and PAL) may use any of the * scratch regs fp-low partition. 
* * Inputs: * in0 Address of stack storage for fp regs */ GLOBAL_ENTRY(ia64_save_scratch_fpregs) alloc r3=ar.pfs,1,0,0,0 add r2=16,in0 ;; stf.spill [in0] = f10,32 stf.spill [r2] = f11,32 ;; stf.spill [in0] = f12,32 stf.spill [r2] = f13,32 ;; stf.spill [in0] = f14,32 stf.spill [r2] = f15,32 br.ret.sptk.many rp END(ia64_save_scratch_fpregs) EXPORT_SYMBOL(ia64_save_scratch_fpregs) /* * Load scratch fp scratch regs (fp10-fp15) * * Inputs: * in0 Address of stack storage for fp regs */ GLOBAL_ENTRY(ia64_load_scratch_fpregs) alloc r3=ar.pfs,1,0,0,0 add r2=16,in0 ;; ldf.fill f10 = [in0],32 ldf.fill f11 = [r2],32 ;; ldf.fill f12 = [in0],32 ldf.fill f13 = [r2],32 ;; ldf.fill f14 = [in0],32 ldf.fill f15 = [r2],32 br.ret.sptk.many rp END(ia64_load_scratch_fpregs) EXPORT_SYMBOL(ia64_load_scratch_fpregs)
aixcc-public/challenge-001-exemplar-source
39,235
arch/ia64/kernel/entry.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/ia64/kernel/entry.S * * Kernel entry points. * * Copyright (C) 1998-2003, 2005 Hewlett-Packard Co * David Mosberger-Tang <davidm@hpl.hp.com> * Copyright (C) 1999, 2002-2003 * Asit Mallick <Asit.K.Mallick@intel.com> * Don Dugger <Don.Dugger@intel.com> * Suresh Siddha <suresh.b.siddha@intel.com> * Fenghua Yu <fenghua.yu@intel.com> * Copyright (C) 1999 VA Linux Systems * Copyright (C) 1999 Walt Drummond <drummond@valinux.com> */ /* * ia64_switch_to now places correct virtual mapping in in TR2 for * kernel stack. This allows us to handle interrupts without changing * to physical mode. * * Jonathan Nicklin <nicklin@missioncriticallinux.com> * Patrick O'Rourke <orourke@missioncriticallinux.com> * 11/07/2000 */ /* * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> * VA Linux Systems Japan K.K. * pv_ops. */ /* * Global (preserved) predicate usage on syscall entry/exit path: * * pKStk: See entry.h. * pUStk: See entry.h. * pSys: See entry.h. * pNonSys: !pSys */ #include <linux/pgtable.h> #include <asm/asmmacro.h> #include <asm/cache.h> #include <asm/errno.h> #include <asm/kregs.h> #include <asm/asm-offsets.h> #include <asm/percpu.h> #include <asm/processor.h> #include <asm/thread_info.h> #include <asm/unistd.h> #include <asm/ftrace.h> #include <asm/export.h> #include "minstate.h" /* * execve() is special because in case of success, we need to * setup a null register window frame. 
*/ ENTRY(ia64_execve) /* * Allocate 8 input registers since ptrace() may clobber them */ .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8) alloc loc1=ar.pfs,8,2,3,0 mov loc0=rp .body mov out0=in0 // filename ;; // stop bit between alloc and call mov out1=in1 // argv mov out2=in2 // envp br.call.sptk.many rp=sys_execve .ret0: cmp4.ge p6,p7=r8,r0 mov ar.pfs=loc1 // restore ar.pfs sxt4 r8=r8 // return 64-bit result ;; stf.spill [sp]=f0 mov rp=loc0 (p6) mov ar.pfs=r0 // clear ar.pfs on success (p7) br.ret.sptk.many rp /* * In theory, we'd have to zap this state only to prevent leaking of * security sensitive state (e.g., if current->mm->dumpable is zero). However, * this executes in less than 20 cycles even on Itanium, so it's not worth * optimizing for...). */ mov ar.unat=0; mov ar.lc=0 mov r4=0; mov f2=f0; mov b1=r0 mov r5=0; mov f3=f0; mov b2=r0 mov r6=0; mov f4=f0; mov b3=r0 mov r7=0; mov f5=f0; mov b4=r0 ldf.fill f12=[sp]; mov f13=f0; mov b5=r0 ldf.fill f14=[sp]; ldf.fill f15=[sp]; mov f16=f0 ldf.fill f17=[sp]; ldf.fill f18=[sp]; mov f19=f0 ldf.fill f20=[sp]; ldf.fill f21=[sp]; mov f22=f0 ldf.fill f23=[sp]; ldf.fill f24=[sp]; mov f25=f0 ldf.fill f26=[sp]; ldf.fill f27=[sp]; mov f28=f0 ldf.fill f29=[sp]; ldf.fill f30=[sp]; mov f31=f0 br.ret.sptk.many rp END(ia64_execve) /* * sys_clone2(u64 flags, u64 ustack_base, u64 ustack_size, u64 parent_tidptr, u64 child_tidptr, * u64 tls) */ GLOBAL_ENTRY(sys_clone2) /* * Allocate 8 input registers since ptrace() may clobber them */ .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8) alloc r16=ar.pfs,8,2,6,0 DO_SAVE_SWITCH_STACK mov loc0=rp mov loc1=r16 // save ar.pfs across ia64_clone .body mov out0=in0 mov out1=in1 mov out2=in2 mov out3=in3 mov out4=in4 mov out5=in5 br.call.sptk.many rp=ia64_clone .ret1: .restore sp adds sp=IA64_SWITCH_STACK_SIZE,sp // pop the switch stack mov ar.pfs=loc1 mov rp=loc0 br.ret.sptk.many rp END(sys_clone2) /* * sys_clone(u64 flags, u64 ustack_base, u64 
parent_tidptr, u64 child_tidptr, u64 tls) * Deprecated. Use sys_clone2() instead. */ GLOBAL_ENTRY(sys_clone) /* * Allocate 8 input registers since ptrace() may clobber them */ .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8) alloc r16=ar.pfs,8,2,6,0 DO_SAVE_SWITCH_STACK mov loc0=rp mov loc1=r16 // save ar.pfs across ia64_clone .body mov out0=in0 mov out1=in1 mov out2=16 // stacksize (compensates for 16-byte scratch area) mov out3=in3 mov out4=in4 mov out5=in5 br.call.sptk.many rp=ia64_clone .ret2: .restore sp adds sp=IA64_SWITCH_STACK_SIZE,sp // pop the switch stack mov ar.pfs=loc1 mov rp=loc0 br.ret.sptk.many rp END(sys_clone) /* * prev_task <- ia64_switch_to(struct task_struct *next) * With Ingo's new scheduler, interrupts are disabled when this routine gets * called. The code starting at .map relies on this. The rest of the code * doesn't care about the interrupt masking status. */ GLOBAL_ENTRY(ia64_switch_to) .prologue alloc r16=ar.pfs,1,0,0,0 DO_SAVE_SWITCH_STACK .body adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13 movl r25=init_task mov r27=IA64_KR(CURRENT_STACK) adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0 dep r20=0,in0,61,3 // physical address of "next" ;; st8 [r22]=sp // save kernel stack pointer of old task shr.u r26=r20,IA64_GRANULE_SHIFT cmp.eq p7,p6=r25,in0 ;; /* * If we've already mapped this task's page, we can skip doing it again. 
*/ (p6) cmp.eq p7,p6=r26,r27 (p6) br.cond.dpnt .map ;; .done: ld8 sp=[r21] // load kernel stack pointer of new task MOV_TO_KR(CURRENT, in0, r8, r9) // update "current" application register mov r8=r13 // return pointer to previously running task mov r13=in0 // set "current" pointer ;; DO_LOAD_SWITCH_STACK #ifdef CONFIG_SMP sync.i // ensure "fc"s done by this CPU are visible on other CPUs #endif br.ret.sptk.many rp // boogie on out in new context .map: RSM_PSR_IC(r25) // interrupts (psr.i) are already disabled here movl r25=PAGE_KERNEL ;; srlz.d or r23=r25,r20 // construct PA | page properties mov r25=IA64_GRANULE_SHIFT<<2 ;; MOV_TO_ITIR(p0, r25, r8) MOV_TO_IFA(in0, r8) // VA of next task... ;; mov r25=IA64_TR_CURRENT_STACK MOV_TO_KR(CURRENT_STACK, r26, r8, r9) // remember last page we mapped... ;; itr.d dtr[r25]=r23 // wire in new mapping... SSM_PSR_IC_AND_SRLZ_D(r8, r9) // reenable the psr.ic bit br.cond.sptk .done END(ia64_switch_to) /* * Note that interrupts are enabled during save_switch_stack and load_switch_stack. This * means that we may get an interrupt with "sp" pointing to the new kernel stack while * ar.bspstore is still pointing to the old kernel backing store area. Since ar.rsc, * ar.rnat, ar.bsp, and ar.bspstore are all preserved by interrupts, this is not a * problem. Also, we don't need to specify unwind information for preserved registers * that are not modified in save_switch_stack as the right unwind information is already * specified at the call-site of save_switch_stack. 
*/ /* * save_switch_stack: * - r16 holds ar.pfs * - b7 holds address to return to * - rp (b0) holds return address to save */ GLOBAL_ENTRY(save_switch_stack) .prologue .altrp b7 flushrs // flush dirty regs to backing store (must be first in insn group) .save @priunat,r17 mov r17=ar.unat // preserve caller's .body #ifdef CONFIG_ITANIUM adds r2=16+128,sp adds r3=16+64,sp adds r14=SW(R4)+16,sp ;; st8.spill [r14]=r4,16 // spill r4 lfetch.fault.excl.nt1 [r3],128 ;; lfetch.fault.excl.nt1 [r2],128 lfetch.fault.excl.nt1 [r3],128 ;; lfetch.fault.excl [r2] lfetch.fault.excl [r3] adds r15=SW(R5)+16,sp #else add r2=16+3*128,sp add r3=16,sp add r14=SW(R4)+16,sp ;; st8.spill [r14]=r4,SW(R6)-SW(R4) // spill r4 and prefetch offset 0x1c0 lfetch.fault.excl.nt1 [r3],128 // prefetch offset 0x010 ;; lfetch.fault.excl.nt1 [r3],128 // prefetch offset 0x090 lfetch.fault.excl.nt1 [r2],128 // prefetch offset 0x190 ;; lfetch.fault.excl.nt1 [r3] // prefetch offset 0x110 lfetch.fault.excl.nt1 [r2] // prefetch offset 0x210 adds r15=SW(R5)+16,sp #endif ;; st8.spill [r15]=r5,SW(R7)-SW(R5) // spill r5 mov.m ar.rsc=0 // put RSE in mode: enforced lazy, little endian, pl 0 add r2=SW(F2)+16,sp // r2 = &sw->f2 ;; st8.spill [r14]=r6,SW(B0)-SW(R6) // spill r6 mov.m r18=ar.fpsr // preserve fpsr add r3=SW(F3)+16,sp // r3 = &sw->f3 ;; stf.spill [r2]=f2,32 mov.m r19=ar.rnat mov r21=b0 stf.spill [r3]=f3,32 st8.spill [r15]=r7,SW(B2)-SW(R7) // spill r7 mov r22=b1 ;; // since we're done with the spills, read and save ar.unat: mov.m r29=ar.unat mov.m r20=ar.bspstore mov r23=b2 stf.spill [r2]=f4,32 stf.spill [r3]=f5,32 mov r24=b3 ;; st8 [r14]=r21,SW(B1)-SW(B0) // save b0 st8 [r15]=r23,SW(B3)-SW(B2) // save b2 mov r25=b4 mov r26=b5 ;; st8 [r14]=r22,SW(B4)-SW(B1) // save b1 st8 [r15]=r24,SW(AR_PFS)-SW(B3) // save b3 mov r21=ar.lc // I-unit stf.spill [r2]=f12,32 stf.spill [r3]=f13,32 ;; st8 [r14]=r25,SW(B5)-SW(B4) // save b4 st8 [r15]=r16,SW(AR_LC)-SW(AR_PFS) // save ar.pfs stf.spill [r2]=f14,32 stf.spill [r3]=f15,32 
;; st8 [r14]=r26 // save b5 st8 [r15]=r21 // save ar.lc stf.spill [r2]=f16,32 stf.spill [r3]=f17,32 ;; stf.spill [r2]=f18,32 stf.spill [r3]=f19,32 ;; stf.spill [r2]=f20,32 stf.spill [r3]=f21,32 ;; stf.spill [r2]=f22,32 stf.spill [r3]=f23,32 ;; stf.spill [r2]=f24,32 stf.spill [r3]=f25,32 ;; stf.spill [r2]=f26,32 stf.spill [r3]=f27,32 ;; stf.spill [r2]=f28,32 stf.spill [r3]=f29,32 ;; stf.spill [r2]=f30,SW(AR_UNAT)-SW(F30) stf.spill [r3]=f31,SW(PR)-SW(F31) add r14=SW(CALLER_UNAT)+16,sp ;; st8 [r2]=r29,SW(AR_RNAT)-SW(AR_UNAT) // save ar.unat st8 [r14]=r17,SW(AR_FPSR)-SW(CALLER_UNAT) // save caller_unat mov r21=pr ;; st8 [r2]=r19,SW(AR_BSPSTORE)-SW(AR_RNAT) // save ar.rnat st8 [r3]=r21 // save predicate registers ;; st8 [r2]=r20 // save ar.bspstore st8 [r14]=r18 // save fpsr mov ar.rsc=3 // put RSE back into eager mode, pl 0 br.cond.sptk.many b7 END(save_switch_stack) /* * load_switch_stack: * - "invala" MUST be done at call site (normally in DO_LOAD_SWITCH_STACK) * - b7 holds address to return to * - must not touch r8-r11 */ GLOBAL_ENTRY(load_switch_stack) .prologue .altrp b7 .body lfetch.fault.nt1 [sp] adds r2=SW(AR_BSPSTORE)+16,sp adds r3=SW(AR_UNAT)+16,sp mov ar.rsc=0 // put RSE into enforced lazy mode adds r14=SW(CALLER_UNAT)+16,sp adds r15=SW(AR_FPSR)+16,sp ;; ld8 r27=[r2],(SW(B0)-SW(AR_BSPSTORE)) // bspstore ld8 r29=[r3],(SW(B1)-SW(AR_UNAT)) // unat ;; ld8 r21=[r2],16 // restore b0 ld8 r22=[r3],16 // restore b1 ;; ld8 r23=[r2],16 // restore b2 ld8 r24=[r3],16 // restore b3 ;; ld8 r25=[r2],16 // restore b4 ld8 r26=[r3],16 // restore b5 ;; ld8 r16=[r2],(SW(PR)-SW(AR_PFS)) // ar.pfs ld8 r17=[r3],(SW(AR_RNAT)-SW(AR_LC)) // ar.lc ;; ld8 r28=[r2] // restore pr ld8 r30=[r3] // restore rnat ;; ld8 r18=[r14],16 // restore caller's unat ld8 r19=[r15],24 // restore fpsr ;; ldf.fill f2=[r14],32 ldf.fill f3=[r15],32 ;; ldf.fill f4=[r14],32 ldf.fill f5=[r15],32 ;; ldf.fill f12=[r14],32 ldf.fill f13=[r15],32 ;; ldf.fill f14=[r14],32 ldf.fill f15=[r15],32 ;; ldf.fill 
f16=[r14],32 ldf.fill f17=[r15],32 ;; ldf.fill f18=[r14],32 ldf.fill f19=[r15],32 mov b0=r21 ;; ldf.fill f20=[r14],32 ldf.fill f21=[r15],32 mov b1=r22 ;; ldf.fill f22=[r14],32 ldf.fill f23=[r15],32 mov b2=r23 ;; mov ar.bspstore=r27 mov ar.unat=r29 // establish unat holding the NaT bits for r4-r7 mov b3=r24 ;; ldf.fill f24=[r14],32 ldf.fill f25=[r15],32 mov b4=r25 ;; ldf.fill f26=[r14],32 ldf.fill f27=[r15],32 mov b5=r26 ;; ldf.fill f28=[r14],32 ldf.fill f29=[r15],32 mov ar.pfs=r16 ;; ldf.fill f30=[r14],32 ldf.fill f31=[r15],24 mov ar.lc=r17 ;; ld8.fill r4=[r14],16 ld8.fill r5=[r15],16 mov pr=r28,-1 ;; ld8.fill r6=[r14],16 ld8.fill r7=[r15],16 mov ar.unat=r18 // restore caller's unat mov ar.rnat=r30 // must restore after bspstore but before rsc! mov ar.fpsr=r19 // restore fpsr mov ar.rsc=3 // put RSE back into eager mode, pl 0 br.cond.sptk.many b7 END(load_switch_stack) /* * Invoke a system call, but do some tracing before and after the call. * We MUST preserve the current register frame throughout this routine * because some system calls (such as ia64_execve) directly * manipulate ar.pfs. */ GLOBAL_ENTRY(ia64_trace_syscall) PT_REGS_UNWIND_INFO(0) /* * We need to preserve the scratch registers f6-f11 in case the system * call is sigreturn. 
*/ adds r16=PT(F6)+16,sp adds r17=PT(F7)+16,sp ;; stf.spill [r16]=f6,32 stf.spill [r17]=f7,32 ;; stf.spill [r16]=f8,32 stf.spill [r17]=f9,32 ;; stf.spill [r16]=f10 stf.spill [r17]=f11 br.call.sptk.many rp=syscall_trace_enter // give parent a chance to catch syscall args cmp.lt p6,p0=r8,r0 // check tracehook adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8 adds r3=PT(R10)+16,sp // r3 = &pt_regs.r10 mov r10=0 (p6) br.cond.sptk strace_error // syscall failed -> adds r16=PT(F6)+16,sp adds r17=PT(F7)+16,sp ;; ldf.fill f6=[r16],32 ldf.fill f7=[r17],32 ;; ldf.fill f8=[r16],32 ldf.fill f9=[r17],32 ;; ldf.fill f10=[r16] ldf.fill f11=[r17] // the syscall number may have changed, so re-load it and re-calculate the // syscall entry-point: adds r15=PT(R15)+16,sp // r15 = &pt_regs.r15 (syscall #) ;; ld8 r15=[r15] mov r3=NR_syscalls - 1 ;; adds r15=-1024,r15 movl r16=sys_call_table ;; shladd r20=r15,3,r16 // r20 = sys_call_table + 8*(syscall-1024) cmp.leu p6,p7=r15,r3 ;; (p6) ld8 r20=[r20] // load address of syscall entry point (p7) movl r20=sys_ni_syscall ;; mov b6=r20 br.call.sptk.many rp=b6 // do the syscall .strace_check_retval: cmp.lt p6,p0=r8,r0 // syscall failed? adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8 adds r3=PT(R10)+16,sp // r3 = &pt_regs.r10 mov r10=0 (p6) br.cond.sptk strace_error // syscall failed -> ;; // avoid RAW on r10 .strace_save_retval: .mem.offset 0,0; st8.spill [r2]=r8 // store return value in slot for r8 .mem.offset 8,0; st8.spill [r3]=r10 // clear error indication in slot for r10 br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value .ret3: (pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk (pUStk) rsm psr.i // disable interrupts br.cond.sptk ia64_work_pending_syscall_end strace_error: ld8 r3=[r2] // load pt_regs.r8 sub r9=0,r8 // negate return value to get errno value ;; cmp.ne p6,p0=r3,r0 // is pt_regs.r8!=0? 
adds r3=16,r2 // r3=&pt_regs.r10 ;; (p6) mov r10=-1 (p6) mov r8=r9 br.cond.sptk .strace_save_retval END(ia64_trace_syscall) /* * When traced and returning from sigreturn, we invoke syscall_trace but then * go straight to ia64_leave_kernel rather than ia64_leave_syscall. */ GLOBAL_ENTRY(ia64_strace_leave_kernel) PT_REGS_UNWIND_INFO(0) { /* * Some versions of gas generate bad unwind info if the first instruction of a * procedure doesn't go into the first slot of a bundle. This is a workaround. */ nop.m 0 nop.i 0 br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value } .ret4: br.cond.sptk ia64_leave_kernel END(ia64_strace_leave_kernel) ENTRY(call_payload) .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(0) /* call the kernel_thread payload; fn is in r4, arg - in r5 */ alloc loc1=ar.pfs,0,3,1,0 mov loc0=rp mov loc2=gp mov out0=r5 // arg ld8 r14 = [r4], 8 // fn.address ;; mov b6 = r14 ld8 gp = [r4] // fn.gp ;; br.call.sptk.many rp=b6 // fn(arg) .ret12: mov gp=loc2 mov rp=loc0 mov ar.pfs=loc1 /* ... and if it has returned, we are going to userland */ cmp.ne pKStk,pUStk=r0,r0 br.ret.sptk.many rp END(call_payload) GLOBAL_ENTRY(ia64_ret_from_clone) PT_REGS_UNWIND_INFO(0) { /* * Some versions of gas generate bad unwind info if the first instruction of a * procedure doesn't go into the first slot of a bundle. This is a workaround. */ nop.m 0 nop.i 0 /* * We need to call schedule_tail() to complete the scheduling process. * Called by ia64_switch_to() after ia64_clone()->copy_thread(). r8 contains the * address of the previously executing task. 
*/ br.call.sptk.many rp=ia64_invoke_schedule_tail } .ret8: (pKStk) br.call.sptk.many rp=call_payload adds r2=TI_FLAGS+IA64_TASK_SIZE,r13 ;; ld4 r2=[r2] ;; mov r8=0 and r2=_TIF_SYSCALL_TRACEAUDIT,r2 ;; cmp.ne p6,p0=r2,r0 (p6) br.cond.spnt .strace_check_retval ;; // added stop bits to prevent r8 dependency END(ia64_ret_from_clone) // fall through GLOBAL_ENTRY(ia64_ret_from_syscall) PT_REGS_UNWIND_INFO(0) cmp.ge p6,p7=r8,r0 // syscall executed successfully? adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8 mov r10=r0 // clear error indication in r10 (p7) br.cond.spnt handle_syscall_error // handle potential syscall failure END(ia64_ret_from_syscall) // fall through /* * ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't * need to switch to bank 0 and doesn't restore the scratch registers. * To avoid leaking kernel bits, the scratch registers are set to * the following known-to-be-safe values: * * r1: restored (global pointer) * r2: cleared * r3: 1 (when returning to user-level) * r8-r11: restored (syscall return value(s)) * r12: restored (user-level stack pointer) * r13: restored (user-level thread pointer) * r14: set to __kernel_syscall_via_epc * r15: restored (syscall #) * r16-r17: cleared * r18: user-level b6 * r19: cleared * r20: user-level ar.fpsr * r21: user-level b0 * r22: cleared * r23: user-level ar.bspstore * r24: user-level ar.rnat * r25: user-level ar.unat * r26: user-level ar.pfs * r27: user-level ar.rsc * r28: user-level ip * r29: user-level psr * r30: user-level cfm * r31: user-level pr * f6-f11: cleared * pr: restored (user-level pr) * b0: restored (user-level rp) * b6: restored * b7: set to __kernel_syscall_via_epc * ar.unat: restored (user-level ar.unat) * ar.pfs: restored (user-level ar.pfs) * ar.rsc: restored (user-level ar.rsc) * ar.rnat: restored (user-level ar.rnat) * ar.bspstore: restored (user-level ar.bspstore) * ar.fpsr: restored (user-level ar.fpsr) * ar.ccv: cleared * ar.csd: cleared * ar.ssd: cleared */ 
GLOBAL_ENTRY(ia64_leave_syscall) PT_REGS_UNWIND_INFO(0) /* * work.need_resched etc. mustn't get changed by this CPU before it returns to * user- or fsys-mode, hence we disable interrupts early on. * * p6 controls whether current_thread_info()->flags needs to be check for * extra work. We always check for extra work when returning to user-level. * With CONFIG_PREEMPTION, we also check for extra work when the preempt_count * is 0. After extra work processing has been completed, execution * resumes at ia64_work_processed_syscall with p6 set to 1 if the extra-work-check * needs to be redone. */ #ifdef CONFIG_PREEMPTION RSM_PSR_I(p0, r2, r18) // disable interrupts cmp.eq pLvSys,p0=r0,r0 // pLvSys=1: leave from syscall (pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13 ;; .pred.rel.mutex pUStk,pKStk (pKStk) ld4 r21=[r20] // r21 <- preempt_count (pUStk) mov r21=0 // r21 <- 0 ;; cmp.eq p6,p0=r21,r0 // p6 <- pUStk || (preempt_count == 0) #else /* !CONFIG_PREEMPTION */ RSM_PSR_I(pUStk, r2, r18) cmp.eq pLvSys,p0=r0,r0 // pLvSys=1: leave from syscall (pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk #endif .global ia64_work_processed_syscall; ia64_work_processed_syscall: #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE adds r2=PT(LOADRS)+16,r12 MOV_FROM_ITC(pUStk, p9, r22, r19) // fetch time at leave adds r18=TI_FLAGS+IA64_TASK_SIZE,r13 ;; (p6) ld4 r31=[r18] // load current_thread_info()->flags ld8 r19=[r2],PT(B6)-PT(LOADRS) // load ar.rsc value for "loadrs" adds r3=PT(AR_BSPSTORE)+16,r12 // deferred ;; #else adds r2=PT(LOADRS)+16,r12 adds r3=PT(AR_BSPSTORE)+16,r12 adds r18=TI_FLAGS+IA64_TASK_SIZE,r13 ;; (p6) ld4 r31=[r18] // load current_thread_info()->flags ld8 r19=[r2],PT(B6)-PT(LOADRS) // load ar.rsc value for "loadrs" nop.i 0 ;; #endif mov r16=ar.bsp // M2 get existing backing store pointer ld8 r18=[r2],PT(R9)-PT(B6) // load b6 (p6) and r15=TIF_WORK_MASK,r31 // any work other than TIF_SYSCALL_TRACE? 
;; ld8 r23=[r3],PT(R11)-PT(AR_BSPSTORE) // load ar.bspstore (may be garbage) (p6) cmp4.ne.unc p6,p0=r15, r0 // any special work pending? (p6) br.cond.spnt .work_pending_syscall ;; // start restoring the state saved on the kernel stack (struct pt_regs): ld8 r9=[r2],PT(CR_IPSR)-PT(R9) ld8 r11=[r3],PT(CR_IIP)-PT(R11) (pNonSys) break 0 // bug check: we shouldn't be here if pNonSys is TRUE! ;; invala // M0|1 invalidate ALAT RSM_PSR_I_IC(r28, r29, r30) // M2 turn off interrupts and interruption collection cmp.eq p9,p0=r0,r0 // A set p9 to indicate that we should restore cr.ifs ld8 r29=[r2],16 // M0|1 load cr.ipsr ld8 r28=[r3],16 // M0|1 load cr.iip #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE (pUStk) add r14=TI_AC_LEAVE+IA64_TASK_SIZE,r13 ;; ld8 r30=[r2],16 // M0|1 load cr.ifs ld8 r25=[r3],16 // M0|1 load ar.unat (pUStk) add r15=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13 ;; #else mov r22=r0 // A clear r22 ;; ld8 r30=[r2],16 // M0|1 load cr.ifs ld8 r25=[r3],16 // M0|1 load ar.unat (pUStk) add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13 ;; #endif ld8 r26=[r2],PT(B0)-PT(AR_PFS) // M0|1 load ar.pfs MOV_FROM_PSR(pKStk, r22, r21) // M2 read PSR now that interrupts are disabled nop 0 ;; ld8 r21=[r2],PT(AR_RNAT)-PT(B0) // M0|1 load b0 ld8 r27=[r3],PT(PR)-PT(AR_RSC) // M0|1 load ar.rsc mov f6=f0 // F clear f6 ;; ld8 r24=[r2],PT(AR_FPSR)-PT(AR_RNAT) // M0|1 load ar.rnat (may be garbage) ld8 r31=[r3],PT(R1)-PT(PR) // M0|1 load predicates mov f7=f0 // F clear f7 ;; ld8 r20=[r2],PT(R12)-PT(AR_FPSR) // M0|1 load ar.fpsr ld8.fill r1=[r3],16 // M0|1 load r1 (pUStk) mov r17=1 // A ;; #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE (pUStk) st1 [r15]=r17 // M2|3 #else (pUStk) st1 [r14]=r17 // M2|3 #endif ld8.fill r13=[r3],16 // M0|1 mov f8=f0 // F clear f8 ;; ld8.fill r12=[r2] // M0|1 restore r12 (sp) ld8.fill r15=[r3] // M0|1 restore r15 mov b6=r18 // I0 restore b6 LOAD_PHYS_STACK_REG_SIZE(r17) mov f9=f0 // F clear f9 (pKStk) br.cond.dpnt.many skip_rbs_switch // B srlz.d // M0 ensure interruption 
collection is off (for cover) shr.u r18=r19,16 // I0|1 get byte size of existing "dirty" partition COVER // B add current frame into dirty partition & set cr.ifs ;; #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE mov r19=ar.bsp // M2 get new backing store pointer st8 [r14]=r22 // M save time at leave mov f10=f0 // F clear f10 mov r22=r0 // A clear r22 movl r14=__kernel_syscall_via_epc // X ;; #else mov r19=ar.bsp // M2 get new backing store pointer mov f10=f0 // F clear f10 nop.m 0 movl r14=__kernel_syscall_via_epc // X ;; #endif mov.m ar.csd=r0 // M2 clear ar.csd mov.m ar.ccv=r0 // M2 clear ar.ccv mov b7=r14 // I0 clear b7 (hint with __kernel_syscall_via_epc) mov.m ar.ssd=r0 // M2 clear ar.ssd mov f11=f0 // F clear f11 br.cond.sptk.many rbs_switch // B END(ia64_leave_syscall) GLOBAL_ENTRY(ia64_leave_kernel) PT_REGS_UNWIND_INFO(0) /* * work.need_resched etc. mustn't get changed by this CPU before it returns to * user- or fsys-mode, hence we disable interrupts early on. * * p6 controls whether current_thread_info()->flags needs to be check for * extra work. We always check for extra work when returning to user-level. * With CONFIG_PREEMPTION, we also check for extra work when the preempt_count * is 0. After extra work processing has been completed, execution * resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check * needs to be redone. 
*/ #ifdef CONFIG_PREEMPTION RSM_PSR_I(p0, r17, r31) // disable interrupts cmp.eq p0,pLvSys=r0,r0 // pLvSys=0: leave from kernel (pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13 ;; .pred.rel.mutex pUStk,pKStk (pKStk) ld4 r21=[r20] // r21 <- preempt_count (pUStk) mov r21=0 // r21 <- 0 ;; cmp.eq p6,p0=r21,r0 // p6 <- pUStk || (preempt_count == 0) #else RSM_PSR_I(pUStk, r17, r31) cmp.eq p0,pLvSys=r0,r0 // pLvSys=0: leave from kernel (pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk #endif .work_processed_kernel: adds r17=TI_FLAGS+IA64_TASK_SIZE,r13 ;; (p6) ld4 r31=[r17] // load current_thread_info()->flags adds r21=PT(PR)+16,r12 ;; lfetch [r21],PT(CR_IPSR)-PT(PR) adds r2=PT(B6)+16,r12 adds r3=PT(R16)+16,r12 ;; lfetch [r21] ld8 r28=[r2],8 // load b6 adds r29=PT(R24)+16,r12 ld8.fill r16=[r3],PT(AR_CSD)-PT(R16) adds r30=PT(AR_CCV)+16,r12 (p6) and r19=TIF_WORK_MASK,r31 // any work other than TIF_SYSCALL_TRACE? ;; ld8.fill r24=[r29] ld8 r15=[r30] // load ar.ccv (p6) cmp4.ne.unc p6,p0=r19, r0 // any special work pending? ;; ld8 r29=[r2],16 // load b7 ld8 r30=[r3],16 // load ar.csd (p6) br.cond.spnt .work_pending ;; ld8 r31=[r2],16 // load ar.ssd ld8.fill r8=[r3],16 ;; ld8.fill r9=[r2],16 ld8.fill r10=[r3],PT(R17)-PT(R10) ;; ld8.fill r11=[r2],PT(R18)-PT(R11) ld8.fill r17=[r3],16 ;; ld8.fill r18=[r2],16 ld8.fill r19=[r3],16 ;; ld8.fill r20=[r2],16 ld8.fill r21=[r3],16 mov ar.csd=r30 mov ar.ssd=r31 ;; RSM_PSR_I_IC(r23, r22, r25) // initiate turning off of interrupt and interruption collection invala // invalidate ALAT ;; ld8.fill r22=[r2],24 ld8.fill r23=[r3],24 mov b6=r28 ;; ld8.fill r25=[r2],16 ld8.fill r26=[r3],16 mov b7=r29 ;; ld8.fill r27=[r2],16 ld8.fill r28=[r3],16 ;; ld8.fill r29=[r2],16 ld8.fill r30=[r3],24 ;; ld8.fill r31=[r2],PT(F9)-PT(R31) adds r3=PT(F10)-PT(F6),r3 ;; ldf.fill f9=[r2],PT(F6)-PT(F9) ldf.fill f10=[r3],PT(F8)-PT(F10) ;; ldf.fill f6=[r2],PT(F7)-PT(F6) ;; ldf.fill f7=[r2],PT(F11)-PT(F7) ldf.fill f8=[r3],32 ;; srlz.d // ensure that inter. 
collection is off (VHPT is don't care, since text is pinned) mov ar.ccv=r15 ;; ldf.fill f11=[r2] BSW_0(r2, r3, r15) // switch back to bank 0 (no stop bit required beforehand...) ;; (pUStk) mov r18=IA64_KR(CURRENT)// M2 (12 cycle read latency) adds r16=PT(CR_IPSR)+16,r12 adds r17=PT(CR_IIP)+16,r12 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE .pred.rel.mutex pUStk,pKStk MOV_FROM_PSR(pKStk, r22, r29) // M2 read PSR now that interrupts are disabled MOV_FROM_ITC(pUStk, p9, r22, r29) // M fetch time at leave nop.i 0 ;; #else MOV_FROM_PSR(pKStk, r22, r29) // M2 read PSR now that interrupts are disabled nop.i 0 nop.i 0 ;; #endif ld8 r29=[r16],16 // load cr.ipsr ld8 r28=[r17],16 // load cr.iip ;; ld8 r30=[r16],16 // load cr.ifs ld8 r25=[r17],16 // load ar.unat ;; ld8 r26=[r16],16 // load ar.pfs ld8 r27=[r17],16 // load ar.rsc cmp.eq p9,p0=r0,r0 // set p9 to indicate that we should restore cr.ifs ;; ld8 r24=[r16],16 // load ar.rnat (may be garbage) ld8 r23=[r17],16 // load ar.bspstore (may be garbage) ;; ld8 r31=[r16],16 // load predicates ld8 r21=[r17],16 // load b0 ;; ld8 r19=[r16],16 // load ar.rsc value for "loadrs" ld8.fill r1=[r17],16 // load r1 ;; ld8.fill r12=[r16],16 ld8.fill r13=[r17],16 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE (pUStk) adds r3=TI_AC_LEAVE+IA64_TASK_SIZE,r18 #else (pUStk) adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18 #endif ;; ld8 r20=[r16],16 // ar.fpsr ld8.fill r15=[r17],16 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE (pUStk) adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18 // deferred #endif ;; ld8.fill r14=[r16],16 ld8.fill r2=[r17] (pUStk) mov r17=1 ;; #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE // mmi_ : ld8 st1 shr;; mmi_ : st8 st1 shr;; // mib : mov add br -> mib : ld8 add br // bbb_ : br nop cover;; mbb_ : mov br cover;; // // no one require bsp in r16 if (pKStk) branch is selected. 
(pUStk) st8 [r3]=r22 // save time at leave (pUStk) st1 [r18]=r17 // restore current->thread.on_ustack shr.u r18=r19,16 // get byte size of existing "dirty" partition ;; ld8.fill r3=[r16] // deferred LOAD_PHYS_STACK_REG_SIZE(r17) (pKStk) br.cond.dpnt skip_rbs_switch mov r16=ar.bsp // get existing backing store pointer #else ld8.fill r3=[r16] (pUStk) st1 [r18]=r17 // restore current->thread.on_ustack shr.u r18=r19,16 // get byte size of existing "dirty" partition ;; mov r16=ar.bsp // get existing backing store pointer LOAD_PHYS_STACK_REG_SIZE(r17) (pKStk) br.cond.dpnt skip_rbs_switch #endif /* * Restore user backing store. * * NOTE: alloc, loadrs, and cover can't be predicated. */ (pNonSys) br.cond.dpnt dont_preserve_current_frame COVER // add current frame into dirty partition and set cr.ifs ;; mov r19=ar.bsp // get new backing store pointer rbs_switch: sub r16=r16,r18 // krbs = old bsp - size of dirty partition cmp.ne p9,p0=r0,r0 // clear p9 to skip restore of cr.ifs ;; sub r19=r19,r16 // calculate total byte size of dirty partition add r18=64,r18 // don't force in0-in7 into memory... ;; shl r19=r19,16 // shift size of dirty partition into loadrs position ;; dont_preserve_current_frame: /* * To prevent leaking bits between the kernel and user-space, * we must clear the stacked registers in the "invalid" partition here. * Not pretty, but at least it's fast (3.34 registers/cycle on Itanium, * 5 registers/cycle on McKinley). 
*/ # define pRecurse p6 # define pReturn p7 #ifdef CONFIG_ITANIUM # define Nregs 10 #else # define Nregs 14 #endif alloc loc0=ar.pfs,2,Nregs-2,2,0 shr.u loc1=r18,9 // RNaTslots <= floor(dirtySize / (64*8)) sub r17=r17,r18 // r17 = (physStackedSize + 8) - dirtySize ;; mov ar.rsc=r19 // load ar.rsc to be used for "loadrs" shladd in0=loc1,3,r17 mov in1=0 ;; TEXT_ALIGN(32) rse_clear_invalid: #ifdef CONFIG_ITANIUM // cycle 0 { .mii alloc loc0=ar.pfs,2,Nregs-2,2,0 cmp.lt pRecurse,p0=Nregs*8,in0 // if more than Nregs regs left to clear, (re)curse add out0=-Nregs*8,in0 }{ .mfb add out1=1,in1 // increment recursion count nop.f 0 nop.b 0 // can't do br.call here because of alloc (WAW on CFM) ;; }{ .mfi // cycle 1 mov loc1=0 nop.f 0 mov loc2=0 }{ .mib mov loc3=0 mov loc4=0 (pRecurse) br.call.sptk.many b0=rse_clear_invalid }{ .mfi // cycle 2 mov loc5=0 nop.f 0 cmp.ne pReturn,p0=r0,in1 // if recursion count != 0, we need to do a br.ret }{ .mib mov loc6=0 mov loc7=0 (pReturn) br.ret.sptk.many b0 } #else /* !CONFIG_ITANIUM */ alloc loc0=ar.pfs,2,Nregs-2,2,0 cmp.lt pRecurse,p0=Nregs*8,in0 // if more than Nregs regs left to clear, (re)curse add out0=-Nregs*8,in0 add out1=1,in1 // increment recursion count mov loc1=0 mov loc2=0 ;; mov loc3=0 mov loc4=0 mov loc5=0 mov loc6=0 mov loc7=0 (pRecurse) br.call.dptk.few b0=rse_clear_invalid ;; mov loc8=0 mov loc9=0 cmp.ne pReturn,p0=r0,in1 // if recursion count != 0, we need to do a br.ret mov loc10=0 mov loc11=0 (pReturn) br.ret.dptk.many b0 #endif /* !CONFIG_ITANIUM */ # undef pRecurse # undef pReturn ;; alloc r17=ar.pfs,0,0,0,0 // drop current register frame ;; loadrs ;; skip_rbs_switch: mov ar.unat=r25 // M2 (pKStk) extr.u r22=r22,21,1 // I0 extract current value of psr.pp from r22 (pLvSys)mov r19=r0 // A clear r19 for leave_syscall, no-op otherwise ;; (pUStk) mov ar.bspstore=r23 // M2 (pKStk) dep r29=r22,r29,21,1 // I0 update ipsr.pp with psr.pp (pLvSys)mov r16=r0 // A clear r16 for leave_syscall, no-op otherwise ;; MOV_TO_IPSR(p0, 
r29, r25) // M2 mov ar.pfs=r26 // I0 (pLvSys)mov r17=r0 // A clear r17 for leave_syscall, no-op otherwise MOV_TO_IFS(p9, r30, r25)// M2 mov b0=r21 // I0 (pLvSys)mov r18=r0 // A clear r18 for leave_syscall, no-op otherwise mov ar.fpsr=r20 // M2 MOV_TO_IIP(r28, r25) // M2 nop 0 ;; (pUStk) mov ar.rnat=r24 // M2 must happen with RSE in lazy mode nop 0 (pLvSys)mov r2=r0 mov ar.rsc=r27 // M2 mov pr=r31,-1 // I0 RFI // B /* * On entry: * r20 = &current->thread_info->pre_count (if CONFIG_PREEMPTION) * r31 = current->thread_info->flags * On exit: * p6 = TRUE if work-pending-check needs to be redone * * Interrupts are disabled on entry, reenabled depend on work, and * disabled on exit. */ .work_pending_syscall: add r2=-8,r2 add r3=-8,r3 ;; st8 [r2]=r8 st8 [r3]=r10 .work_pending: tbit.z p6,p0=r31,TIF_NEED_RESCHED // is resched not needed? (p6) br.cond.sptk.few .notify br.call.spnt.many rp=preempt_schedule_irq .ret9: cmp.eq p6,p0=r0,r0 // p6 <- 1 (re-check) (pLvSys)br.cond.sptk.few ia64_work_pending_syscall_end br.cond.sptk.many .work_processed_kernel .notify: (pUStk) br.call.spnt.many rp=notify_resume_user .ret10: cmp.ne p6,p0=r0,r0 // p6 <- 0 (don't re-check) (pLvSys)br.cond.sptk.few ia64_work_pending_syscall_end br.cond.sptk.many .work_processed_kernel .global ia64_work_pending_syscall_end; ia64_work_pending_syscall_end: adds r2=PT(R8)+16,r12 adds r3=PT(R10)+16,r12 ;; ld8 r8=[r2] ld8 r10=[r3] br.cond.sptk.many ia64_work_processed_syscall END(ia64_leave_kernel) ENTRY(handle_syscall_error) /* * Some system calls (e.g., ptrace, mmap) can return arbitrary values which could * lead us to mistake a negative return value as a failed syscall. Those syscall * must deposit a non-zero value in pt_regs.r8 to indicate an error. If * pt_regs.r8 is zero, we assume that the call completed successfully. */ PT_REGS_UNWIND_INFO(0) ld8 r3=[r2] // load pt_regs.r8 ;; cmp.eq p6,p7=r3,r0 // is pt_regs.r8==0? 
;; (p7) mov r10=-1 (p7) sub r8=0,r8 // negate return value to get errno br.cond.sptk ia64_leave_syscall END(handle_syscall_error) /* * Invoke schedule_tail(task) while preserving in0-in7, which may be needed * in case a system call gets restarted. */ GLOBAL_ENTRY(ia64_invoke_schedule_tail) .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8) alloc loc1=ar.pfs,8,2,1,0 mov loc0=rp mov out0=r8 // Address of previous task ;; br.call.sptk.many rp=schedule_tail .ret11: mov ar.pfs=loc1 mov rp=loc0 br.ret.sptk.many rp END(ia64_invoke_schedule_tail) /* * Setup stack and call do_notify_resume_user(), keeping interrupts * disabled. * * Note that pSys and pNonSys need to be set up by the caller. * We declare 8 input registers so the system call args get preserved, * in case we need to restart a system call. */ GLOBAL_ENTRY(notify_resume_user) .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8) alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart! mov r9=ar.unat mov loc0=rp // save return address mov out0=0 // there is no "oldset" adds out1=8,sp // out1=&sigscratch->ar_pfs (pSys) mov out2=1 // out2==1 => we're in a syscall ;; (pNonSys) mov out2=0 // out2==0 => not a syscall .fframe 16 .spillsp ar.unat, 16 st8 [sp]=r9,-16 // allocate space for ar.unat and save it st8 [out1]=loc1,-8 // save ar.pfs, out1=&sigscratch .body br.call.sptk.many rp=do_notify_resume_user .ret15: .restore sp adds sp=16,sp // pop scratch stack space ;; ld8 r9=[sp] // load new unat from sigscratch->scratch_unat mov rp=loc0 ;; mov ar.unat=r9 mov ar.pfs=loc1 br.ret.sptk.many rp END(notify_resume_user) ENTRY(sys_rt_sigreturn) PT_REGS_UNWIND_INFO(0) /* * Allocate 8 input registers since ptrace() may clobber them */ alloc r2=ar.pfs,8,0,1,0 .prologue PT_REGS_SAVES(16) adds sp=-16,sp .body cmp.eq pNonSys,pSys=r0,r0 // sigreturn isn't a normal syscall... 
;; /* * leave_kernel() restores f6-f11 from pt_regs, but since the streamlined * syscall-entry path does not save them we save them here instead. Note: we * don't need to save any other registers that are not saved by the stream-lined * syscall path, because restore_sigcontext() restores them. */ adds r16=PT(F6)+32,sp adds r17=PT(F7)+32,sp ;; stf.spill [r16]=f6,32 stf.spill [r17]=f7,32 ;; stf.spill [r16]=f8,32 stf.spill [r17]=f9,32 ;; stf.spill [r16]=f10 stf.spill [r17]=f11 adds out0=16,sp // out0 = &sigscratch br.call.sptk.many rp=ia64_rt_sigreturn .ret19: .restore sp,0 adds sp=16,sp ;; ld8 r9=[sp] // load new ar.unat mov.sptk b7=r8,ia64_leave_kernel ;; mov ar.unat=r9 br.many b7 END(sys_rt_sigreturn) GLOBAL_ENTRY(ia64_prepare_handle_unaligned) .prologue /* * r16 = fake ar.pfs, we simply need to make sure privilege is still 0 */ mov r16=r0 DO_SAVE_SWITCH_STACK br.call.sptk.many rp=ia64_handle_unaligned // stack frame setup in ivt .ret21: .body DO_LOAD_SWITCH_STACK br.cond.sptk.many rp // goes to ia64_leave_kernel END(ia64_prepare_handle_unaligned) // // unw_init_running(void (*callback)(info, arg), void *arg) // # define EXTRA_FRAME_SIZE ((UNW_FRAME_INFO_SIZE+15)&~15) GLOBAL_ENTRY(unw_init_running) .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2) alloc loc1=ar.pfs,2,3,3,0 ;; ld8 loc2=[in0],8 mov loc0=rp mov r16=loc1 DO_SAVE_SWITCH_STACK .body .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2) .fframe IA64_SWITCH_STACK_SIZE+EXTRA_FRAME_SIZE SWITCH_STACK_SAVES(EXTRA_FRAME_SIZE) adds sp=-EXTRA_FRAME_SIZE,sp .body ;; adds out0=16,sp // &info mov out1=r13 // current adds out2=16+EXTRA_FRAME_SIZE,sp // &switch_stack br.call.sptk.many rp=unw_init_frame_info 1: adds out0=16,sp // &info mov b6=loc2 mov loc2=gp // save gp across indirect function call ;; ld8 gp=[in0] mov out1=in1 // arg br.call.sptk.many rp=b6 // invoke the callback function 1: mov gp=loc2 // restore gp // For now, we don't allow changing registers from within // 
unw_init_running; if we ever want to allow that, we'd // have to do a load_switch_stack here: .restore sp adds sp=IA64_SWITCH_STACK_SIZE+EXTRA_FRAME_SIZE,sp mov ar.pfs=loc1 mov rp=loc0 br.ret.sptk.many rp END(unw_init_running) EXPORT_SYMBOL(unw_init_running) #ifdef CONFIG_FUNCTION_TRACER #ifdef CONFIG_DYNAMIC_FTRACE GLOBAL_ENTRY(_mcount) br ftrace_stub END(_mcount) EXPORT_SYMBOL(_mcount) .here: br.ret.sptk.many b0 GLOBAL_ENTRY(ftrace_caller) alloc out0 = ar.pfs, 8, 0, 4, 0 mov out3 = r0 ;; mov out2 = b0 add r3 = 0x20, r3 mov out1 = r1; br.call.sptk.many b0 = ftrace_patch_gp //this might be called from module, so we must patch gp ftrace_patch_gp: movl gp=__gp mov b0 = r3 ;; .global ftrace_call; ftrace_call: { .mlx nop.m 0x0 movl r3 = .here;; } alloc loc0 = ar.pfs, 4, 4, 2, 0 ;; mov loc1 = b0 mov out0 = b0 mov loc2 = r8 mov loc3 = r15 ;; adds out0 = -MCOUNT_INSN_SIZE, out0 mov out1 = in2 mov b6 = r3 br.call.sptk.many b0 = b6 ;; mov ar.pfs = loc0 mov b0 = loc1 mov r8 = loc2 mov r15 = loc3 br ftrace_stub ;; END(ftrace_caller) #else GLOBAL_ENTRY(_mcount) movl r2 = ftrace_stub movl r3 = ftrace_trace_function;; ld8 r3 = [r3];; ld8 r3 = [r3];; cmp.eq p7,p0 = r2, r3 (p7) br.sptk.many ftrace_stub ;; alloc loc0 = ar.pfs, 4, 4, 2, 0 ;; mov loc1 = b0 mov out0 = b0 mov loc2 = r8 mov loc3 = r15 ;; adds out0 = -MCOUNT_INSN_SIZE, out0 mov out1 = in2 mov b6 = r3 br.call.sptk.many b0 = b6 ;; mov ar.pfs = loc0 mov b0 = loc1 mov r8 = loc2 mov r15 = loc3 br ftrace_stub ;; END(_mcount) #endif GLOBAL_ENTRY(ftrace_stub) mov r3 = b0 movl r2 = _mcount_ret_helper ;; mov b6 = r2 mov b7 = r3 br.ret.sptk.many b6 _mcount_ret_helper: mov b0 = r42 mov r1 = r41 mov ar.pfs = r40 br b7 END(ftrace_stub) #endif /* CONFIG_FUNCTION_TRACER */ #define __SYSCALL(nr, entry) data8 entry .rodata .align 8 .globl sys_call_table sys_call_table: #include <asm/syscall_table.h>
aixcc-public/challenge-001-exemplar-source
2,740
arch/ia64/kernel/gate.lds.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Linker script for gate DSO. The gate pages are an ELF shared object * prelinked to its virtual address, with only one read-only segment and * one execute-only segment (both fit in one page). This script controls * its layout. */ #include <asm/page.h> SECTIONS { . = GATE_ADDR + SIZEOF_HEADERS; .hash : { *(.hash) } :readable .gnu.hash : { *(.gnu.hash) } .dynsym : { *(.dynsym) } .dynstr : { *(.dynstr) } .gnu.version : { *(.gnu.version) } .gnu.version_d : { *(.gnu.version_d) } .gnu.version_r : { *(.gnu.version_r) } .note : { *(.note*) } :readable :note .dynamic : { *(.dynamic) } :readable :dynamic /* * This linker script is used both with -r and with -shared. For * the layouts to match, we need to skip more than enough space for * the dynamic symbol table et al. If this amount is insufficient, * ld -shared will barf. Just increase it here. */ . = GATE_ADDR + 0x600; .data..patch : { __start_gate_mckinley_e9_patchlist = .; *(.data..patch.mckinley_e9) __end_gate_mckinley_e9_patchlist = .; __start_gate_vtop_patchlist = .; *(.data..patch.vtop) __end_gate_vtop_patchlist = .; __start_gate_fsyscall_patchlist = .; *(.data..patch.fsyscall_table) __end_gate_fsyscall_patchlist = .; __start_gate_brl_fsys_bubble_down_patchlist = .; *(.data..patch.brl_fsys_bubble_down) __end_gate_brl_fsys_bubble_down_patchlist = .; } :readable .IA_64.unwind_info : { *(.IA_64.unwind_info*) } .IA_64.unwind : { *(.IA_64.unwind*) } :readable :unwind #ifdef HAVE_BUGGY_SEGREL .text (GATE_ADDR + PAGE_SIZE) : { *(.text) *(.text.*) } :readable #else . = ALIGN(PERCPU_PAGE_SIZE) + (. & (PERCPU_PAGE_SIZE - 1)); .text : { *(.text) *(.text.*) } :epc #endif /DISCARD/ : { *(.got.plt) *(.got) *(.data .data.* .gnu.linkonce.d.*) *(.dynbss) *(.bss .bss.* .gnu.linkonce.b.*) *(__ex_table) *(__mca_table) } } /* * ld does not recognize this name token; use the constant. 
*/ #define PT_IA_64_UNWIND 0x70000001 /* * We must supply the ELF program headers explicitly to get just one * PT_LOAD segment, and set the flags explicitly to make segments read-only. */ PHDRS { readable PT_LOAD FILEHDR PHDRS FLAGS(4); /* PF_R */ #ifndef HAVE_BUGGY_SEGREL epc PT_LOAD FILEHDR PHDRS FLAGS(1); /* PF_X */ #endif dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ note PT_NOTE FLAGS(4); /* PF_R */ unwind PT_IA_64_UNWIND; } /* * This controls what symbols we export from the DSO. */ VERSION { LINUX_2.5 { global: __kernel_syscall_via_break; __kernel_syscall_via_epc; __kernel_sigtramp; local: *; }; } /* The ELF entry point can be used to set the AT_SYSINFO value. */ ENTRY(__kernel_syscall_via_epc)
aixcc-public/challenge-001-exemplar-source
7,602
arch/ia64/kernel/relocate_kernel.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * arch/ia64/kernel/relocate_kernel.S * * Relocate kexec'able kernel and start it * * Copyright (C) 2005 Hewlett-Packard Development Company, L.P. * Copyright (C) 2005 Khalid Aziz <khalid.aziz@hp.com> * Copyright (C) 2005 Intel Corp, Zou Nan hai <nanhai.zou@intel.com> */ #include <linux/pgtable.h> #include <asm/asmmacro.h> #include <asm/kregs.h> #include <asm/page.h> #include <asm/mca_asm.h> /* Must be relocatable PIC code callable as a C function */ GLOBAL_ENTRY(relocate_new_kernel) .prologue alloc r31=ar.pfs,4,0,0,0 .body .reloc_entry: { rsm psr.i| psr.ic mov r2=ip } ;; { flushrs // must be first insn in group srlz.i } ;; dep r2=0,r2,61,3 //to physical address ;; //first switch to physical mode add r3=1f-.reloc_entry, r2 movl r16 = IA64_PSR_AC|IA64_PSR_BN|IA64_PSR_IC mov ar.rsc=0 // put RSE in enforced lazy mode ;; add sp=(memory_stack_end - 16 - .reloc_entry),r2 add r8=(register_stack - .reloc_entry),r2 ;; mov r18=ar.rnat mov ar.bspstore=r8 ;; mov cr.ipsr=r16 mov cr.iip=r3 mov cr.ifs=r0 srlz.i ;; mov ar.rnat=r18 rfi // note: this unmask MCA/INIT (psr.mc) ;; 1: //physical mode code begin mov b6=in1 dep r28=0,in2,61,3 //to physical address // purge all TC entries #define O(member) IA64_CPUINFO_##member##_OFFSET GET_THIS_PADDR(r2, ia64_cpu_info) // load phys addr of cpu_info into r2 ;; addl r17=O(PTCE_STRIDE),r2 addl r2=O(PTCE_BASE),r2 ;; ld8 r18=[r2],(O(PTCE_COUNT)-O(PTCE_BASE));; // r18=ptce_base ld4 r19=[r2],4 // r19=ptce_count[0] ld4 r21=[r17],4 // r21=ptce_stride[0] ;; ld4 r20=[r2] // r20=ptce_count[1] ld4 r22=[r17] // r22=ptce_stride[1] mov r24=r0 ;; adds r20=-1,r20 ;; #undef O 2: cmp.ltu p6,p7=r24,r19 (p7) br.cond.dpnt.few 4f mov ar.lc=r20 3: ptc.e r18 ;; add r18=r22,r18 br.cloop.sptk.few 3b ;; add r18=r21,r18 add r24=1,r24 ;; br.sptk.few 2b 4: srlz.i ;; // purge TR entry for kernel text and data movl r16=KERNEL_START mov r18=KERNEL_TR_PAGE_SHIFT<<2 ;; ptr.i r16, r18 ptr.d r16, r18 ;; srlz.i ;; // purge TR entry 
for pal code mov r16=in3 mov r18=IA64_GRANULE_SHIFT<<2 ;; ptr.i r16,r18 ;; srlz.i ;; // purge TR entry for stack mov r16=IA64_KR(CURRENT_STACK) ;; shl r16=r16,IA64_GRANULE_SHIFT movl r19=PAGE_OFFSET ;; add r16=r19,r16 mov r18=IA64_GRANULE_SHIFT<<2 ;; ptr.d r16,r18 ;; srlz.i ;; //copy segments movl r16=PAGE_MASK mov r30=in0 // in0 is page_list br.sptk.few .dest_page ;; .loop: ld8 r30=[in0], 8;; .dest_page: tbit.z p0, p6=r30, 0;; // 0x1 dest page (p6) and r17=r30, r16 (p6) br.cond.sptk.few .loop;; tbit.z p0, p6=r30, 1;; // 0x2 indirect page (p6) and in0=r30, r16 (p6) br.cond.sptk.few .loop;; tbit.z p0, p6=r30, 2;; // 0x4 end flag (p6) br.cond.sptk.few .end_loop;; tbit.z p6, p0=r30, 3;; // 0x8 source page (p6) br.cond.sptk.few .loop and r18=r30, r16 // simple copy page, may optimize later movl r14=PAGE_SIZE/8 - 1;; mov ar.lc=r14;; 1: ld8 r14=[r18], 8;; st8 [r17]=r14;; fc.i r17 add r17=8, r17 br.ctop.sptk.few 1b br.sptk.few .loop ;; .end_loop: sync.i // for fc.i ;; srlz.i ;; srlz.d ;; br.call.sptk.many b0=b6;; .align 32 memory_stack: .fill 8192, 1, 0 memory_stack_end: register_stack: .fill 8192, 1, 0 register_stack_end: relocate_new_kernel_end: END(relocate_new_kernel) .global relocate_new_kernel_size relocate_new_kernel_size: data8 relocate_new_kernel_end - relocate_new_kernel GLOBAL_ENTRY(ia64_dump_cpu_regs) .prologue alloc loc0=ar.pfs,1,2,0,0 .body mov ar.rsc=0 // put RSE in enforced lazy mode add loc1=4*8, in0 // save r4 and r5 first ;; { flushrs // flush dirty regs to backing store srlz.i } st8 [loc1]=r4, 8 ;; st8 [loc1]=r5, 8 ;; add loc1=32*8, in0 mov r4=ar.rnat ;; st8 [in0]=r0, 8 // r0 st8 [loc1]=r4, 8 // rnat mov r5=pr ;; st8 [in0]=r1, 8 // r1 st8 [loc1]=r5, 8 // pr mov r4=b0 ;; st8 [in0]=r2, 8 // r2 st8 [loc1]=r4, 8 // b0 mov r5=b1; ;; st8 [in0]=r3, 24 // r3 st8 [loc1]=r5, 8 // b1 mov r4=b2 ;; st8 [in0]=r6, 8 // r6 st8 [loc1]=r4, 8 // b2 mov r5=b3 ;; st8 [in0]=r7, 8 // r7 st8 [loc1]=r5, 8 // b3 mov r4=b4 ;; st8 [in0]=r8, 8 // r8 st8 [loc1]=r4, 8 // b4 mov 
r5=b5 ;; st8 [in0]=r9, 8 // r9 st8 [loc1]=r5, 8 // b5 mov r4=b6 ;; st8 [in0]=r10, 8 // r10 st8 [loc1]=r5, 8 // b6 mov r5=b7 ;; st8 [in0]=r11, 8 // r11 st8 [loc1]=r5, 8 // b7 mov r4=b0 ;; st8 [in0]=r12, 8 // r12 st8 [loc1]=r4, 8 // ip mov r5=loc0 ;; st8 [in0]=r13, 8 // r13 extr.u r5=r5, 0, 38 // ar.pfs.pfm mov r4=r0 // user mask ;; st8 [in0]=r14, 8 // r14 st8 [loc1]=r5, 8 // cfm ;; st8 [in0]=r15, 8 // r15 st8 [loc1]=r4, 8 // user mask mov r5=ar.rsc ;; st8 [in0]=r16, 8 // r16 st8 [loc1]=r5, 8 // ar.rsc mov r4=ar.bsp ;; st8 [in0]=r17, 8 // r17 st8 [loc1]=r4, 8 // ar.bsp mov r5=ar.bspstore ;; st8 [in0]=r18, 8 // r18 st8 [loc1]=r5, 8 // ar.bspstore mov r4=ar.rnat ;; st8 [in0]=r19, 8 // r19 st8 [loc1]=r4, 8 // ar.rnat mov r5=ar.ccv ;; st8 [in0]=r20, 8 // r20 st8 [loc1]=r5, 8 // ar.ccv mov r4=ar.unat ;; st8 [in0]=r21, 8 // r21 st8 [loc1]=r4, 8 // ar.unat mov r5 = ar.fpsr ;; st8 [in0]=r22, 8 // r22 st8 [loc1]=r5, 8 // ar.fpsr mov r4 = ar.unat ;; st8 [in0]=r23, 8 // r23 st8 [loc1]=r4, 8 // unat mov r5 = ar.fpsr ;; st8 [in0]=r24, 8 // r24 st8 [loc1]=r5, 8 // fpsr mov r4 = ar.pfs ;; st8 [in0]=r25, 8 // r25 st8 [loc1]=r4, 8 // ar.pfs mov r5 = ar.lc ;; st8 [in0]=r26, 8 // r26 st8 [loc1]=r5, 8 // ar.lc mov r4 = ar.ec ;; st8 [in0]=r27, 8 // r27 st8 [loc1]=r4, 8 // ar.ec mov r5 = ar.csd ;; st8 [in0]=r28, 8 // r28 st8 [loc1]=r5, 8 // ar.csd mov r4 = ar.ssd ;; st8 [in0]=r29, 8 // r29 st8 [loc1]=r4, 8 // ar.ssd ;; st8 [in0]=r30, 8 // r30 ;; st8 [in0]=r31, 8 // r31 mov ar.pfs=loc0 ;; br.ret.sptk.many rp END(ia64_dump_cpu_regs)
aixcc-public/challenge-001-exemplar-source
3,005
arch/ia64/kernel/esi_stub.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * ESI call stub. * * Copyright (C) 2005 Hewlett-Packard Co * Alex Williamson <alex.williamson@hp.com> * * Based on EFI call stub by David Mosberger. The stub is virtually * identical to the one for EFI phys-mode calls, except that ESI * calls may have up to 8 arguments, so they get passed to this routine * through memory. * * This stub allows us to make ESI calls in physical mode with interrupts * turned off. ESI calls may not support calling from virtual mode. * * Google for "Extensible SAL specification" for a document describing the * ESI standard. */ /* * PSR settings as per SAL spec (Chapter 8 in the "IA-64 System * Abstraction Layer Specification", revision 2.6e). Note that * psr.dfl and psr.dfh MUST be cleared, despite what this manual says. * Otherwise, SAL dies whenever it's trying to do an IA-32 BIOS call * (the br.ia instruction fails unless psr.dfl and psr.dfh are * cleared). Fortunately, SAL promises not to touch the floating * point regs, so at least we don't have to save f2-f127. 
*/ #define PSR_BITS_TO_CLEAR \ (IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_RT | \ IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | \ IA64_PSR_DFL | IA64_PSR_DFH) #define PSR_BITS_TO_SET \ (IA64_PSR_BN) #include <asm/processor.h> #include <asm/asmmacro.h> #include <asm/export.h> /* * Inputs: * in0 = address of function descriptor of ESI routine to call * in1 = address of array of ESI parameters * * Outputs: * r8 = result returned by called function */ GLOBAL_ENTRY(esi_call_phys) .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2) alloc loc1=ar.pfs,2,7,8,0 ld8 r2=[in0],8 // load ESI function's entry point mov loc0=rp .body ;; ld8 out0=[in1],8 // ESI params loaded from array ;; // passing all as inputs doesn't work ld8 out1=[in1],8 ;; ld8 out2=[in1],8 ;; ld8 out3=[in1],8 ;; ld8 out4=[in1],8 ;; ld8 out5=[in1],8 ;; ld8 out6=[in1],8 ;; ld8 out7=[in1] mov loc2=gp // save global pointer mov loc4=ar.rsc // save RSE configuration mov ar.rsc=0 // put RSE in enforced lazy, LE mode ;; ld8 gp=[in0] // load ESI function's global pointer movl r16=PSR_BITS_TO_CLEAR mov loc3=psr // save processor status word movl r17=PSR_BITS_TO_SET ;; or loc3=loc3,r17 mov b6=r2 ;; andcm r16=loc3,r16 // get psr with IT, DT, and RT bits cleared br.call.sptk.many rp=ia64_switch_mode_phys .ret0: mov loc5=r19 // old ar.bsp mov loc6=r20 // old sp br.call.sptk.many rp=b6 // call the ESI function .ret1: mov ar.rsc=0 // put RSE in enforced lazy, LE mode mov r16=loc3 // save virtual mode psr mov r19=loc5 // save virtual mode bspstore mov r20=loc6 // save virtual mode sp br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode .ret2: mov ar.rsc=loc4 // restore RSE configuration mov ar.pfs=loc1 mov rp=loc0 mov gp=loc2 br.ret.sptk.many rp END(esi_call_phys) EXPORT_SYMBOL_GPL(esi_call_phys)
aixcc-public/challenge-001-exemplar-source
1,135
arch/ia64/kernel/mca_drv_asm.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * File: mca_drv_asm.S * Purpose: Assembly portion of Generic MCA handling * * Copyright (C) 2004 FUJITSU LIMITED * Copyright (C) 2004 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com> */ #include <linux/threads.h> #include <asm/asmmacro.h> #include <asm/processor.h> #include <asm/ptrace.h> GLOBAL_ENTRY(mca_handler_bhhook) invala // clear RSE ? cover ;; clrrrb ;; alloc r16=ar.pfs,0,2,3,0 // make a new frame mov ar.rsc=0 mov r13=IA64_KR(CURRENT) // current task pointer ;; mov r2=r13 ;; addl r22=IA64_RBS_OFFSET,r2 ;; mov ar.bspstore=r22 addl sp=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r2 ;; adds r2=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13 ;; st1 [r2]=r0 // clear current->thread.on_ustack flag mov loc0=r16 movl loc1=mca_handler_bh // recovery C function ;; mov out0=r8 // poisoned address mov out1=r9 // iip mov out2=r10 // psr mov b6=loc1 ;; mov loc1=rp ssm psr.ic ;; srlz.i ;; ssm psr.i br.call.sptk.many rp=b6 // does not return ... ;; mov ar.pfs=loc0 mov rp=loc1 ;; mov r8=r0 br.ret.sptk.many rp END(mca_handler_bhhook)
aixcc-public/challenge-001-exemplar-source
4,335
arch/ia64/kernel/vmlinux.lds.S
/* SPDX-License-Identifier: GPL-2.0 */ #include <linux/pgtable.h> #include <asm/cache.h> #include <asm/ptrace.h> #include <asm/thread_info.h> #define EMITS_PT_NOTE #define RO_EXCEPTION_TABLE_ALIGN 16 #include <asm-generic/vmlinux.lds.h> OUTPUT_FORMAT("elf64-ia64-little") OUTPUT_ARCH(ia64) ENTRY(phys_start) jiffies = jiffies_64; PHDRS { text PT_LOAD; percpu PT_LOAD; data PT_LOAD; note PT_NOTE; unwind 0x70000001; /* PT_IA_64_UNWIND, but ld doesn't match the name */ } SECTIONS { /* * unwind exit sections must be discarded before * the rest of the sections get included. */ /DISCARD/ : { *(.IA_64.unwind.exit.text) *(.IA_64.unwind_info.exit.text) *(.comment) *(.note) } v = PAGE_OFFSET; /* this symbol is here to make debugging easier... */ phys_start = _start - LOAD_OFFSET; code : { } :text . = KERNEL_START; _text = .; _stext = .; .text : AT(ADDR(.text) - LOAD_OFFSET) { __start_ivt_text = .; *(.text..ivt) __end_ivt_text = .; TEXT_TEXT SCHED_TEXT CPUIDLE_TEXT LOCK_TEXT KPROBES_TEXT IRQENTRY_TEXT SOFTIRQENTRY_TEXT *(.gnu.linkonce.t*) } .text2 : AT(ADDR(.text2) - LOAD_OFFSET) { *(.text2) } #ifdef CONFIG_SMP .text..lock : AT(ADDR(.text..lock) - LOAD_OFFSET) { *(.text..lock) } #endif _etext = .; /* * Read-only data */ /* MCA table */ . = ALIGN(16); __mca_table : AT(ADDR(__mca_table) - LOAD_OFFSET) { __start___mca_table = .; *(__mca_table) __stop___mca_table = .; } .data..patch.phys_stack_reg : AT(ADDR(.data..patch.phys_stack_reg) - LOAD_OFFSET) { __start___phys_stack_reg_patchlist = .; *(.data..patch.phys_stack_reg) __end___phys_stack_reg_patchlist = .; } /* * Global data */ _data = .; /* Unwind info & table: */ . 
= ALIGN(8); .IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - LOAD_OFFSET) { *(.IA_64.unwind_info*) } .IA_64.unwind : AT(ADDR(.IA_64.unwind) - LOAD_OFFSET) { __start_unwind = .; *(.IA_64.unwind*) __end_unwind = .; } :text :unwind code_continues2 : { } :text RO_DATA(4096) .opd : AT(ADDR(.opd) - LOAD_OFFSET) { __start_opd = .; *(.opd) __end_opd = .; } /* * Initialization code and data: */ . = ALIGN(PAGE_SIZE); __init_begin = .; INIT_TEXT_SECTION(PAGE_SIZE) INIT_DATA_SECTION(16) .data..patch.vtop : AT(ADDR(.data..patch.vtop) - LOAD_OFFSET) { __start___vtop_patchlist = .; *(.data..patch.vtop) __end___vtop_patchlist = .; } .data..patch.rse : AT(ADDR(.data..patch.rse) - LOAD_OFFSET) { __start___rse_patchlist = .; *(.data..patch.rse) __end___rse_patchlist = .; } .data..patch.mckinley_e9 : AT(ADDR(.data..patch.mckinley_e9) - LOAD_OFFSET) { __start___mckinley_e9_bundles = .; *(.data..patch.mckinley_e9) __end___mckinley_e9_bundles = .; } #ifdef CONFIG_SMP . = ALIGN(PERCPU_PAGE_SIZE); __cpu0_per_cpu = .; . = . + PERCPU_PAGE_SIZE; /* cpu0 per-cpu space */ #endif . = ALIGN(PAGE_SIZE); __init_end = .; .data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) { PAGE_ALIGNED_DATA(PAGE_SIZE) . = ALIGN(PAGE_SIZE); __start_gate_section = .; *(.data..gate) __stop_gate_section = .; } /* * make sure the gate page doesn't expose * kernel data */ . = ALIGN(PAGE_SIZE); /* Per-cpu data: */ . = ALIGN(PERCPU_PAGE_SIZE); PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu) __phys_per_cpu_start = __per_cpu_load; /* * ensure percpu data fits * into percpu page size */ . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; data : { } :data .data : AT(ADDR(.data) - LOAD_OFFSET) { _sdata = .; INIT_TASK_DATA(PAGE_SIZE) CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES) READ_MOSTLY_DATA(SMP_CACHE_BYTES) DATA_DATA *(.data1) *(.gnu.linkonce.d*) CONSTRUCTORS } BUG_TABLE . = ALIGN(16); /* gp must be 16-byte aligned for exc. 
table */ .got : AT(ADDR(.got) - LOAD_OFFSET) { *(.got.plt) *(.got) } __gp = ADDR(.got) + 0x200000; /* * We want the small data sections together, * so single-instruction offsets can access * them all, and initialized data all before * uninitialized, so we can shorten the * on-disk segment size. */ .sdata : AT(ADDR(.sdata) - LOAD_OFFSET) { *(.sdata) *(.sdata1) *(.srdata) } _edata = .; BSS_SECTION(0, 0, 0) _end = .; code : { } :text STABS_DEBUG DWARF_DEBUG ELF_DETAILS /* Default discards */ DISCARDS }
aixcc-public/challenge-001-exemplar-source
11,606
arch/ia64/kernel/gate.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * This file contains the code that gets mapped at the upper end of each task's text * region. For now, it contains the signal trampoline code only. * * Copyright (C) 1999-2003 Hewlett-Packard Co * David Mosberger-Tang <davidm@hpl.hp.com> */ #include <asm/asmmacro.h> #include <asm/errno.h> #include <asm/asm-offsets.h> #include <asm/sigcontext.h> #include <asm/unistd.h> #include <asm/kregs.h> #include <asm/page.h> #include <asm/native/inst.h> /* * We can't easily refer to symbols inside the kernel. To avoid full runtime relocation, * complications with the linker (which likes to create PLT stubs for branches * to targets outside the shared object) and to avoid multi-phase kernel builds, we * simply create minimalistic "patch lists" in special ELF sections. */ .section ".data..patch.fsyscall_table", "a" .previous #define LOAD_FSYSCALL_TABLE(reg) \ [1:] movl reg=0; \ .xdata4 ".data..patch.fsyscall_table", 1b-. .section ".data..patch.brl_fsys_bubble_down", "a" .previous #define BRL_COND_FSYS_BUBBLE_DOWN(pr) \ [1:](pr)brl.cond.sptk 0; \ ;; \ .xdata4 ".data..patch.brl_fsys_bubble_down", 1b-. GLOBAL_ENTRY(__kernel_syscall_via_break) .prologue .altrp b6 .body /* * Note: for (fast) syscall restart to work, the break instruction must be * the first one in the bundle addressed by syscall_via_break. 
*/ { .mib break 0x100000 nop.i 0 br.ret.sptk.many b6 } END(__kernel_syscall_via_break) # define ARG0_OFF (16 + IA64_SIGFRAME_ARG0_OFFSET) # define ARG1_OFF (16 + IA64_SIGFRAME_ARG1_OFFSET) # define ARG2_OFF (16 + IA64_SIGFRAME_ARG2_OFFSET) # define SIGHANDLER_OFF (16 + IA64_SIGFRAME_HANDLER_OFFSET) # define SIGCONTEXT_OFF (16 + IA64_SIGFRAME_SIGCONTEXT_OFFSET) # define FLAGS_OFF IA64_SIGCONTEXT_FLAGS_OFFSET # define CFM_OFF IA64_SIGCONTEXT_CFM_OFFSET # define FR6_OFF IA64_SIGCONTEXT_FR6_OFFSET # define BSP_OFF IA64_SIGCONTEXT_AR_BSP_OFFSET # define RNAT_OFF IA64_SIGCONTEXT_AR_RNAT_OFFSET # define UNAT_OFF IA64_SIGCONTEXT_AR_UNAT_OFFSET # define FPSR_OFF IA64_SIGCONTEXT_AR_FPSR_OFFSET # define PR_OFF IA64_SIGCONTEXT_PR_OFFSET # define RP_OFF IA64_SIGCONTEXT_IP_OFFSET # define SP_OFF IA64_SIGCONTEXT_R12_OFFSET # define RBS_BASE_OFF IA64_SIGCONTEXT_RBS_BASE_OFFSET # define LOADRS_OFF IA64_SIGCONTEXT_LOADRS_OFFSET # define base0 r2 # define base1 r3 /* * When we get here, the memory stack looks like this: * * +===============================+ * | | * // struct sigframe // * | | * +-------------------------------+ <-- sp+16 * | 16 byte of scratch | * | space | * +-------------------------------+ <-- sp * * The register stack looks _exactly_ the way it looked at the time the signal * occurred. In other words, we're treading on a potential mine-field: each * incoming general register may be a NaT value (including sp, in which case the * process ends up dying with a SIGSEGV). * * The first thing need to do is a cover to get the registers onto the backing * store. Once that is done, we invoke the signal handler which may modify some * of the machine state. After returning from the signal handler, we return * control to the previous context by executing a sigreturn system call. A signal * handler may call the rt_sigreturn() function to directly return to a given * sigcontext. 
However, the user-level sigreturn() needs to do much more than * calling the rt_sigreturn() system call as it needs to unwind the stack to * restore preserved registers that may have been saved on the signal handler's * call stack. */ #define SIGTRAMP_SAVES \ .unwabi 3, 's'; /* mark this as a sigtramp handler (saves scratch regs) */ \ .unwabi @svr4, 's'; /* backwards compatibility with old unwinders (remove in v2.7) */ \ .savesp ar.unat, UNAT_OFF+SIGCONTEXT_OFF; \ .savesp ar.fpsr, FPSR_OFF+SIGCONTEXT_OFF; \ .savesp pr, PR_OFF+SIGCONTEXT_OFF; \ .savesp rp, RP_OFF+SIGCONTEXT_OFF; \ .savesp ar.pfs, CFM_OFF+SIGCONTEXT_OFF; \ .vframesp SP_OFF+SIGCONTEXT_OFF GLOBAL_ENTRY(__kernel_sigtramp) // describe the state that is active when we get here: .prologue SIGTRAMP_SAVES .body .label_state 1 adds base0=SIGHANDLER_OFF,sp adds base1=RBS_BASE_OFF+SIGCONTEXT_OFF,sp br.call.sptk.many rp=1f 1: ld8 r17=[base0],(ARG0_OFF-SIGHANDLER_OFF) // get pointer to signal handler's plabel ld8 r15=[base1] // get address of new RBS base (or NULL) cover // push args in interrupted frame onto backing store ;; cmp.ne p1,p0=r15,r0 // do we need to switch rbs? 
(note: pr is saved by kernel) mov.m r9=ar.bsp // fetch ar.bsp .spillsp.p p1, ar.rnat, RNAT_OFF+SIGCONTEXT_OFF (p1) br.cond.spnt setup_rbs // yup -> (clobbers p8, r14-r16, and r18-r20) back_from_setup_rbs: alloc r8=ar.pfs,0,0,3,0 ld8 out0=[base0],16 // load arg0 (signum) adds base1=(ARG1_OFF-(RBS_BASE_OFF+SIGCONTEXT_OFF)),base1 ;; ld8 out1=[base1] // load arg1 (siginfop) ld8 r10=[r17],8 // get signal handler entry point ;; ld8 out2=[base0] // load arg2 (sigcontextp) ld8 gp=[r17] // get signal handler's global pointer adds base0=(BSP_OFF+SIGCONTEXT_OFF),sp ;; .spillsp ar.bsp, BSP_OFF+SIGCONTEXT_OFF st8 [base0]=r9 // save sc_ar_bsp adds base0=(FR6_OFF+SIGCONTEXT_OFF),sp adds base1=(FR6_OFF+16+SIGCONTEXT_OFF),sp ;; stf.spill [base0]=f6,32 stf.spill [base1]=f7,32 ;; stf.spill [base0]=f8,32 stf.spill [base1]=f9,32 mov b6=r10 ;; stf.spill [base0]=f10,32 stf.spill [base1]=f11,32 ;; stf.spill [base0]=f12,32 stf.spill [base1]=f13,32 ;; stf.spill [base0]=f14,32 stf.spill [base1]=f15,32 br.call.sptk.many rp=b6 // call the signal handler .ret0: adds base0=(BSP_OFF+SIGCONTEXT_OFF),sp ;; ld8 r15=[base0] // fetch sc_ar_bsp mov r14=ar.bsp ;; cmp.ne p1,p0=r14,r15 // do we need to restore the rbs? 
(p1) br.cond.spnt restore_rbs // yup -> (clobbers r14-r18, f6 & f7) ;; back_from_restore_rbs: adds base0=(FR6_OFF+SIGCONTEXT_OFF),sp adds base1=(FR6_OFF+16+SIGCONTEXT_OFF),sp ;; ldf.fill f6=[base0],32 ldf.fill f7=[base1],32 ;; ldf.fill f8=[base0],32 ldf.fill f9=[base1],32 ;; ldf.fill f10=[base0],32 ldf.fill f11=[base1],32 ;; ldf.fill f12=[base0],32 ldf.fill f13=[base1],32 ;; ldf.fill f14=[base0],32 ldf.fill f15=[base1],32 mov r15=__NR_rt_sigreturn .restore sp // pop .prologue break __BREAK_SYSCALL .prologue SIGTRAMP_SAVES setup_rbs: mov ar.rsc=0 // put RSE into enforced lazy mode ;; .save ar.rnat, r19 mov r19=ar.rnat // save RNaT before switching backing store area adds r14=(RNAT_OFF+SIGCONTEXT_OFF),sp mov r18=ar.bspstore mov ar.bspstore=r15 // switch over to new register backing store area ;; .spillsp ar.rnat, RNAT_OFF+SIGCONTEXT_OFF st8 [r14]=r19 // save sc_ar_rnat .body mov.m r16=ar.bsp // sc_loadrs <- (new bsp - new bspstore) << 16 adds r14=(LOADRS_OFF+SIGCONTEXT_OFF),sp ;; invala sub r15=r16,r15 extr.u r20=r18,3,6 ;; mov ar.rsc=0xf // set RSE into eager mode, pl 3 cmp.eq p8,p0=63,r20 shl r15=r15,16 ;; st8 [r14]=r15 // save sc_loadrs (p8) st8 [r18]=r19 // if bspstore points at RNaT slot, store RNaT there now .restore sp // pop .prologue br.cond.sptk back_from_setup_rbs .prologue SIGTRAMP_SAVES .spillsp ar.rnat, RNAT_OFF+SIGCONTEXT_OFF .body restore_rbs: // On input: // r14 = bsp1 (bsp at the time of return from signal handler) // r15 = bsp0 (bsp at the time the signal occurred) // // Here, we need to calculate bspstore0, the value that ar.bspstore needs // to be set to, based on bsp0 and the size of the dirty partition on // the alternate stack (sc_loadrs >> 16). This can be done with the // following algorithm: // // bspstore0 = rse_skip_regs(bsp0, -rse_num_regs(bsp1 - (loadrs >> 19), bsp1)); // // This is what the code below does. 
// alloc r2=ar.pfs,0,0,0,0 // alloc null frame adds r16=(LOADRS_OFF+SIGCONTEXT_OFF),sp adds r18=(RNAT_OFF+SIGCONTEXT_OFF),sp ;; ld8 r17=[r16] ld8 r16=[r18] // get new rnat extr.u r18=r15,3,6 // r18 <- rse_slot_num(bsp0) ;; mov ar.rsc=r17 // put RSE into enforced lazy mode shr.u r17=r17,16 ;; sub r14=r14,r17 // r14 (bspstore1) <- bsp1 - (sc_loadrs >> 16) shr.u r17=r17,3 // r17 <- (sc_loadrs >> 19) ;; loadrs // restore dirty partition extr.u r14=r14,3,6 // r14 <- rse_slot_num(bspstore1) ;; add r14=r14,r17 // r14 <- rse_slot_num(bspstore1) + (sc_loadrs >> 19) ;; shr.u r14=r14,6 // r14 <- (rse_slot_num(bspstore1) + (sc_loadrs >> 19))/0x40 ;; sub r14=r14,r17 // r14 <- -rse_num_regs(bspstore1, bsp1) movl r17=0x8208208208208209 ;; add r18=r18,r14 // r18 (delta) <- rse_slot_num(bsp0) - rse_num_regs(bspstore1,bsp1) setf.sig f7=r17 cmp.lt p7,p0=r14,r0 // p7 <- (r14 < 0)? ;; (p7) adds r18=-62,r18 // delta -= 62 ;; setf.sig f6=r18 ;; xmpy.h f6=f6,f7 ;; getf.sig r17=f6 ;; add r17=r17,r18 shr r18=r18,63 ;; shr r17=r17,5 ;; sub r17=r17,r18 // r17 = delta/63 ;; add r17=r14,r17 // r17 <- delta/63 - rse_num_regs(bspstore1, bsp1) ;; shladd r15=r17,3,r15 // r15 <- bsp0 + 8*(delta/63 - rse_num_regs(bspstore1, bsp1)) ;; mov ar.bspstore=r15 // switch back to old register backing store area ;; mov ar.rnat=r16 // restore RNaT mov ar.rsc=0xf // (will be restored later on from sc_ar_rsc) // invala not necessary as that will happen when returning to user-mode br.cond.sptk back_from_restore_rbs END(__kernel_sigtramp) /* * On entry: * r11 = saved ar.pfs * r15 = system call # * b0 = saved return address * b6 = return address * On exit: * r11 = saved ar.pfs * r15 = system call # * b0 = saved return address * all other "scratch" registers: undefined * all "preserved" registers: same as on entry */ GLOBAL_ENTRY(__kernel_syscall_via_epc) .prologue .altrp b6 .body { /* * Note: the kernel cannot assume that the first two instructions in this * bundle get executed. 
The remaining code must be safe even if * they do not get executed. */ adds r17=-1024,r15 // A mov r10=0 // A default to successful syscall execution epc // B causes split-issue } ;; RSM_PSR_BE_I(r20, r22) // M2 (5 cyc to srlz.d) LOAD_FSYSCALL_TABLE(r14) // X ;; mov r16=IA64_KR(CURRENT) // M2 (12 cyc) shladd r18=r17,3,r14 // A mov r19=NR_syscalls-1 // A ;; lfetch [r18] // M0|1 MOV_FROM_PSR(p0, r29, r8) // M2 (12 cyc) // If r17 is a NaT, p6 will be zero cmp.geu p6,p7=r19,r17 // A (sysnr > 0 && sysnr < 1024+NR_syscalls)? ;; mov r21=ar.fpsr // M2 (12 cyc) tnat.nz p10,p9=r15 // I0 mov.i r26=ar.pfs // I0 (would stall anyhow due to srlz.d...) ;; srlz.d // M0 (forces split-issue) ensure PSR.BE==0 (p6) ld8 r18=[r18] // M0|1 nop.i 0 ;; nop.m 0 (p6) tbit.z.unc p8,p0=r18,0 // I0 (dual-issues with "mov b7=r18"!) nop.i 0 ;; SSM_PSR_I(p8, p14, r25) (p6) mov b7=r18 // I0 (p8) br.dptk.many b7 // B mov r27=ar.rsc // M2 (12 cyc) /* * brl.cond doesn't work as intended because the linker would convert this branch * into a branch to a PLT. Perhaps there will be a way to avoid this with some * future version of the linker. In the meantime, we just use an indirect branch * instead. */ #ifdef CONFIG_ITANIUM (p6) add r14=-8,r14 // r14 <- addr of fsys_bubble_down entry ;; (p6) ld8 r14=[r14] // r14 <- fsys_bubble_down ;; (p6) mov b7=r14 (p6) br.sptk.many b7 #else BRL_COND_FSYS_BUBBLE_DOWN(p6) #endif SSM_PSR_I(p0, p14, r10) mov r10=-1 (p10) mov r8=EINVAL (p9) mov r8=ENOSYS FSYS_RETURN END(__kernel_syscall_via_epc)
aixcc-public/challenge-001-exemplar-source
24,625
arch/ia64/kernel/fsys.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * This file contains the light-weight system call handlers (fsyscall-handlers). * * Copyright (C) 2003 Hewlett-Packard Co * David Mosberger-Tang <davidm@hpl.hp.com> * * 25-Sep-03 davidm Implement fsys_rt_sigprocmask(). * 18-Feb-03 louisk Implement fsys_gettimeofday(). * 28-Feb-03 davidm Fixed several bugs in fsys_gettimeofday(). Tuned it some more, * probably broke it along the way... ;-) * 13-Jul-04 clameter Implement fsys_clock_gettime and revise fsys_gettimeofday to make * it capable of using memory based clocks without falling back to C code. * 08-Feb-07 Fenghua Yu Implement fsys_getcpu. * */ #include <asm/asmmacro.h> #include <asm/errno.h> #include <asm/asm-offsets.h> #include <asm/percpu.h> #include <asm/thread_info.h> #include <asm/sal.h> #include <asm/signal.h> #include <asm/unistd.h> #include "entry.h" #include <asm/native/inst.h> /* * See Documentation/ia64/fsys.rst for details on fsyscalls. * * On entry to an fsyscall handler: * r10 = 0 (i.e., defaults to "successful syscall return") * r11 = saved ar.pfs (a user-level value) * r15 = system call number * r16 = "current" task pointer (in normal kernel-mode, this is in r13) * r32-r39 = system call arguments * b6 = return address (a user-level value) * ar.pfs = previous frame-state (a user-level value) * PSR.be = cleared to zero (i.e., little-endian byte order is in effect) * all other registers may contain values passed in from user-mode * * On return from an fsyscall handler: * r11 = saved ar.pfs (as passed into the fsyscall handler) * r15 = system call number (as passed into the fsyscall handler) * r32-r39 = system call arguments (as passed into the fsyscall handler) * b6 = return address (as passed into the fsyscall handler) * ar.pfs = previous frame-state (as passed into the fsyscall handler) */ ENTRY(fsys_ni_syscall) .prologue .altrp b6 .body mov r8=ENOSYS mov r10=-1 FSYS_RETURN END(fsys_ni_syscall) ENTRY(fsys_getpid) .prologue .altrp b6 .body add 
r17=IA64_TASK_SIGNAL_OFFSET,r16 ;; ld8 r17=[r17] // r17 = current->signal add r9=TI_FLAGS+IA64_TASK_SIZE,r16 ;; ld4 r9=[r9] add r17=IA64_SIGNAL_PIDS_TGID_OFFSET,r17 ;; and r9=TIF_ALLWORK_MASK,r9 ld8 r17=[r17] // r17 = current->signal->pids[PIDTYPE_TGID] ;; add r8=IA64_PID_LEVEL_OFFSET,r17 ;; ld4 r8=[r8] // r8 = pid->level add r17=IA64_PID_UPID_OFFSET,r17 // r17 = &pid->numbers[0] ;; shl r8=r8,IA64_UPID_SHIFT ;; add r17=r17,r8 // r17 = &pid->numbers[pid->level] ;; ld4 r8=[r17] // r8 = pid->numbers[pid->level].nr ;; mov r17=0 ;; cmp.ne p8,p0=0,r9 (p8) br.spnt.many fsys_fallback_syscall FSYS_RETURN END(fsys_getpid) ENTRY(fsys_set_tid_address) .prologue .altrp b6 .body add r9=TI_FLAGS+IA64_TASK_SIZE,r16 add r17=IA64_TASK_THREAD_PID_OFFSET,r16 ;; ld4 r9=[r9] tnat.z p6,p7=r32 // check argument register for being NaT ld8 r17=[r17] // r17 = current->thread_pid ;; and r9=TIF_ALLWORK_MASK,r9 add r8=IA64_PID_LEVEL_OFFSET,r17 add r18=IA64_TASK_CLEAR_CHILD_TID_OFFSET,r16 ;; ld4 r8=[r8] // r8 = pid->level add r17=IA64_PID_UPID_OFFSET,r17 // r17 = &pid->numbers[0] ;; shl r8=r8,IA64_UPID_SHIFT ;; add r17=r17,r8 // r17 = &pid->numbers[pid->level] ;; ld4 r8=[r17] // r8 = pid->numbers[pid->level].nr ;; cmp.ne p8,p0=0,r9 mov r17=-1 ;; (p6) st8 [r18]=r32 (p7) st8 [r18]=r17 (p8) br.spnt.many fsys_fallback_syscall ;; mov r17=0 // i must not leak kernel bits... mov r18=0 // i must not leak kernel bits... 
	FSYS_RETURN
END(fsys_set_tid_address)

	// Build-time guards: the hand-scheduled code below hard-codes the
	// layout of these structures via asm-offsets; refuse to build if
	// the C-side layout has drifted.
#if IA64_GTOD_SEQ_OFFSET !=0
#error fsys_gettimeofday incompatible with changes to struct fsyscall_gtod_data_t
#endif
#if IA64_ITC_JITTER_OFFSET !=0
#error fsys_gettimeofday incompatible with changes to struct itc_jitter_data_t
#endif
#define CLOCK_REALTIME 0
#define CLOCK_MONOTONIC 1
#define CLOCK_DIVIDE_BY_1000 0x4000
#define CLOCK_ADD_MONOTONIC 0x8000

	// Light-weight (no kernel-stack) gettimeofday.
	// In:  r32 = struct timeval __user *, r33 = timezone ptr (only
	//      NaT-checked here).  On failure: r8 = errno, r10 = -1.
ENTRY(fsys_gettimeofday)
	.prologue
	.altrp b6
	.body
	mov r31 = r32
	tnat.nz p6,p0 = r33		// guard against NaT argument
(p6)	br.cond.spnt.few .fail_einval
	mov r30 = CLOCK_DIVIDE_BY_1000	// gettimeofday wants usec, not nsec
	;;
.gettime:
	// Register map
	// Incoming r31 = pointer to address where to place result
	//          r30 = flags determining how time is processed
	// r2,r3 = temp r4-r7 preserved
	// r8 = result nanoseconds
	// r9 = result seconds
	// r10 = temporary storage for clock difference
	// r11 = preserved: saved ar.pfs
	// r12 = preserved: memory stack
	// r13 = preserved: thread pointer
	// r14 = address of mask / mask value
	// r15 = preserved: system call number
	// r16 = preserved: current task pointer
	// r17 = (not used)
	// r18 = (not used)
	// r19 = address of itc_lastcycle
	// r20 = struct fsyscall_gtod_data (= address of gtod_lock.sequence)
	// r21 = address of mmio_ptr
	// r22 = address of wall_time or monotonic_time
	// r23 = address of shift / value
	// r24 = address mult factor / cycle_last value
	// r25 = itc_lastcycle value
	// r26 = address clocksource cycle_last
	// r27 = (not used)
	// r28 = sequence number at the beginning of critical section
	// r29 = address of itc_jitter
	// r30 = time processing flags / memory address
	// r31 = pointer to result
	// Predicates
	// p6,p7 short term use
	// p8 = timesource ar.itc
	// p9 = timesource mmio64
	// p10 = timesource mmio32 - not used
	// p11 = timesource not to be handled by asm code
	// p12 = memory time source ( = p9 | p10) - not used
	// p13 = do cmpxchg with itc_lastcycle
	// p14 = Divide by 1000
	// p15 = Add monotonic
	//
	// Note that instructions are optimized for McKinley. McKinley can
	// process two bundles simultaneously and therefore we continuously
	// try to feed the CPU two bundles and then a stop.
	add r2 = TI_FLAGS+IA64_TASK_SIZE,r16
	tnat.nz p6,p0 = r31		// guard against NaT argument
(p6)	br.cond.spnt.few .fail_einval
	movl r20 = fsyscall_gtod_data	// load fsyscall gettimeofday data address
	;;
	ld4 r2 = [r2]			// process work pending flags
	movl r29 = itc_jitter_data	// itc_jitter
	add r22 = IA64_GTOD_WALL_TIME_OFFSET,r20	// wall_time
	add r21 = IA64_CLKSRC_MMIO_OFFSET,r20
	mov pr = r30,0xc000	// Set predicates p14/p15 from the flags word
	;;
	and r2 = TIF_ALLWORK_MASK,r2
	add r19 = IA64_ITC_LASTCYCLE_OFFSET,r29
(p15)	add r22 = IA64_GTOD_MONO_TIME_OFFSET,r20	// monotonic_time
	;;
	add r26 = IA64_CLKSRC_CYCLE_LAST_OFFSET,r20	// clksrc_cycle_last
	cmp.ne p6, p0 = 0, r2	// Fallback if work is scheduled
(p6)	br.cond.spnt.many fsys_fallback_syscall
	;;
	// Begin critical section (seqlock-style read loop)
.time_redo:
	ld4.acq r28 = [r20]	// gtod_lock.sequence, Must take first
	;;
	and r28 = ~1,r28	// And make sequence even to force retry if odd
	;;
	ld8 r30 = [r21]		// clocksource->mmio_ptr
	add r24 = IA64_CLKSRC_MULT_OFFSET,r20
	ld4 r2 = [r29]		// itc_jitter value
	add r23 = IA64_CLKSRC_SHIFT_OFFSET,r20
	add r14 = IA64_CLKSRC_MASK_OFFSET,r20
	;;
	ld4 r3 = [r24]		// clocksource mult value
	ld8 r14 = [r14]		// clocksource mask value
	cmp.eq p8,p9 = 0,r30	// use cpu timer if no mmio_ptr
	;;
	setf.sig f7 = r3	// Setup for mult scaling of counter
(p8)	cmp.ne p13,p0 = r2,r0	// need itc_jitter compensation, set p13
	ld4 r23 = [r23]		// clocksource shift value
	ld8 r24 = [r26]		// get clksrc_cycle_last value
(p9)	cmp.eq p13,p0 = 0,r30	// if mmio_ptr, clear p13 jitter control
	;;
	.pred.rel.mutex p8,p9
	MOV_FROM_ITC(p8, p6, r2, r10)	// CPU_TIMER. 36 clocks latency!!!
(p9)	ld8 r2 = [r30]		// MMIO_TIMER. Could also have latency issues..
(p13)	ld8 r25 = [r19]		// get itc_lastcycle value
	ld8 r9 = [r22],IA64_TIME_SN_SPEC_SNSEC_OFFSET	// sec
	;;
	ld8 r8 = [r22],-IA64_TIME_SN_SPEC_SNSEC_OFFSET	// snsec
(p13)	sub r3 = r25,r2		// Diff needed before comparison (thanks davidm)
	;;
(p13)	cmp.gt.unc p6,p7 = r3,r0	// check if it is less than last. p6,p7 cleared
	sub r10 = r2,r24	// current_cycle - last_cycle
	;;
(p6)	sub r10 = r25,r24	// time we got was less than last_cycle
(p7)	mov ar.ccv = r25	// more than last_cycle. Prep for cmpxchg
	;;
(p7)	cmpxchg8.rel r3 = [r19],r2,ar.ccv
	;;
(p7)	cmp.ne p7,p0 = r25,r3	// if cmpxchg not successful
	;;
(p7)	sub r10 = r3,r24	// then use new last_cycle instead
	;;
	and r10 = r10,r14	// Apply mask
	;;
	setf.sig f8 = r10
	nop.i 123
	;;
	// fault check takes 5 cycles and we have spare time
EX(.fail_efault, probe.w.fault r31, 3)
	xmpy.l f8 = f8,f7	// nsec_per_cyc*(counter-last_counter)
	;;
	getf.sig r2 = f8
	mf
	;;
	ld4 r10 = [r20]		// gtod_lock.sequence
	add r8 = r8,r2		// Add xtime.nsecs
	;;
	shr.u r8 = r8,r23	// shift by factor
	cmp4.ne p7,p0 = r28,r10
(p7)	br.cond.dpnt.few .time_redo	// sequence number changed, redo
	// End critical section.
	// Now r8=tv->tv_nsec and r9=tv->tv_sec
	mov r10 = r0
	movl r2 = 1000000000
	add r23 = IA64_TIMESPEC_TV_NSEC_OFFSET, r31
	// 2361183241434822607 = ceil(2^71 / 1000): combined with the
	// shr.u by 3 below and the final shr.u by 4, mulhi by this
	// constant computes nsec/1000 without a divide.
(p14)	movl r3 = 2361183241434822607	// Prep for / 1000 hack
	;;
.time_normalize:
	mov r21 = r8
	cmp.ge p6,p0 = r8,r2
(p14)	shr.u r20 = r8, 3 // We can repeat this if necessary just wasting time
	;;
(p14)	setf.sig f8 = r20
(p6)	sub r8 = r8,r2
(p6)	add r9 = 1,r9	// two nops before the branch.
(p14)	setf.sig f7 = r3	// Chances for repeats are 1 in 10000 for gettod
(p6)	br.cond.dpnt.few .time_normalize
	;;
	// Divided by 8 though shift. Now divide by 125
	// The compiler was able to do that with a multiply
	// and a shift and we do the same
EX(.fail_efault, probe.w.fault r23, 3)	// This also costs 5 cycles
(p14)	xmpy.hu f8 = f8, f7		// xmpy has 5 cycles latency so use it
	;;
(p14)	getf.sig r2 = f8
	;;
	mov r8 = r0
(p14)	shr.u r21 = r2, 4
	;;
EX(.fail_efault, st8 [r31] = r9)
EX(.fail_efault, st8 [r23] = r21)
	FSYS_RETURN
.fail_einval:
	mov r8 = EINVAL
	mov r10 = -1
	FSYS_RETURN
.fail_efault:
	mov r8 = EFAULT
	mov r10 = -1
	FSYS_RETURN
END(fsys_gettimeofday)

	// Light-weight clock_gettime: only CLOCK_REALTIME (0) and
	// CLOCK_MONOTONIC (1) are handled here.
	// In: r32 = clockid, r33 = struct timespec __user *.
	// shl r30 = r32,15 turns clockid 1 into CLOCK_ADD_MONOTONIC
	// (0x8000) before tail-jumping into the shared .gettime path.
ENTRY(fsys_clock_gettime)
	.prologue
	.altrp b6
	.body
	cmp4.ltu p6, p0 = CLOCK_MONOTONIC, r32
	// Fallback if this is not CLOCK_REALTIME or CLOCK_MONOTONIC
(p6)	br.spnt.few fsys_fallback_syscall
	mov r31 = r33
	shl r30 = r32,15
	br.many .gettime
END(fsys_clock_gettime)

/*
 * fsys_getcpu doesn't use the third parameter in this implementation. It reads
 * current_thread_info()->cpu and corresponding node in cpu_to_node_map.
 */
	// In: r32 = unsigned __user *cpu, r33 = unsigned __user *node
	// (either pointer may be NULL; only non-NULL ones are written).
ENTRY(fsys_getcpu)
	.prologue
	.altrp b6
	.body
	;;
	add r2=TI_FLAGS+IA64_TASK_SIZE,r16
	tnat.nz p6,p0 = r32			// guard against NaT argument
	add r3=TI_CPU+IA64_TASK_SIZE,r16
	;;
	ld4 r3=[r3]				// M r3 = thread_info->cpu
	ld4 r2=[r2]				// M r2 = thread_info->flags
(p6)	br.cond.spnt.few .fail_einval		// B
	;;
	tnat.nz p7,p0 = r33			// I guard against NaT argument
(p7)	br.cond.spnt.few .fail_einval		// B
	;;
	cmp.ne p6,p0=r32,r0			// p6 = cpu ptr non-NULL
	cmp.ne p7,p0=r33,r0			// p7 = node ptr non-NULL
	;;
#ifdef CONFIG_NUMA
	movl r17=cpu_to_node_map
	;;
EX(.fail_efault, (p6) probe.w.fault r32, 3)	// M This takes 5 cycles
EX(.fail_efault, (p7) probe.w.fault r33, 3)	// M This takes 5 cycles
	shladd r18=r3,1,r17
	;;
	ld2 r20=[r18]				// r20 = cpu_to_node_map[cpu]
	and r2 = TIF_ALLWORK_MASK,r2
	;;
	cmp.ne p8,p0=0,r2
(p8)	br.spnt.many fsys_fallback_syscall
	;;
	;;
EX(.fail_efault, (p6) st4 [r32] = r3)
EX(.fail_efault, (p7) st2 [r33] = r20)
	mov r8=0
	;;
#else
EX(.fail_efault, (p6) probe.w.fault r32, 3)	// M This takes 5 cycles
EX(.fail_efault, (p7) probe.w.fault r33, 3)	// M This takes 5 cycles
	and r2 = TIF_ALLWORK_MASK,r2
	;;
	cmp.ne p8,p0=0,r2
(p8)	br.spnt.many fsys_fallback_syscall
	;;
EX(.fail_efault, (p6) st4 [r32] = r3)
EX(.fail_efault, (p7) st2 [r33] = r0)	// !NUMA: node is always 0
	mov r8=0
	;;
#endif
	FSYS_RETURN
END(fsys_getcpu)

	// Re-dispatch to the normal (heavy-weight) syscall path.  Loads
	// the sys_call_table entry into r18 and captures PSR/ar state,
	// then falls through into fsys_bubble_down.
ENTRY(fsys_fallback_syscall)
	.prologue
	.altrp b6
	.body
	/*
	 * We only get here from light-weight syscall handlers. Thus, we already
	 * know that r15 contains a valid syscall number. No need to re-check.
	 */
	adds r17=-1024,r15		// syscall table is indexed from 1024
	movl r14=sys_call_table
	;;
	RSM_PSR_I(p0, r26, r27)
	shladd r18=r17,3,r14
	;;
	ld8 r18=[r18]				// load normal (heavy-weight) syscall entry-point
	MOV_FROM_PSR(p0, r29, r26)		// read psr (12 cyc load latency)
	mov r27=ar.rsc
	mov r21=ar.fpsr
	mov r26=ar.pfs
END(fsys_fallback_syscall)
	/* FALL THROUGH */
GLOBAL_ENTRY(fsys_bubble_down)
	.prologue
	.altrp b6
	.body
	/*
	 * We get here for syscalls that don't have a lightweight
	 * handler. For those, we need to bubble down into the kernel
	 * and that requires setting up a minimal pt_regs structure,
	 * and initializing the CPU state more or less as if an
	 * interruption had occurred. To make syscall-restarts work,
	 * we setup pt_regs such that cr_iip points to the second
	 * instruction in syscall_via_break. Decrementing the IP
	 * hence will restart the syscall via break and not
	 * decrementing IP will return us to the caller, as usual.
	 * Note that we preserve the value of psr.pp rather than
	 * initializing it from dcr.pp. This makes it possible to
	 * distinguish fsyscall execution from other privileged
	 * execution.
	 *
	 * On entry:
	 * - normal fsyscall handler register usage, except
	 *   that we also have:
	 * - r18: address of syscall entry point
	 * - r21: ar.fpsr
	 * - r26: ar.pfs
	 * - r27: ar.rsc
	 * - r29: psr
	 *
	 * We used to clear some PSR bits here but that requires slow
	 * serialization. Fortunately, that isn't really necessary.
	 * The rationale is as follows: we used to clear bits
	 * ~PSR_PRESERVED_BITS in PSR.L. Since
	 * PSR_PRESERVED_BITS==PSR.{UP,MFL,MFH,PK,DT,PP,SP,RT,IC}, we
	 * ended up clearing PSR.{BE,AC,I,DFL,DFH,DI,DB,SI,TB}.
	 * However,
	 *
	 * PSR.BE : already is turned off in __kernel_syscall_via_epc()
	 * PSR.AC : don't care (kernel normally turns PSR.AC on)
	 * PSR.I  : already turned off by the time fsys_bubble_down gets
	 *	    invoked
	 * PSR.DFL: always 0 (kernel never turns it on)
	 * PSR.DFH: don't care --- kernel never touches f32-f127 on its own
	 *	    initiative
	 * PSR.DI : always 0 (kernel never turns it on)
	 * PSR.SI : always 0 (kernel never turns it on)
	 * PSR.DB : don't care --- kernel never enables kernel-level
	 *	    breakpoints
	 * PSR.TB : must be 0 already; if it wasn't zero on entry to
	 *	    __kernel_syscall_via_epc, the branch to fsys_bubble_down
	 *	    will trigger a taken branch; the taken-trap-handler then
	 *	    converts the syscall into a break-based system-call.
	 */
	/*
	 * Reading psr.l gives us only bits 0-31, psr.it, and psr.mc.
	 * The rest we have to synthesize.
	 */
#	define PSR_ONE_BITS		((3 << IA64_PSR_CPL0_BIT)	\
					 | (0x1 << IA64_PSR_RI_BIT)	\
					 | IA64_PSR_BN | IA64_PSR_I)

	invala					// M0|1
	movl r14=ia64_ret_from_syscall		// X

	nop.m 0
	movl r28=__kernel_syscall_via_break	// X	create cr.iip
	;;

	mov r2=r16				// A    get task addr to addl-addressable register
	adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 // A
	mov r31=pr				// I0   save pr (2 cyc)
	;;
	st1 [r16]=r0				// M2|3 clear current->thread.on_ustack flag
	addl r22=IA64_RBS_OFFSET,r2		// A    compute base of RBS
	add r3=TI_FLAGS+IA64_TASK_SIZE,r2	// A
	;;
	ld4 r3=[r3]				// M0|1 r3 = current_thread_info()->flags
	lfetch.fault.excl.nt1 [r22]		// M0|1 prefetch register backing-store
	nop.i 0
	;;
	mov ar.rsc=0				// M2   set enforced lazy mode, pl 0, LE, loadrs=0
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	MOV_FROM_ITC(p0, p6, r30, r23)		// M    get cycle for accounting
#else
	nop.m 0
#endif
	nop.i 0
	;;
	mov r23=ar.bspstore			// M2 (12 cyc) save ar.bspstore
	mov.m r24=ar.rnat			// M2 (5 cyc) read ar.rnat (dual-issues!)
	nop.i 0
	;;
	mov ar.bspstore=r22			// M2 (6 cyc) switch to kernel RBS
	movl r8=PSR_ONE_BITS			// X
	;;
	mov r25=ar.unat				// M2 (5 cyc) save ar.unat
	mov r19=b6				// I0   save b6 (2 cyc)
	mov r20=r1				// A    save caller's gp in r20
	;;
	or r29=r8,r29				// A    construct cr.ipsr value to save
	mov b6=r18				// I0   copy syscall entry-point to b6 (7 cyc)
	addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r2 // A compute base of memory stack
	mov r18=ar.bsp				// M2   save (kernel) ar.bsp (12 cyc)
	cmp.ne pKStk,pUStk=r0,r0		// A    set pKStk <- 0, pUStk <- 1
	br.call.sptk.many b7=ia64_syscall_setup	// B
	;;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	// mov.m r30=ar.itc is called in advance
	add r16=TI_AC_STAMP+IA64_TASK_SIZE,r2
	add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r2
	;;
	ld8 r18=[r16],TI_AC_STIME-TI_AC_STAMP	// time at last check in kernel
	ld8 r19=[r17],TI_AC_UTIME-TI_AC_LEAVE	// time at leave kernel
	;;
	ld8 r20=[r16],TI_AC_STAMP-TI_AC_STIME	// cumulated stime
	ld8 r21=[r17]				// cumulated utime
	sub r22=r19,r18				// stime before leave kernel
	;;
	st8 [r16]=r30,TI_AC_STIME-TI_AC_STAMP	// update stamp
	sub r18=r30,r19				// elapsed time in user mode
	;;
	add r20=r20,r22				// sum stime
	add r21=r21,r18				// sum utime
	;;
	st8 [r16]=r20				// update stime
	st8 [r17]=r21				// update utime
	;;
#endif
	mov ar.rsc=0x3				// M2   set eager mode, pl 0, LE, loadrs=0
	mov rp=r14				// I0   set the real return addr
	and r3=_TIF_SYSCALL_TRACEAUDIT,r3	// A
	;;
	SSM_PSR_I(p0, p6, r22)			// M2   we're on kernel stacks now, reenable irqs
	cmp.eq p8,p0=r3,r0			// A
	// NOTE(review): p10 is not set in this function; presumably it is
	// produced by ia64_syscall_setup above -- confirm against entry.S.
(p10)	br.cond.spnt.many ia64_ret_from_syscall	// B    return if bad call-frame or r15 is a NaT

	nop.m 0
(p8)	br.call.sptk.many b6=b6			// B    (ignore return address)
	br.cond.spnt ia64_trace_syscall		// B
END(fsys_bubble_down)

	// Table of light-weight syscall handlers, indexed by
	// (syscall nr - 1024).  A zero entry means "no light-weight
	// handler": the gate then uses the fsys_bubble_down word that
	// sits immediately *before* the fsyscall_table label.
	.rodata
	.align 8
	.globl fsyscall_table

	data8 fsys_bubble_down
fsyscall_table:
	data8 fsys_ni_syscall
	data8 0				// exit			// 1025
	data8 0				// read
	data8 0				// write
	data8 0				// open
	data8 0				// close
	data8 0				// creat		// 1030
	data8 0				// link
	data8 0				// unlink
	data8 0				// execve
	data8 0				// chdir
	data8 0				// fchdir		// 1035
	data8 0				// utimes
	data8 0				// mknod
	data8 0				// chmod
	data8 0				// chown
	data8 0				// lseek		// 1040
	data8 fsys_getpid		// getpid
	data8 0				// getppid
	data8 0				// mount
	data8 0				// umount
	data8 0				// setuid		// 1045
	data8 0				// getuid
	data8 0				// geteuid
	data8 0				// ptrace
	data8 0				// access
	data8 0				// sync			// 1050
	data8 0				// fsync
	data8 0				// fdatasync
	data8 0				// kill
	data8 0				// rename
	data8 0				// mkdir		// 1055
	data8 0				// rmdir
	data8 0				// dup
	data8 0				// pipe
	data8 0				// times
	data8 0				// brk			// 1060
	data8 0				// setgid
	data8 0				// getgid
	data8 0				// getegid
	data8 0				// acct
	data8 0				// ioctl		// 1065
	data8 0				// fcntl
	data8 0				// umask
	data8 0				// chroot
	data8 0				// ustat
	data8 0				// dup2			// 1070
	data8 0				// setreuid
	data8 0				// setregid
	data8 0				// getresuid
	data8 0				// setresuid
	data8 0				// getresgid		// 1075
	data8 0				// setresgid
	data8 0				// getgroups
	data8 0				// setgroups
	data8 0				// getpgid
	data8 0				// setpgid		// 1080
	data8 0				// setsid
	data8 0				// getsid
	data8 0				// sethostname
	data8 0				// setrlimit
	data8 0				// getrlimit		// 1085
	data8 0				// getrusage
	data8 fsys_gettimeofday		// gettimeofday
	data8 0				// settimeofday
	data8 0				// select
	data8 0				// poll			// 1090
	data8 0				// symlink
	data8 0				// readlink
	data8 0				// uselib
	data8 0				// swapon
	data8 0				// swapoff		// 1095
	data8 0				// reboot
	data8 0				// truncate
	data8 0				// ftruncate
	data8 0				// fchmod
	data8 0				// fchown		// 1100
	data8 0				// getpriority
	data8 0				// setpriority
	data8 0				// statfs
	data8 0				// fstatfs
	data8 0				// gettid		// 1105
	data8 0				// semget
	data8 0				// semop
	data8 0				// semctl
	data8 0				// msgget
	data8 0				// msgsnd		// 1110
	data8 0				// msgrcv
	data8 0				// msgctl
	data8 0				// shmget
	data8 0				// shmat
	data8 0				// shmdt		// 1115
	data8 0				// shmctl
	data8 0				// syslog
	data8 0				// setitimer
	data8 0				// getitimer
	data8 0						// 1120
	data8 0
	data8 0
	data8 0				// vhangup
	data8 0				// lchown
	data8 0				// remap_file_pages	// 1125
	data8 0				// wait4
	data8 0				// sysinfo
	data8 0				// clone
	data8 0				// setdomainname
	data8 0				// newuname		// 1130
	data8 0				// adjtimex
	data8 0
	data8 0				// init_module
	data8 0				// delete_module
	data8 0						// 1135
	data8 0
	data8 0				// quotactl
	data8 0				// bdflush
	data8 0				// sysfs
	data8 0				// personality		// 1140
	data8 0				// afs_syscall
	data8 0				// setfsuid
	data8 0				// setfsgid
	data8 0				// getdents
	data8 0				// flock		// 1145
	data8 0				// readv
	data8 0				// writev
	data8 0				// pread64
	data8 0				// pwrite64
	data8 0				// sysctl		// 1150
	data8 0				// mmap
	data8 0				// munmap
	data8 0				// mlock
	data8 0				// mlockall
	data8 0				// mprotect		// 1155
	data8 0				// mremap
	data8 0				// msync
	data8 0				// munlock
	data8 0				// munlockall
	data8 0				// sched_getparam	// 1160
	data8 0				// sched_setparam
	data8 0				// sched_getscheduler
	data8 0				// sched_setscheduler
	data8 0				// sched_yield
	data8 0				// sched_get_priority_max	// 1165
	data8 0				// sched_get_priority_min
	data8 0				// sched_rr_get_interval
	data8 0				// nanosleep
	data8 0				// nfsservctl
	data8 0				// prctl		// 1170
	data8 0				// getpagesize
	data8 0				// mmap2
	data8 0				// pciconfig_read
	data8 0				// pciconfig_write
	data8 0				// perfmonctl		// 1175
	data8 0				// sigaltstack
	data8 0				// rt_sigaction
	data8 0				// rt_sigpending
	data8 0				// rt_sigprocmask
	data8 0				// rt_sigqueueinfo	// 1180
	data8 0				// rt_sigreturn
	data8 0				// rt_sigsuspend
	data8 0				// rt_sigtimedwait
	data8 0				// getcwd
	data8 0				// capget		// 1185
	data8 0				// capset
	data8 0				// sendfile
	data8 0
	data8 0
	data8 0				// socket		// 1190
	data8 0				// bind
	data8 0				// connect
	data8 0				// listen
	data8 0				// accept
	data8 0				// getsockname		// 1195
	data8 0				// getpeername
	data8 0				// socketpair
	data8 0				// send
	data8 0				// sendto
	data8 0				// recv			// 1200
	data8 0				// recvfrom
	data8 0				// shutdown
	data8 0				// setsockopt
	data8 0				// getsockopt
	data8 0				// sendmsg		// 1205
	data8 0				// recvmsg
	data8 0				// pivot_root
	data8 0				// mincore
	data8 0				// madvise
	data8 0				// newstat		// 1210
	data8 0				// newlstat
	data8 0				// newfstat
	data8 0				// clone2
	data8 0				// getdents64
	data8 0				// getunwind		// 1215
	data8 0				// readahead
	data8 0				// setxattr
	data8 0				// lsetxattr
	data8 0				// fsetxattr
	data8 0				// getxattr		// 1220
	data8 0				// lgetxattr
	data8 0				// fgetxattr
	data8 0				// listxattr
	data8 0				// llistxattr
	data8 0				// flistxattr		// 1225
	data8 0				// removexattr
	data8 0				// lremovexattr
	data8 0				// fremovexattr
	data8 0				// tkill
	data8 0				// futex		// 1230
	data8 0				// sched_setaffinity
	data8 0				// sched_getaffinity
	data8 fsys_set_tid_address	// set_tid_address
	data8 0				// fadvise64_64
	data8 0				// tgkill		// 1235
	data8 0				// exit_group
	data8 0				// lookup_dcookie
	data8 0				// io_setup
	data8 0				// io_destroy
	data8 0				// io_getevents		// 1240
	data8 0				// io_submit
	data8 0				// io_cancel
	data8 0				// epoll_create
	data8 0				// epoll_ctl
	data8 0				// epoll_wait		// 1245
	data8 0				// restart_syscall
	data8 0				// semtimedop
	data8 0				// timer_create
	data8 0				// timer_settime
	data8 0				// timer_gettime	// 1250
	data8 0				// timer_getoverrun
	data8 0				// timer_delete
	data8 0				// clock_settime
	data8 fsys_clock_gettime	// clock_gettime
	data8 0				// clock_getres		// 1255
	data8 0				// clock_nanosleep
	data8 0				// fstatfs64
	data8 0				// statfs64
	data8 0				// mbind
	data8 0				// get_mempolicy	// 1260
	data8 0				// set_mempolicy
	data8 0				// mq_open
	data8 0				// mq_unlink
	data8 0				// mq_timedsend
	data8 0				// mq_timedreceive	// 1265
	data8 0				// mq_notify
	data8 0				// mq_getsetattr
	data8 0				// kexec_load
	data8 0				// vserver
	data8 0				// waitid		// 1270
	data8 0				// add_key
	data8 0				// request_key
	data8 0				// keyctl
	data8 0				// ioprio_set
	data8 0				// ioprio_get		// 1275
	data8 0				// move_pages
	data8 0				// inotify_init
	data8 0				// inotify_add_watch
	data8 0				// inotify_rm_watch
	data8 0				// migrate_pages	// 1280
	data8 0				// openat
	data8 0				// mkdirat
	data8 0				// mknodat
	data8 0				// fchownat
	data8 0				// futimesat		// 1285
	data8 0				// newfstatat
	data8 0				// unlinkat
	data8 0				// renameat
	data8 0				// linkat
	data8 0				// symlinkat		// 1290
	data8 0				// readlinkat
	data8 0				// fchmodat
	data8 0				// faccessat
	data8 0
	data8 0						// 1295
	data8 0				// unshare
	data8 0				// splice
	data8 0				// set_robust_list
	data8 0				// get_robust_list
	data8 0				// sync_file_range	// 1300
	data8 0				// tee
	data8 0				// vmsplice
	data8 0
	data8 fsys_getcpu		// getcpu	// 1304

	// fill in zeros for the remaining entries
	.zero:
	.space fsyscall_table + 8*NR_syscalls - .zero, 0
aixcc-public/challenge-001-exemplar-source
53,034
arch/ia64/kernel/ivt.S
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * arch/ia64/kernel/ivt.S
 *
 * Copyright (C) 1998-2001, 2003, 2005 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger <davidm@hpl.hp.com>
 * Copyright (C) 2000, 2002-2003 Intel Co
 *	Asit Mallick <asit.k.mallick@intel.com>
 *      Suresh Siddha <suresh.b.siddha@intel.com>
 *      Kenneth Chen <kenneth.w.chen@intel.com>
 *      Fenghua Yu <fenghua.yu@intel.com>
 *
 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP
 * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now uses virtual PT.
 *
 * Copyright (C) 2005 Hewlett-Packard Co
 *	Dan Magenheimer <dan.magenheimer@hp.com>
 *      Xen paravirtualization
 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *                    pv_ops.
 *      Yaozu (Eddie) Dong <eddie.dong@intel.com>
 */
/*
 * This file defines the interruption vector table used by the CPU.
 * It does not include one entry per possible cause of interruption.
 *
 * The first 20 entries of the table contain 64 bundles each while the
 * remaining 48 entries contain only 16 bundles each.
 *
 * The 64 bundles are used to allow inlining the whole handler for critical
 * interruptions like TLB misses.
 *
 * For each entry, the comment is as follows:
 *
 *		// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
 *		entry offset ----/     /         /                  /  /
 *		entry number ---------/         /                  /  /
 *		size of the entry -------------/                  /  /
 *		vector name -------------------------------------/  /
 *		interruptions triggering this vector ----------------/
 *
 * The table is 32KB in size and must be aligned on 32KB boundary.
 * (The CPU ignores the 15 lower bits of the address)
 *
 * Table is based upon EAS2.6 (Oct 1999)
 */


#include <linux/pgtable.h>
#include <asm/asmmacro.h>
#include <asm/break.h>
#include <asm/kregs.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/export.h>

#if 0
# define PSR_DEFAULT_BITS	psr.ac
#else
# define PSR_DEFAULT_BITS	0
#endif

#if 0
  /*
   * This lets you track the last eight faults that occurred on the CPU.  Make sure ar.k2 isn't
   * needed for something else before enabling this...
   */
# define DBG_FAULT(i)	mov r16=ar.k2;;	shl r16=r16,8;;	add r16=(i),r16;;mov ar.k2=r16
#else
# define DBG_FAULT(i)
#endif

#include "minstate.h"

	/* Generic fall-back: save pr in r31, vector number in r19, and
	 * branch out to the C-level fault dispatcher. */
#define FAULT(n)									\
	mov r31=pr;									\
	mov r19=n;;			/* prepare to save predicates */		\
	br.sptk.many dispatch_to_fault_handler

	.section .text..ivt,"ax"

	.align 32768	// align on 32KB boundary
	.global ia64_ivt
	EXPORT_DATA_SYMBOL(ia64_ivt)
ia64_ivt:
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
ENTRY(vhpt_miss)
	DBG_FAULT(0)
	/*
	 * The VHPT vector is invoked when the TLB entry for the virtual page table
	 * is missing.  This happens only as a result of a previous
	 * (the "original") TLB miss, which may either be caused by an instruction
	 * fetch or a data access (or non-access).
	 *
	 * What we do here is normal TLB miss handing for the _original_ miss,
	 * followed by inserting the TLB entry for the virtual page table page
	 * that the VHPT walker was attempting to access.  The latter gets
	 * inserted as long as page table entry above pte level have valid
	 * mappings for the faulting address.  The TLB entry for the original
	 * miss gets inserted only if the pte entry indicates that the page is
	 * present.
	 *
	 * do_page_fault gets invoked in the following cases:
	 *	- the faulting virtual address uses unimplemented address bits
	 *	- the faulting virtual address has no valid page table mapping
	 */
	MOV_FROM_IFA(r16)			// get address that caused the TLB miss
#ifdef CONFIG_HUGETLB_PAGE
	movl r18=PAGE_SHIFT
	MOV_FROM_ITIR(r25)
#endif
	;;
	RSM_PSR_DT				// use physical addressing for data
	mov r31=pr				// save the predicate registers
	mov r19=IA64_KR(PT_BASE)		// get page table base address
	shl r21=r16,3				// shift bit 60 into sign bit
	shr.u r17=r16,61			// get the region number into r17
	;;
	shr.u r22=r21,3
#ifdef CONFIG_HUGETLB_PAGE
	extr.u r26=r25,2,6
	;;
	cmp.ne p8,p0=r18,r26
	sub r27=r26,r18
	;;
(p8)	dep r25=r18,r25,2,6
(p8)	shr r22=r22,r27
#endif
	;;
	cmp.eq p6,p7=5,r17			// is IFA pointing into to region 5?
	shr.u r18=r22,PGDIR_SHIFT		// get bottom portion of pgd index bit
	;;
(p7)	dep r17=r17,r19,(PAGE_SHIFT-3),3	// put region number bits in place

	srlz.d
	LOAD_PHYSICAL(p6, r19, swapper_pg_dir)	// region 5 is rooted at swapper_pg_dir

	.pred.rel "mutex", p6, p7
(p6)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
(p7)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
	;;
(p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=pgd_offset for region 5
(p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=pgd_offset for region[0-4]
	cmp.eq p7,p6=0,r21			// unused address bits all zeroes?
#if CONFIG_PGTABLE_LEVELS == 4
	shr.u r28=r22,PUD_SHIFT			// shift pud index into position
#else
	shr.u r18=r22,PMD_SHIFT			// shift pmd index into position
#endif
	;;
	ld8 r17=[r17]				// get *pgd (may be 0)
	;;
(p7)	cmp.eq p6,p7=r17,r0			// was pgd_present(*pgd) == NULL?
#if CONFIG_PGTABLE_LEVELS == 4
	dep r28=r28,r17,3,(PAGE_SHIFT-3)	// r28=pud_offset(pgd,addr)
	;;
	shr.u r18=r22,PMD_SHIFT			// shift pmd index into position
(p7)	ld8 r29=[r28]				// get *pud (may be 0)
	;;
(p7)	cmp.eq.or.andcm p6,p7=r29,r0		// was pud_present(*pud) == NULL?
	dep r17=r18,r29,3,(PAGE_SHIFT-3)	// r17=pmd_offset(pud,addr)
#else
	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// r17=pmd_offset(pgd,addr)
#endif
	;;
(p7)	ld8 r20=[r17]				// get *pmd (may be 0)
	shr.u r19=r22,PAGE_SHIFT		// shift pte index into position
	;;
(p7)	cmp.eq.or.andcm p6,p7=r20,r0		// was pmd_present(*pmd) == NULL?
	dep r21=r19,r20,3,(PAGE_SHIFT-3)	// r21=pte_offset(pmd,addr)
	;;
(p7)	ld8 r18=[r21]				// read *pte
	MOV_FROM_ISR(r19)			// cr.isr bit 32 tells us if this is an insn miss
	;;
(p7)	tbit.z p6,p7=r18,_PAGE_P_BIT		// page present bit cleared?
	MOV_FROM_IHA(r22)			// get the VHPT address that caused the TLB miss
	;;					// avoid RAW on p7
(p7)	tbit.nz.unc p10,p11=r19,32		// is it an instruction TLB miss?
	dep r23=0,r20,0,PAGE_SHIFT		// clear low bits to get page address
	;;
	ITC_I_AND_D(p10, p11, r18, r24)		// insert the instruction TLB entry and
						// insert the data TLB entry
(p6)	br.cond.spnt.many page_fault		// handle bad address/page not present (page fault)
	MOV_TO_IFA(r22, r24)

#ifdef CONFIG_HUGETLB_PAGE
	MOV_TO_ITIR(p8, r25, r24)		// change to default page-size for VHPT
#endif

	/*
	 * Now compute and insert the TLB entry for the virtual page table.  We never
	 * execute in a page table page so there is no need to set the exception deferral
	 * bit.
	 */
	adds r24=__DIRTY_BITS_NO_ED|_PAGE_PL_0|_PAGE_AR_RW,r23
	;;
	ITC_D(p7, r24, r25)
	;;
#ifdef CONFIG_SMP
	/*
	 * Tell the assemblers dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data

	/*
	 * Re-check pagetable entry.  If they changed, we may have received a ptc.g
	 * between reading the pagetable and the "itc".  If so, flush the entry we
	 * inserted and retry.  At this point, we have:
	 *
	 * r28 = equivalent of pud_offset(pgd, ifa)
	 * r17 = equivalent of pmd_offset(pud, ifa)
	 * r21 = equivalent of pte_offset(pmd, ifa)
	 *
	 * r29 = *pud
	 * r20 = *pmd
	 * r18 = *pte
	 */
	ld8 r25=[r21]				// read *pte again
	ld8 r26=[r17]				// read *pmd again
#if CONFIG_PGTABLE_LEVELS == 4
	ld8 r19=[r28]				// read *pud again
#endif
	cmp.ne p6,p7=r0,r0
	;;
	cmp.ne.or.andcm p6,p7=r26,r20		// did *pmd change
#if CONFIG_PGTABLE_LEVELS == 4
	cmp.ne.or.andcm p6,p7=r19,r29		// did *pud change
#endif
	mov r27=PAGE_SHIFT<<2
	;;
(p6)	ptc.l r22,r27				// purge PTE page translation
(p7)	cmp.ne.or.andcm p6,p7=r25,r18		// did *pte change
	;;
(p6)	ptc.l r16,r27				// purge translation
#endif

	mov pr=r31,-1				// restore predicate registers
	RFI
END(vhpt_miss)

	.org ia64_ivt+0x400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0400 Entry 1 (size 64 bundles) ITLB (21)
ENTRY(itlb_miss)
	DBG_FAULT(1)
	/*
	 * The ITLB handler accesses the PTE via the virtually mapped linear
	 * page table.  If a nested TLB miss occurs, we switch into physical
	 * mode, walk the page table, and then re-execute the PTE read and
	 * go on normally after that.
	 */
	MOV_FROM_IFA(r16)			// get virtual address
	mov r29=b0				// save b0
	mov r31=pr				// save predicates
.itlb_fault:
	MOV_FROM_IHA(r17)			// get virtual address of PTE
	movl r30=1f				// load nested fault continuation point
	;;
1:	ld8 r18=[r17]				// read *pte
	;;
	mov b0=r29
	tbit.z p6,p0=r18,_PAGE_P_BIT		// page present bit cleared?
(p6)	br.cond.spnt page_fault
	;;
	ITC_I(p0, r18, r19)
	;;
#ifdef CONFIG_SMP
	/*
	 * Tell the assemblers dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data

	ld8 r19=[r17]				// read *pte again and see if same
	mov r20=PAGE_SHIFT<<2			// setup page size for purge
	;;
	cmp.ne p7,p0=r18,r19
	;;
(p7)	ptc.l r16,r20
#endif
	mov pr=r31,-1
	RFI
END(itlb_miss)

	.org ia64_ivt+0x0800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
ENTRY(dtlb_miss)
	DBG_FAULT(2)
	/*
	 * The DTLB handler accesses the PTE via the virtually mapped linear
	 * page table.  If a nested TLB miss occurs, we switch into physical
	 * mode, walk the page table, and then re-execute the PTE read and
	 * go on normally after that.
	 */
	MOV_FROM_IFA(r16)			// get virtual address
	mov r29=b0				// save b0
	mov r31=pr				// save predicates
dtlb_fault:
	MOV_FROM_IHA(r17)			// get virtual address of PTE
	movl r30=1f				// load nested fault continuation point
	;;
1:	ld8 r18=[r17]				// read *pte
	;;
	mov b0=r29
	tbit.z p6,p0=r18,_PAGE_P_BIT		// page present bit cleared?
(p6)	br.cond.spnt page_fault
	;;
	ITC_D(p0, r18, r19)
	;;
#ifdef CONFIG_SMP
	/*
	 * Tell the assemblers dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data

	ld8 r19=[r17]				// read *pte again and see if same
	mov r20=PAGE_SHIFT<<2			// setup page size for purge
	;;
	cmp.ne p7,p0=r18,r19
	;;
(p7)	ptc.l r16,r20
#endif
	mov pr=r31,-1
	RFI
END(dtlb_miss)

	.org ia64_ivt+0x0c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
ENTRY(alt_itlb_miss)
	DBG_FAULT(3)
	MOV_FROM_IFA(r16)	// get address that caused the TLB miss
	movl r17=PAGE_KERNEL
	MOV_FROM_IPSR(p0, r21)
	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
	mov r31=pr
	;;
#ifdef CONFIG_DISABLE_VHPT
	shr.u r22=r16,61			// get the region number into r21
	;;
	cmp.gt p8,p0=6,r22			// user mode
	;;
	THASH(p8, r17, r16, r23)
	;;
	MOV_TO_IHA(p8, r17, r23)
(p8)	mov r29=b0				// save b0
(p8)	br.cond.dptk .itlb_fault
#endif
	extr.u r23=r21,IA64_PSR_CPL0_BIT,2	// extract psr.cpl
	and r19=r19,r16		// clear ed, reserved bits, and PTE control bits
	shr.u r18=r16,57	// move address bit 61 to bit 4
	;;
	andcm r18=0x10,r18	// bit 4=~address-bit(61)
	cmp.ne p8,p0=r0,r23	// psr.cpl != 0?
	or r19=r17,r19		// insert PTE control bits into r19
	;;
	or r19=r19,r18		// set bit 4 (uncached) if the access was to region 6
(p8)	br.cond.spnt page_fault
	;;
	ITC_I(p0, r19, r18)	// insert the TLB entry
	mov pr=r31,-1
	RFI
END(alt_itlb_miss)

	.org ia64_ivt+0x1000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
ENTRY(alt_dtlb_miss)
	DBG_FAULT(4)
	MOV_FROM_IFA(r16)	// get address that caused the TLB miss
	movl r17=PAGE_KERNEL
	MOV_FROM_ISR(r20)
	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
	MOV_FROM_IPSR(p0, r21)
	mov r31=pr
	mov r24=PERCPU_ADDR
	;;
#ifdef CONFIG_DISABLE_VHPT
	shr.u r22=r16,61			// get the region number into r21
	;;
	cmp.gt p8,p0=6,r22			// access to region 0-5
	;;
	THASH(p8, r17, r16, r25)
	;;
	MOV_TO_IHA(p8, r17, r25)
(p8)	mov r29=b0				// save b0
(p8)	br.cond.dptk dtlb_fault
#endif
	cmp.ge p10,p11=r16,r24			// access to per_cpu_data?
	tbit.z p12,p0=r16,61			// access to region 6?
	mov r25=PERCPU_PAGE_SHIFT << 2
	mov r26=PERCPU_PAGE_SIZE
	nop.m 0
	nop.b 0
	;;
(p10)	mov r19=IA64_KR(PER_CPU_DATA)
(p11)	and r19=r19,r16				// clear non-ppn fields
	extr.u r23=r21,IA64_PSR_CPL0_BIT,2	// extract psr.cpl
	and r22=IA64_ISR_CODE_MASK,r20		// get the isr.code field
	tbit.nz p6,p7=r20,IA64_ISR_SP_BIT	// is speculation bit on?
	tbit.nz p9,p0=r20,IA64_ISR_NA_BIT	// is non-access bit on?
	;;
(p10)	sub r19=r19,r26
	MOV_TO_ITIR(p10, r25, r24)
	cmp.ne p8,p0=r0,r23
(p9)	cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22	// check isr.code field
(p12)	dep r17=-1,r17,4,1			// set ma=UC for region 6 addr
(p8)	br.cond.spnt page_fault

	dep r21=-1,r21,IA64_PSR_ED_BIT,1
	;;
	or r19=r19,r17		// insert PTE control bits into r19
	MOV_TO_IPSR(p6, r21, r24)
	;;
	ITC_D(p7, r19, r18)	// insert the TLB entry
	mov pr=r31,-1
	RFI
END(alt_dtlb_miss)

	.org ia64_ivt+0x1400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
ENTRY(nested_dtlb_miss)
	/*
	 * In the absence of kernel bugs, we get here when the virtually mapped linear
	 * page table is accessed non-speculatively (e.g., in the Dirty-bit, Instruction
	 * Access-bit, or Data Access-bit faults).  If the DTLB entry for the virtual page
	 * table is missing, a nested TLB miss fault is triggered and control is
	 * transferred to this point.  When this happens, we lookup the pte for the
	 * faulting address by walking the page table in physical mode and return to the
	 * continuation point passed in register r30 (or call page_fault if the address is
	 * not mapped).
	 *
	 * Input:	r16:	faulting address
	 *		r29:	saved b0
	 *		r30:	continuation address
	 *		r31:	saved pr
	 *
	 * Output:	r17:	physical address of PTE of faulting address
	 *		r29:	saved b0
	 *		r30:	continuation address
	 *		r31:	saved pr
	 *
	 * Clobbered:	b0, r18, r19, r21, r22, psr.dt (cleared)
	 */
	RSM_PSR_DT				// switch to using physical data addressing
	mov r19=IA64_KR(PT_BASE)		// get the page table base address
	shl r21=r16,3				// shift bit 60 into sign bit
	MOV_FROM_ITIR(r18)
	;;
	shr.u r17=r16,61			// get the region number into r17
	extr.u r18=r18,2,6			// get the faulting page size
	;;
	cmp.eq p6,p7=5,r17			// is faulting address in region 5?
	add r22=-PAGE_SHIFT,r18			// adjustment for hugetlb address
	add r18=PGDIR_SHIFT-PAGE_SHIFT,r18
	;;
	shr.u r22=r16,r22
	shr.u r18=r16,r18
(p7)	dep r17=r17,r19,(PAGE_SHIFT-3),3	// put region number bits in place

	srlz.d
	LOAD_PHYSICAL(p6, r19, swapper_pg_dir)	// region 5 is rooted at swapper_pg_dir

	.pred.rel "mutex", p6, p7
(p6)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
(p7)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
	;;
(p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=pgd_offset for region 5
(p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=pgd_offset for region[0-4]
	cmp.eq p7,p6=0,r21			// unused address bits all zeroes?
#if CONFIG_PGTABLE_LEVELS == 4
	shr.u r18=r22,PUD_SHIFT			// shift pud index into position
#else
	shr.u r18=r22,PMD_SHIFT			// shift pmd index into position
#endif
	;;
	ld8 r17=[r17]				// get *pgd (may be 0)
	;;
(p7)	cmp.eq p6,p7=r17,r0			// was pgd_present(*pgd) == NULL?
	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// r17=p[u|m]d_offset(pgd,addr)
	;;
#if CONFIG_PGTABLE_LEVELS == 4
(p7)	ld8 r17=[r17]				// get *pud (may be 0)
	shr.u r18=r22,PMD_SHIFT			// shift pmd index into position
	;;
(p7)	cmp.eq.or.andcm p6,p7=r17,r0		// was pud_present(*pud) == NULL?
	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// r17=pmd_offset(pud,addr)
	;;
#endif
(p7)	ld8 r17=[r17]				// get *pmd (may be 0)
	shr.u r19=r22,PAGE_SHIFT		// shift pte index into position
	;;
(p7)	cmp.eq.or.andcm p6,p7=r17,r0		// was pmd_present(*pmd) == NULL?
	dep r17=r19,r17,3,(PAGE_SHIFT-3)	// r17=pte_offset(pmd,addr);
(p6)	br.cond.spnt page_fault
	mov b0=r30
	br.sptk.many b0				// return to continuation point
END(nested_dtlb_miss)

	.org ia64_ivt+0x1800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
ENTRY(ikey_miss)
	DBG_FAULT(6)
	FAULT(6)
END(ikey_miss)

	.org ia64_ivt+0x1c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
ENTRY(dkey_miss)
	DBG_FAULT(7)
	FAULT(7)
END(dkey_miss)

	.org ia64_ivt+0x2000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
ENTRY(dirty_bit)
	DBG_FAULT(8)
	/*
	 * What we do here is to simply turn on the dirty bit in the PTE.  We need to
	 * update both the page-table and the TLB entry.  To efficiently access the PTE,
	 * we address it through the virtual page table.  Most likely, the TLB entry for
	 * the relevant virtual page table page is still present in the TLB so we can
	 * normally do this without additional TLB misses.  In case the necessary virtual
	 * page table TLB entry isn't present, we take a nested TLB miss hit where we look
	 * up the physical address of the L3 PTE and then continue at label 1 below.
	 */
	MOV_FROM_IFA(r16)		// get the address that caused the fault
	movl r30=1f			// load continuation point in case of nested fault
	;;
	THASH(p0, r17, r16, r18)	// compute virtual address of L3 PTE
	mov r29=b0			// save b0 in case of nested fault
	mov r31=pr			// save pr
#ifdef CONFIG_SMP
	mov r28=ar.ccv			// save ar.ccv
	;;
1:	ld8 r18=[r17]
	;;				// avoid RAW on r18
	mov ar.ccv=r18			// set compare value for cmpxchg
	or r25=_PAGE_D|_PAGE_A,r18	// set the dirty and accessed bits
	tbit.z p7,p6 = r18,_PAGE_P_BIT	// Check present bit
	;;
(p6)	cmpxchg8.acq r26=[r17],r25,ar.ccv	// Only update if page is present
	mov r24=PAGE_SHIFT<<2
	;;
(p6)	cmp.eq p6,p7=r26,r18		// Only compare if page is present
	;;
	ITC_D(p6, r25, r18)		// install updated PTE
	;;
	/*
	 * Tell the assemblers dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data

	ld8 r18=[r17]			// read PTE again
	;;
	cmp.eq p6,p7=r18,r25		// is it same as the newly installed
	;;
(p7)	ptc.l r16,r24
	mov b0=r29			// restore b0
	mov ar.ccv=r28
#else
	;;
1:	ld8 r18=[r17]
	;;				// avoid RAW on r18
	or r18=_PAGE_D|_PAGE_A,r18	// set the dirty and accessed bits
	mov b0=r29			// restore b0
	;;
	st8 [r17]=r18			// store back updated PTE
	ITC_D(p0, r18, r16)		// install updated PTE
#endif
	mov pr=r31,-1			// restore pr
	RFI
END(dirty_bit)

	.org ia64_ivt+0x2400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
ENTRY(iaccess_bit)
	DBG_FAULT(9)
	// Like Entry 8, except for instruction access
	MOV_FROM_IFA(r16)		// get the address that caused the fault
	movl r30=1f			// load continuation point in case of nested fault
	mov r31=pr			// save predicates
#ifdef CONFIG_ITANIUM
	/*
	 * Erratum 10 (IFA may contain incorrect address) has "NoFix" status.
	 */
	MOV_FROM_IPSR(p0, r17)
	;;
	MOV_FROM_IIP(r18)
	tbit.z p6,p0=r17,IA64_PSR_IS_BIT	// IA64 instruction set?
	;;
(p6)	mov r16=r18			// if so, use cr.iip instead of cr.ifa
#endif /* CONFIG_ITANIUM */
	;;
	THASH(p0, r17, r16, r18)	// compute virtual address of L3 PTE
	mov r29=b0			// save b0 in case of nested fault)
#ifdef CONFIG_SMP
	mov r28=ar.ccv			// save ar.ccv
	;;
1:	ld8 r18=[r17]
	;;
	mov ar.ccv=r18			// set compare value for cmpxchg
	or r25=_PAGE_A,r18		// set the accessed bit
	tbit.z p7,p6 = r18,_PAGE_P_BIT	// Check present bit
	;;
(p6)	cmpxchg8.acq r26=[r17],r25,ar.ccv	// Only if page present
	mov r24=PAGE_SHIFT<<2
	;;
(p6)	cmp.eq p6,p7=r26,r18		// Only if page present
	;;
	ITC_I(p6, r25, r26)		// install updated PTE
	;;
	/*
	 * Tell the assemblers dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data

	ld8 r18=[r17]			// read PTE again
	;;
	cmp.eq p6,p7=r18,r25		// is it same as the newly installed
	;;
(p7)	ptc.l r16,r24
	mov b0=r29			// restore b0
	mov ar.ccv=r28
#else /* !CONFIG_SMP */
	;;
1:	ld8 r18=[r17]
	;;
	or r18=_PAGE_A,r18		// set the accessed bit
	mov b0=r29			// restore b0
	;;
	st8 [r17]=r18			// store back updated PTE
	ITC_I(p0, r18, r16)		// install updated PTE
#endif /* !CONFIG_SMP */
	mov pr=r31,-1
	RFI
END(iaccess_bit)

	.org ia64_ivt+0x2800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
ENTRY(daccess_bit)
	DBG_FAULT(10)
	// Like Entry 8, except for data access
	MOV_FROM_IFA(r16)		// get the address that caused the fault
	movl r30=1f			// load continuation point in case of nested fault
	;;
	THASH(p0, r17, r16, r18)	// compute virtual address of L3 PTE
	mov r31=pr
	mov r29=b0			// save b0 in case of nested fault)
#ifdef CONFIG_SMP
	mov r28=ar.ccv			// save ar.ccv
	;;
1:	ld8 r18=[r17]
	;;				// avoid RAW on r18
	mov ar.ccv=r18			// set compare value for cmpxchg
	or r25=_PAGE_A,r18		// set the dirty bit
	tbit.z p7,p6 = r18,_PAGE_P_BIT	// Check present bit
	;;
(p6)	cmpxchg8.acq r26=[r17],r25,ar.ccv	// Only if page is present
	mov r24=PAGE_SHIFT<<2
	;;
(p6)
cmp.eq p6,p7=r26,r18 // Only if page is present ;; ITC_D(p6, r25, r26) // install updated PTE /* * Tell the assemblers dependency-violation checker that the above "itc" instructions * cannot possibly affect the following loads: */ dv_serialize_data ;; ld8 r18=[r17] // read PTE again ;; cmp.eq p6,p7=r18,r25 // is it same as the newly installed ;; (p7) ptc.l r16,r24 mov ar.ccv=r28 #else ;; 1: ld8 r18=[r17] ;; // avoid RAW on r18 or r18=_PAGE_A,r18 // set the accessed bit ;; st8 [r17]=r18 // store back updated PTE ITC_D(p0, r18, r16) // install updated PTE #endif mov b0=r29 // restore b0 mov pr=r31,-1 RFI END(daccess_bit) .org ia64_ivt+0x2c00 ///////////////////////////////////////////////////////////////////////////////////////// // 0x2c00 Entry 11 (size 64 bundles) Break instruction (33) ENTRY(break_fault) /* * The streamlined system call entry/exit paths only save/restore the initial part * of pt_regs. This implies that the callers of system-calls must adhere to the * normal procedure calling conventions. * * Registers to be saved & restored: * CR registers: cr.ipsr, cr.iip, cr.ifs * AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore, ar.fpsr * others: pr, b0, b6, loadrs, r1, r11, r12, r13, r15 * Registers to be restored only: * r8-r11: output value from the system call. * * During system call exit, scratch registers (including r15) are modified/cleared * to prevent leaking bits from kernel to user level. 
*/ DBG_FAULT(11) mov.m r16=IA64_KR(CURRENT) // M2 r16 <- current task (12 cyc) MOV_FROM_IPSR(p0, r29) // M2 (12 cyc) mov r31=pr // I0 (2 cyc) MOV_FROM_IIM(r17) // M2 (2 cyc) mov.m r27=ar.rsc // M2 (12 cyc) mov r18=__IA64_BREAK_SYSCALL // A mov.m ar.rsc=0 // M2 mov.m r21=ar.fpsr // M2 (12 cyc) mov r19=b6 // I0 (2 cyc) ;; mov.m r23=ar.bspstore // M2 (12 cyc) mov.m r24=ar.rnat // M2 (5 cyc) mov.i r26=ar.pfs // I0 (2 cyc) invala // M0|1 nop.m 0 // M mov r20=r1 // A save r1 nop.m 0 movl r30=sys_call_table // X MOV_FROM_IIP(r28) // M2 (2 cyc) cmp.eq p0,p7=r18,r17 // I0 is this a system call? (p7) br.cond.spnt non_syscall // B no -> // // From this point on, we are definitely on the syscall-path // and we can use (non-banked) scratch registers. // /////////////////////////////////////////////////////////////////////// mov r1=r16 // A move task-pointer to "addl"-addressable reg mov r2=r16 // A setup r2 for ia64_syscall_setup add r9=TI_FLAGS+IA64_TASK_SIZE,r16 // A r9 = &current_thread_info()->flags adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 adds r15=-1024,r15 // A subtract 1024 from syscall number mov r3=NR_syscalls - 1 ;; ld1.bias r17=[r16] // M0|1 r17 = current->thread.on_ustack flag ld4 r9=[r9] // M0|1 r9 = current_thread_info()->flags extr.u r8=r29,41,2 // I0 extract ei field from cr.ipsr shladd r30=r15,3,r30 // A r30 = sys_call_table + 8*(syscall-1024) addl r22=IA64_RBS_OFFSET,r1 // A compute base of RBS cmp.leu p6,p7=r15,r3 // A syscall number in range? ;; lfetch.fault.excl.nt1 [r22] // M0|1 prefetch RBS (p6) ld8 r30=[r30] // M0|1 load address of syscall entry point tnat.nz.or p7,p0=r15 // I0 is syscall nr a NaT? mov.m ar.bspstore=r22 // M2 switch to kernel RBS cmp.eq p8,p9=2,r8 // A isr.ei==2? 
;; (p8) mov r8=0 // A clear ei to 0 (p7) movl r30=sys_ni_syscall // X (p8) adds r28=16,r28 // A switch cr.iip to next bundle (p9) adds r8=1,r8 // A increment ei to next slot #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE ;; mov b6=r30 // I0 setup syscall handler branch reg early #else nop.i 0 ;; #endif mov.m r25=ar.unat // M2 (5 cyc) dep r29=r8,r29,41,2 // I0 insert new ei into cr.ipsr adds r15=1024,r15 // A restore original syscall number // // If any of the above loads miss in L1D, we'll stall here until // the data arrives. // /////////////////////////////////////////////////////////////////////// st1 [r16]=r0 // M2|3 clear current->thread.on_ustack flag #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE MOV_FROM_ITC(p0, p14, r30, r18) // M get cycle for accounting #else mov b6=r30 // I0 setup syscall handler branch reg early #endif cmp.eq pKStk,pUStk=r0,r17 // A were we on kernel stacks already? and r9=_TIF_SYSCALL_TRACEAUDIT,r9 // A mask trace or audit mov r18=ar.bsp // M2 (12 cyc) (pKStk) br.cond.spnt .break_fixup // B we're already in kernel-mode -- fix up RBS ;; .back_from_break_fixup: (pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1 // A compute base of memory stack cmp.eq p14,p0=r9,r0 // A are syscalls being traced/audited? 
	br.call.sptk.many b7=ia64_syscall_setup	// B
1:
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	// Fold the user-mode interval (since last kernel leave) into utime and
	// restart the stamp.  mov.m r30=ar.itc was done in advance, r13 is current.
	add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13	// A
	add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13	// A
(pKStk)	br.cond.spnt .skip_accounting		// B	unlikely skip
	;;
	ld8 r18=[r16],TI_AC_STIME-TI_AC_STAMP	// M  get last stamp
	ld8 r19=[r17],TI_AC_UTIME-TI_AC_LEAVE	// M  time at leave
	;;
	ld8 r20=[r16],TI_AC_STAMP-TI_AC_STIME	// M  cumulated stime
	ld8 r21=[r17]				// M  cumulated utime
	sub r22=r19,r18				// A  stime before leave
	;;
	st8 [r16]=r30,TI_AC_STIME-TI_AC_STAMP	// M  update stamp
	sub r18=r30,r19				// A  elapsed time in user
	;;
	add r20=r20,r22				// A  sum stime
	add r21=r21,r18				// A  sum utime
	;;
	st8 [r16]=r20				// M  update stime
	st8 [r17]=r21				// M  update utime
	;;
.skip_accounting:
#endif
	mov ar.rsc=0x3				// M2   set eager mode, pl 0, LE, loadrs=0
	nop 0
	BSW_1(r2, r14)				// B (6 cyc) regs are saved, switch to bank 1
	;;

	SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r16)	// M2	now it's safe to re-enable intr.-collection
						// M0   ensure interruption collection is on
	movl r3=ia64_ret_from_syscall		// X
	;;
	mov rp=r3				// I0   set the real return addr
(p10)	br.cond.spnt.many ia64_ret_from_syscall	// B    return if bad call-frame or r15 is a NaT

	SSM_PSR_I(p15, p15, r16)		// M2   restore psr.i
(p14)	br.call.sptk.many b6=b6			// B    invoke syscall-handler (ignore return addr)
	br.cond.spnt.many ia64_trace_syscall	// B	do syscall-tracing thingamagic
	// NOT REACHED
///////////////////////////////////////////////////////////////////////
	// On entry, we optimistically assumed that we're coming from user-space.
// For the rare cases where a system-call is done from within the kernel, // we fix things up at this point: .break_fixup: add r1=-IA64_PT_REGS_SIZE,sp // A allocate space for pt_regs structure mov ar.rnat=r24 // M2 restore kernel's AR.RNAT ;; mov ar.bspstore=r23 // M2 restore kernel's AR.BSPSTORE br.cond.sptk .back_from_break_fixup END(break_fault) .org ia64_ivt+0x3000 ///////////////////////////////////////////////////////////////////////////////////////// // 0x3000 Entry 12 (size 64 bundles) External Interrupt (4) ENTRY(interrupt) /* interrupt handler has become too big to fit this area. */ br.sptk.many __interrupt END(interrupt) .org ia64_ivt+0x3400 ///////////////////////////////////////////////////////////////////////////////////////// // 0x3400 Entry 13 (size 64 bundles) Reserved DBG_FAULT(13) FAULT(13) .org ia64_ivt+0x3800 ///////////////////////////////////////////////////////////////////////////////////////// // 0x3800 Entry 14 (size 64 bundles) Reserved DBG_FAULT(14) FAULT(14) /* * There is no particular reason for this code to be here, other than that * there happens to be space here that would go unused otherwise. If this * fault ever gets "unreserved", simply moved the following code to a more * suitable spot... * * ia64_syscall_setup() is a separate subroutine so that it can * allocate stacked registers so it can safely demine any * potential NaT values from the input registers. 
* * On entry: * - executing on bank 0 or bank 1 register set (doesn't matter) * - r1: stack pointer * - r2: current task pointer * - r3: preserved * - r11: original contents (saved ar.pfs to be saved) * - r12: original contents (sp to be saved) * - r13: original contents (tp to be saved) * - r15: original contents (syscall # to be saved) * - r18: saved bsp (after switching to kernel stack) * - r19: saved b6 * - r20: saved r1 (gp) * - r21: saved ar.fpsr * - r22: kernel's register backing store base (krbs_base) * - r23: saved ar.bspstore * - r24: saved ar.rnat * - r25: saved ar.unat * - r26: saved ar.pfs * - r27: saved ar.rsc * - r28: saved cr.iip * - r29: saved cr.ipsr * - r30: ar.itc for accounting (don't touch) * - r31: saved pr * - b0: original contents (to be saved) * On exit: * - p10: TRUE if syscall is invoked with more than 8 out * registers or r15's Nat is true * - r1: kernel's gp * - r3: preserved (same as on entry) * - r8: -EINVAL if p10 is true * - r12: points to kernel stack * - r13: points to current task * - r14: preserved (same as on entry) * - p13: preserved * - p15: TRUE if interrupts need to be re-enabled * - ar.fpsr: set to kernel settings * - b6: preserved (same as on entry) */ GLOBAL_ENTRY(ia64_syscall_setup) #if PT(B6) != 0 # error This code assumes that b6 is the first field in pt_regs. 
#endif st8 [r1]=r19 // save b6 add r16=PT(CR_IPSR),r1 // initialize first base pointer add r17=PT(R11),r1 // initialize second base pointer ;; alloc r19=ar.pfs,8,0,0,0 // ensure in0-in7 are writable st8 [r16]=r29,PT(AR_PFS)-PT(CR_IPSR) // save cr.ipsr tnat.nz p8,p0=in0 st8.spill [r17]=r11,PT(CR_IIP)-PT(R11) // save r11 tnat.nz p9,p0=in1 (pKStk) mov r18=r0 // make sure r18 isn't NaT ;; st8 [r16]=r26,PT(CR_IFS)-PT(AR_PFS) // save ar.pfs st8 [r17]=r28,PT(AR_UNAT)-PT(CR_IIP) // save cr.iip mov r28=b0 // save b0 (2 cyc) ;; st8 [r17]=r25,PT(AR_RSC)-PT(AR_UNAT) // save ar.unat dep r19=0,r19,38,26 // clear all bits but 0..37 [I0] (p8) mov in0=-1 ;; st8 [r16]=r19,PT(AR_RNAT)-PT(CR_IFS) // store ar.pfs.pfm in cr.ifs extr.u r11=r19,7,7 // I0 // get sol of ar.pfs and r8=0x7f,r19 // A // get sof of ar.pfs st8 [r17]=r27,PT(AR_BSPSTORE)-PT(AR_RSC)// save ar.rsc tbit.nz p15,p0=r29,IA64_PSR_I_BIT // I0 (p9) mov in1=-1 ;; (pUStk) sub r18=r18,r22 // r18=RSE.ndirty*8 tnat.nz p10,p0=in2 add r11=8,r11 ;; (pKStk) adds r16=PT(PR)-PT(AR_RNAT),r16 // skip over ar_rnat field (pKStk) adds r17=PT(B0)-PT(AR_BSPSTORE),r17 // skip over ar_bspstore field tnat.nz p11,p0=in3 ;; (p10) mov in2=-1 tnat.nz p12,p0=in4 // [I0] (p11) mov in3=-1 ;; (pUStk) st8 [r16]=r24,PT(PR)-PT(AR_RNAT) // save ar.rnat (pUStk) st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE) // save ar.bspstore shl r18=r18,16 // compute ar.rsc to be used for "loadrs" ;; st8 [r16]=r31,PT(LOADRS)-PT(PR) // save predicates st8 [r17]=r28,PT(R1)-PT(B0) // save b0 tnat.nz p13,p0=in5 // [I0] ;; st8 [r16]=r18,PT(R12)-PT(LOADRS) // save ar.rsc value for "loadrs" st8.spill [r17]=r20,PT(R13)-PT(R1) // save original r1 (p12) mov in4=-1 ;; .mem.offset 0,0; st8.spill [r16]=r12,PT(AR_FPSR)-PT(R12) // save r12 .mem.offset 8,0; st8.spill [r17]=r13,PT(R15)-PT(R13) // save r13 (p13) mov in5=-1 ;; st8 [r16]=r21,PT(R8)-PT(AR_FPSR) // save ar.fpsr tnat.nz p13,p0=in6 cmp.lt p10,p9=r11,r8 // frame size can't be more than local+8 ;; mov r8=1 (p9) tnat.nz p10,p0=r15 adds 
r12=-16,r1 // switch to kernel memory stack (with 16 bytes of scratch) st8.spill [r17]=r15 // save r15 tnat.nz p8,p0=in7 nop.i 0 mov r13=r2 // establish `current' movl r1=__gp // establish kernel global pointer ;; st8 [r16]=r8 // ensure pt_regs.r8 != 0 (see handle_syscall_error) (p13) mov in6=-1 (p8) mov in7=-1 cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0 movl r17=FPSR_DEFAULT ;; mov.m ar.fpsr=r17 // set ar.fpsr to kernel default value (p10) mov r8=-EINVAL br.ret.sptk.many b7 END(ia64_syscall_setup) .org ia64_ivt+0x3c00 ///////////////////////////////////////////////////////////////////////////////////////// // 0x3c00 Entry 15 (size 64 bundles) Reserved DBG_FAULT(15) FAULT(15) .org ia64_ivt+0x4000 ///////////////////////////////////////////////////////////////////////////////////////// // 0x4000 Entry 16 (size 64 bundles) Reserved DBG_FAULT(16) FAULT(16) #if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) /* * There is no particular reason for this code to be here, other than * that there happens to be space here that would go unused otherwise. * If this fault ever gets "unreserved", simply moved the following * code to a more suitable spot... * * account_sys_enter is called from SAVE_MIN* macros if accounting is * enabled and if the macro is entered from user mode. 
 */
GLOBAL_ENTRY(account_sys_enter)
	// Charge the time spent in user mode since the last kernel leave to
	// utime, and restart the per-thread timestamp.
	// mov.m r20=ar.itc is called in advance, and r13 is current
	add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13	// r16 -> ac_stamp (offset from r13 = current)
	add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13	// r17 -> ac_leave
	;;
	ld8 r18=[r16],TI_AC_STIME-TI_AC_STAMP	// time at last check in kernel
	ld8 r19=[r17],TI_AC_UTIME-TI_AC_LEAVE	// time at last leave from kernel
	;;
	ld8 r23=[r16],TI_AC_STAMP-TI_AC_STIME	// cumulated stime
	ld8 r21=[r17]				// cumulated utime
	sub r22=r19,r18				// stime before leave kernel
	;;
	st8 [r16]=r20,TI_AC_STIME-TI_AC_STAMP	// update stamp with current ar.itc (r20)
	sub r18=r20,r19				// elapsed time in user mode
	;;
	add r23=r23,r22				// sum stime
	add r21=r21,r18				// sum utime
	;;
	st8 [r16]=r23				// update stime
	st8 [r17]=r21				// update utime
	;;
	br.ret.sptk.many rp
END(account_sys_enter)
#endif

	.org ia64_ivt+0x4400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4400 Entry 17 (size 64 bundles) Reserved
	DBG_FAULT(17)
	FAULT(17)

	.org ia64_ivt+0x4800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4800 Entry 18 (size 64 bundles) Reserved
	DBG_FAULT(18)
	FAULT(18)

	.org ia64_ivt+0x4c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4c00 Entry 19 (size 64 bundles) Reserved
	DBG_FAULT(19)
	FAULT(19)

//
// --- End of long entries, Beginning of short entries
//

	.org ia64_ivt+0x5000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49)
ENTRY(page_not_present)
	DBG_FAULT(20)
	MOV_FROM_IFA(r16)
	RSM_PSR_DT
	/*
	 * The Linux page fault handler doesn't expect non-present pages to be in
	 * the TLB.  Flush the existing entry now, so we meet that expectation.
*/ mov r17=PAGE_SHIFT<<2 ;; ptc.l r16,r17 ;; mov r31=pr srlz.d br.sptk.many page_fault END(page_not_present) .org ia64_ivt+0x5100 ///////////////////////////////////////////////////////////////////////////////////////// // 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52) ENTRY(key_permission) DBG_FAULT(21) MOV_FROM_IFA(r16) RSM_PSR_DT mov r31=pr ;; srlz.d br.sptk.many page_fault END(key_permission) .org ia64_ivt+0x5200 ///////////////////////////////////////////////////////////////////////////////////////// // 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26) ENTRY(iaccess_rights) DBG_FAULT(22) MOV_FROM_IFA(r16) RSM_PSR_DT mov r31=pr ;; srlz.d br.sptk.many page_fault END(iaccess_rights) .org ia64_ivt+0x5300 ///////////////////////////////////////////////////////////////////////////////////////// // 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53) ENTRY(daccess_rights) DBG_FAULT(23) MOV_FROM_IFA(r16) RSM_PSR_DT mov r31=pr ;; srlz.d br.sptk.many page_fault END(daccess_rights) .org ia64_ivt+0x5400 ///////////////////////////////////////////////////////////////////////////////////////// // 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39) ENTRY(general_exception) DBG_FAULT(24) MOV_FROM_ISR(r16) mov r31=pr ;; cmp4.eq p6,p0=0,r16 (p6) br.sptk.many dispatch_illegal_op_fault ;; mov r19=24 // fault number br.sptk.many dispatch_to_fault_handler END(general_exception) .org ia64_ivt+0x5500 ///////////////////////////////////////////////////////////////////////////////////////// // 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35) ENTRY(disabled_fp_reg) DBG_FAULT(25) rsm psr.dfh // ensure we can access fph ;; srlz.d mov r31=pr mov r19=25 br.sptk.many dispatch_to_fault_handler END(disabled_fp_reg) .org ia64_ivt+0x5600 ///////////////////////////////////////////////////////////////////////////////////////// // 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50) ENTRY(nat_consumption) DBG_FAULT(26) 
	MOV_FROM_IPSR(p0, r16)
	MOV_FROM_ISR(r17)
	mov r31=pr				// save PR
	;;
	and r18=0xf,r17				// r18 = cr.isr.code{3:0}
	tbit.z p6,p0=r17,IA64_ISR_NA_BIT
	;;
	cmp.ne.or p6,p0=IA64_ISR_CODE_LFETCH,r18
	dep r16=-1,r16,IA64_PSR_ED_BIT,1	// set ed bit in the saved IPSR image
(p6)	br.cond.spnt 1f		// branch if (cr.isr.na == 0 || cr.isr.code{3:0} != LFETCH)
	;;
	MOV_TO_IPSR(p0, r16, r18)
	mov pr=r31,-1
	;;
	RFI
1:	mov pr=r31,-1
	;;
	FAULT(26)
END(nat_consumption)

	.org ia64_ivt+0x5700
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5700 Entry 27 (size 16 bundles) Speculation (40)
ENTRY(speculation_vector)
	DBG_FAULT(27)
	/*
	 * A [f]chk.[as] instruction needs to take the branch to the recovery code but
	 * this part of the architecture is not implemented in hardware on some CPUs, such
	 * as Itanium.  Thus, in general we need to emulate the behavior.  IIM contains
	 * the relative target (not yet sign extended).  So after sign extending it we
	 * simply add it to IIP.  We also need to reset the EI field of the IPSR to zero,
	 * i.e., the slot to restart into.
	 *
	 * cr.imm contains zero_ext(imm21)
	 */
	MOV_FROM_IIM(r18)
	;;
	MOV_FROM_IIP(r17)
	shl r18=r18,43			// put sign bit in position (43=64-21)
	;;
	MOV_FROM_IPSR(p0, r16)
	shr r18=r18,39			// sign extend (39=43-4)
	;;
	add r17=r17,r18			// now add the offset
	;;
	MOV_TO_IIP(r17, r19)
	dep r16=0,r16,41,2		// clear EI
	;;
	MOV_TO_IPSR(p0, r16, r19)
	;;
	RFI
END(speculation_vector)

	.org ia64_ivt+0x5800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5800 Entry 28 (size 16 bundles) Reserved
	DBG_FAULT(28)
	FAULT(28)

	.org ia64_ivt+0x5900
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
ENTRY(debug_vector)
	DBG_FAULT(29)
	FAULT(29)
END(debug_vector)

	.org ia64_ivt+0x5a00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
ENTRY(unaligned_access)
	DBG_FAULT(30)
	mov r31=pr		// prepare to save predicates
	;;
	br.sptk.many dispatch_unaligned_handler
END(unaligned_access)

	.org ia64_ivt+0x5b00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
ENTRY(unsupported_data_reference)
	DBG_FAULT(31)
	FAULT(31)
END(unsupported_data_reference)

	.org ia64_ivt+0x5c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64)
ENTRY(floating_point_fault)
	DBG_FAULT(32)
	FAULT(32)
END(floating_point_fault)

	.org ia64_ivt+0x5d00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
ENTRY(floating_point_trap)
	DBG_FAULT(33)
	FAULT(33)
END(floating_point_trap)

	.org ia64_ivt+0x5e00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5e00 Entry 34 (size 16 bundles)
Lower Privilege Transfer Trap (66) ENTRY(lower_privilege_trap) DBG_FAULT(34) FAULT(34) END(lower_privilege_trap) .org ia64_ivt+0x5f00 ///////////////////////////////////////////////////////////////////////////////////////// // 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68) ENTRY(taken_branch_trap) DBG_FAULT(35) FAULT(35) END(taken_branch_trap) .org ia64_ivt+0x6000 ///////////////////////////////////////////////////////////////////////////////////////// // 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69) ENTRY(single_step_trap) DBG_FAULT(36) FAULT(36) END(single_step_trap) .org ia64_ivt+0x6100 ///////////////////////////////////////////////////////////////////////////////////////// // 0x6100 Entry 37 (size 16 bundles) Reserved DBG_FAULT(37) FAULT(37) .org ia64_ivt+0x6200 ///////////////////////////////////////////////////////////////////////////////////////// // 0x6200 Entry 38 (size 16 bundles) Reserved DBG_FAULT(38) FAULT(38) .org ia64_ivt+0x6300 ///////////////////////////////////////////////////////////////////////////////////////// // 0x6300 Entry 39 (size 16 bundles) Reserved DBG_FAULT(39) FAULT(39) .org ia64_ivt+0x6400 ///////////////////////////////////////////////////////////////////////////////////////// // 0x6400 Entry 40 (size 16 bundles) Reserved DBG_FAULT(40) FAULT(40) .org ia64_ivt+0x6500 ///////////////////////////////////////////////////////////////////////////////////////// // 0x6500 Entry 41 (size 16 bundles) Reserved DBG_FAULT(41) FAULT(41) .org ia64_ivt+0x6600 ///////////////////////////////////////////////////////////////////////////////////////// // 0x6600 Entry 42 (size 16 bundles) Reserved DBG_FAULT(42) FAULT(42) .org ia64_ivt+0x6700 ///////////////////////////////////////////////////////////////////////////////////////// // 0x6700 Entry 43 (size 16 bundles) Reserved DBG_FAULT(43) FAULT(43) .org ia64_ivt+0x6800 ///////////////////////////////////////////////////////////////////////////////////////// // 0x6800 Entry 44 (size 
// (cont.) 16 bundles) Reserved
	DBG_FAULT(44)
	FAULT(44)

	.org ia64_ivt+0x6900
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6900 Entry 45 (size 16 bundles) IA-32 Exception (17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
ENTRY(ia32_exception)
	DBG_FAULT(45)
	FAULT(45)
END(ia32_exception)

	.org ia64_ivt+0x6a00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
ENTRY(ia32_intercept)
	DBG_FAULT(46)
	FAULT(46)
END(ia32_intercept)

	.org ia64_ivt+0x6b00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74)
ENTRY(ia32_interrupt)
	DBG_FAULT(47)
	FAULT(47)
END(ia32_interrupt)

	.org ia64_ivt+0x6c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6c00 Entry 48 (size 16 bundles) Reserved
	DBG_FAULT(48)
	FAULT(48)

	.org ia64_ivt+0x6d00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6d00 Entry 49 (size 16 bundles) Reserved
	DBG_FAULT(49)
	FAULT(49)

	.org ia64_ivt+0x6e00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6e00 Entry 50 (size 16 bundles) Reserved
	DBG_FAULT(50)
	FAULT(50)

	.org ia64_ivt+0x6f00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6f00 Entry 51 (size 16 bundles) Reserved
	DBG_FAULT(51)
	FAULT(51)

	.org ia64_ivt+0x7000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7000 Entry 52 (size 16 bundles) Reserved
	DBG_FAULT(52)
	FAULT(52)

	.org ia64_ivt+0x7100
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7100 Entry 53 (size 16 bundles) Reserved
	DBG_FAULT(53)
	FAULT(53)

	.org ia64_ivt+0x7200
///////////////////////////////////////////////////////////////////////////////////////// // 0x7200 Entry 54 (size 16 bundles) Reserved DBG_FAULT(54) FAULT(54) .org ia64_ivt+0x7300 ///////////////////////////////////////////////////////////////////////////////////////// // 0x7300 Entry 55 (size 16 bundles) Reserved DBG_FAULT(55) FAULT(55) .org ia64_ivt+0x7400 ///////////////////////////////////////////////////////////////////////////////////////// // 0x7400 Entry 56 (size 16 bundles) Reserved DBG_FAULT(56) FAULT(56) .org ia64_ivt+0x7500 ///////////////////////////////////////////////////////////////////////////////////////// // 0x7500 Entry 57 (size 16 bundles) Reserved DBG_FAULT(57) FAULT(57) .org ia64_ivt+0x7600 ///////////////////////////////////////////////////////////////////////////////////////// // 0x7600 Entry 58 (size 16 bundles) Reserved DBG_FAULT(58) FAULT(58) .org ia64_ivt+0x7700 ///////////////////////////////////////////////////////////////////////////////////////// // 0x7700 Entry 59 (size 16 bundles) Reserved DBG_FAULT(59) FAULT(59) .org ia64_ivt+0x7800 ///////////////////////////////////////////////////////////////////////////////////////// // 0x7800 Entry 60 (size 16 bundles) Reserved DBG_FAULT(60) FAULT(60) .org ia64_ivt+0x7900 ///////////////////////////////////////////////////////////////////////////////////////// // 0x7900 Entry 61 (size 16 bundles) Reserved DBG_FAULT(61) FAULT(61) .org ia64_ivt+0x7a00 ///////////////////////////////////////////////////////////////////////////////////////// // 0x7a00 Entry 62 (size 16 bundles) Reserved DBG_FAULT(62) FAULT(62) .org ia64_ivt+0x7b00 ///////////////////////////////////////////////////////////////////////////////////////// // 0x7b00 Entry 63 (size 16 bundles) Reserved DBG_FAULT(63) FAULT(63) .org ia64_ivt+0x7c00 ///////////////////////////////////////////////////////////////////////////////////////// // 0x7c00 Entry 64 (size 16 bundles) Reserved DBG_FAULT(64) FAULT(64) .org ia64_ivt+0x7d00 
///////////////////////////////////////////////////////////////////////////////////////// // 0x7d00 Entry 65 (size 16 bundles) Reserved DBG_FAULT(65) FAULT(65) .org ia64_ivt+0x7e00 ///////////////////////////////////////////////////////////////////////////////////////// // 0x7e00 Entry 66 (size 16 bundles) Reserved DBG_FAULT(66) FAULT(66) .org ia64_ivt+0x7f00 ///////////////////////////////////////////////////////////////////////////////////////// // 0x7f00 Entry 67 (size 16 bundles) Reserved DBG_FAULT(67) FAULT(67) //----------------------------------------------------------------------------------- // call do_page_fault (predicates are in r31, psr.dt may be off, r16 is faulting address) ENTRY(page_fault) SSM_PSR_DT_AND_SRLZ_I ;; SAVE_MIN_WITH_COVER alloc r15=ar.pfs,0,0,3,0 MOV_FROM_IFA(out0) MOV_FROM_ISR(out1) SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r14, r3) adds r3=8,r2 // set up second base pointer SSM_PSR_I(p15, p15, r14) // restore psr.i movl r14=ia64_leave_kernel ;; SAVE_REST mov rp=r14 ;; adds out2=16,r12 // out2 = pointer to pt_regs br.call.sptk.many b6=ia64_do_page_fault // ignore return address END(page_fault) ENTRY(non_syscall) mov ar.rsc=r27 // restore ar.rsc before SAVE_MIN_WITH_COVER ;; SAVE_MIN_WITH_COVER // There is no particular reason for this code to be here, other than that // there happens to be space here that would go unused otherwise. If this // fault ever gets "unreserved", simply moved the following code to a more // suitable spot... 
alloc r14=ar.pfs,0,0,2,0 MOV_FROM_IIM(out0) add out1=16,sp adds r3=8,r2 // set up second base pointer for SAVE_REST SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r15, r24) // guarantee that interruption collection is on SSM_PSR_I(p15, p15, r15) // restore psr.i movl r15=ia64_leave_kernel ;; SAVE_REST mov rp=r15 ;; br.call.sptk.many b6=ia64_bad_break // avoid WAW on CFM and ignore return addr END(non_syscall) ENTRY(__interrupt) DBG_FAULT(12) mov r31=pr // prepare to save predicates ;; SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3 SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r14) // ensure everybody knows psr.ic is back on adds r3=8,r2 // set up second base pointer for SAVE_REST ;; SAVE_REST ;; MCA_RECOVER_RANGE(interrupt) alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group MOV_FROM_IVR(out0, r8) // pass cr.ivr as first arg add out1=16,sp // pass pointer to pt_regs as second arg ;; srlz.d // make sure we see the effect of cr.ivr movl r14=ia64_leave_kernel ;; mov rp=r14 br.call.sptk.many b6=ia64_handle_irq END(__interrupt) /* * There is no particular reason for this code to be here, other than that * there happens to be space here that would go unused otherwise. If this * fault ever gets "unreserved", simply moved the following code to a more * suitable spot... */ ENTRY(dispatch_unaligned_handler) SAVE_MIN_WITH_COVER ;; alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!) MOV_FROM_IFA(out0) adds out1=16,sp SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r24) // guarantee that interruption collection is on SSM_PSR_I(p15, p15, r3) // restore psr.i adds r3=8,r2 // set up second base pointer ;; SAVE_REST movl r14=ia64_leave_kernel ;; mov rp=r14 br.sptk.many ia64_prepare_handle_unaligned END(dispatch_unaligned_handler) /* * There is no particular reason for this code to be here, other than that * there happens to be space here that would go unused otherwise. 
If this * fault ever gets "unreserved", simply moved the following code to a more * suitable spot... */ ENTRY(dispatch_to_fault_handler) /* * Input: * psr.ic: off * r19: fault vector number (e.g., 24 for General Exception) * r31: contains saved predicates (pr) */ SAVE_MIN_WITH_COVER_R19 alloc r14=ar.pfs,0,0,5,0 MOV_FROM_ISR(out1) MOV_FROM_IFA(out2) MOV_FROM_IIM(out3) MOV_FROM_ITIR(out4) ;; SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, out0) // guarantee that interruption collection is on mov out0=r15 ;; SSM_PSR_I(p15, p15, r3) // restore psr.i adds r3=8,r2 // set up second base pointer for SAVE_REST ;; SAVE_REST movl r14=ia64_leave_kernel ;; mov rp=r14 br.call.sptk.many b6=ia64_fault END(dispatch_to_fault_handler) /* * Squatting in this space ... * * This special case dispatcher for illegal operation faults allows preserved * registers to be modified through a callback function (asm only) that is handed * back from the fault handler in r8. Up to three arguments can be passed to the * callback function by returning an aggregate with the callback as its first * element, followed by the arguments. */ ENTRY(dispatch_illegal_op_fault) .prologue .body SAVE_MIN_WITH_COVER SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r24) // guarantee that interruption collection is on ;; SSM_PSR_I(p15, p15, r3) // restore psr.i adds r3=8,r2 // set up second base pointer for SAVE_REST ;; alloc r14=ar.pfs,0,0,1,0 // must be first in insn group mov out0=ar.ec ;; SAVE_REST PT_REGS_UNWIND_INFO(0) ;; br.call.sptk.many rp=ia64_illegal_op_fault .ret0: ;; alloc r14=ar.pfs,0,0,3,0 // must be first in insn group mov out0=r9 mov out1=r10 mov out2=r11 movl r15=ia64_leave_kernel ;; mov rp=r15 mov b6=r8 ;; cmp.ne p6,p0=0,r8 (p6) br.call.dpnt.many b6=b6 // call returns to ia64_leave_kernel br.sptk.many ia64_leave_kernel END(dispatch_illegal_op_fault)
aixcc-public/challenge-001-exemplar-source
2,685
arch/ia64/kernel/efi_stub.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * EFI call stub. * * Copyright (C) 1999-2001 Hewlett-Packard Co * David Mosberger <davidm@hpl.hp.com> * * This stub allows us to make EFI calls in physical mode with interrupts * turned off. We need this because we can't call SetVirtualMap() until * the kernel has booted far enough to allow allocation of struct vm_area_struct * entries (which we would need to map stuff with memory attributes other * than uncached or writeback...). Since the GetTime() service gets called * earlier than that, we need to be able to make physical mode EFI calls from * the kernel. */ /* * PSR settings as per SAL spec (Chapter 8 in the "IA-64 System * Abstraction Layer Specification", revision 2.6e). Note that * psr.dfl and psr.dfh MUST be cleared, despite what this manual says. * Otherwise, SAL dies whenever it's trying to do an IA-32 BIOS call * (the br.ia instruction fails unless psr.dfl and psr.dfh are * cleared). Fortunately, SAL promises not to touch the floating * point regs, so at least we don't have to save f2-f127. 
*/ #define PSR_BITS_TO_CLEAR \ (IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_RT | \ IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | \ IA64_PSR_DFL | IA64_PSR_DFH) #define PSR_BITS_TO_SET \ (IA64_PSR_BN) #include <asm/processor.h> #include <asm/asmmacro.h> /* * Inputs: * in0 = address of function descriptor of EFI routine to call * in1..in7 = arguments to routine * * Outputs: * r8 = EFI_STATUS returned by called function */ GLOBAL_ENTRY(efi_call_phys) .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8) alloc loc1=ar.pfs,8,7,7,0 ld8 r2=[in0],8 // load EFI function's entry point mov loc0=rp .body ;; mov loc2=gp // save global pointer mov loc4=ar.rsc // save RSE configuration mov ar.rsc=0 // put RSE in enforced lazy, LE mode ;; ld8 gp=[in0] // load EFI function's global pointer movl r16=PSR_BITS_TO_CLEAR mov loc3=psr // save processor status word movl r17=PSR_BITS_TO_SET ;; or loc3=loc3,r17 mov b6=r2 ;; andcm r16=loc3,r16 // get psr with IT, DT, and RT bits cleared br.call.sptk.many rp=ia64_switch_mode_phys .ret0: mov out4=in5 mov out0=in1 mov out1=in2 mov out2=in3 mov out3=in4 mov out5=in6 mov out6=in7 mov loc5=r19 mov loc6=r20 br.call.sptk.many rp=b6 // call the EFI function .ret1: mov ar.rsc=0 // put RSE in enforced lazy, LE mode mov r16=loc3 mov r19=loc5 mov r20=loc6 br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode .ret2: mov ar.rsc=loc4 // restore RSE configuration mov ar.pfs=loc1 mov rp=loc0 mov gp=loc2 br.ret.sptk.many rp END(efi_call_phys)
aixcc-public/challenge-001-exemplar-source
27,971
arch/ia64/kernel/mca_asm.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * File: mca_asm.S * Purpose: assembly portion of the IA64 MCA handling * * Mods by cfleck to integrate into kernel build * * 2000-03-15 David Mosberger-Tang <davidm@hpl.hp.com> * Added various stop bits to get a clean compile * * 2000-03-29 Chuck Fleckenstein <cfleck@co.intel.com> * Added code to save INIT handoff state in pt_regs format, * switch to temp kstack, switch modes, jump to C INIT handler * * 2002-01-04 J.Hall <jenna.s.hall@intel.com> * Before entering virtual mode code: * 1. Check for TLB CPU error * 2. Restore current thread pointer to kr6 * 3. Move stack ptr 16 bytes to conform to C calling convention * * 2004-11-12 Russ Anderson <rja@sgi.com> * Added per cpu MCA/INIT stack save areas. * * 2005-12-08 Keith Owens <kaos@sgi.com> * Use per cpu MCA/INIT stacks for all data. */ #include <linux/threads.h> #include <linux/pgtable.h> #include <asm/asmmacro.h> #include <asm/processor.h> #include <asm/mca_asm.h> #include <asm/mca.h> #include "entry.h" #define GET_IA64_MCA_DATA(reg) \ GET_THIS_PADDR(reg, ia64_mca_data) \ ;; \ ld8 reg=[reg] .global ia64_do_tlb_purge .global ia64_os_mca_dispatch .global ia64_os_init_on_kdump .global ia64_os_init_dispatch_monarch .global ia64_os_init_dispatch_slave .text .align 16 //StartMain//////////////////////////////////////////////////////////////////// /* * Just the TLB purge part is moved to a separate function * so we can re-use the code for cpu hotplug code as well * Caller should now setup b1, so we can branch once the * tlb flush is complete. 
*/ ia64_do_tlb_purge: #define O(member) IA64_CPUINFO_##member##_OFFSET GET_THIS_PADDR(r2, ia64_cpu_info) // load phys addr of cpu_info into r2 ;; addl r17=O(PTCE_STRIDE),r2 addl r2=O(PTCE_BASE),r2 ;; ld8 r18=[r2],(O(PTCE_COUNT)-O(PTCE_BASE));; // r18=ptce_base ld4 r19=[r2],4 // r19=ptce_count[0] ld4 r21=[r17],4 // r21=ptce_stride[0] ;; ld4 r20=[r2] // r20=ptce_count[1] ld4 r22=[r17] // r22=ptce_stride[1] mov r24=0 ;; adds r20=-1,r20 ;; #undef O 2: cmp.ltu p6,p7=r24,r19 (p7) br.cond.dpnt.few 4f mov ar.lc=r20 3: ptc.e r18 ;; add r18=r22,r18 br.cloop.sptk.few 3b ;; add r18=r21,r18 add r24=1,r24 ;; br.sptk.few 2b 4: srlz.i // srlz.i implies srlz.d ;; // Now purge addresses formerly mapped by TR registers // 1. Purge ITR&DTR for kernel. movl r16=KERNEL_START mov r18=KERNEL_TR_PAGE_SHIFT<<2 ;; ptr.i r16, r18 ptr.d r16, r18 ;; srlz.i ;; srlz.d ;; // 3. Purge ITR for PAL code. GET_THIS_PADDR(r2, ia64_mca_pal_base) ;; ld8 r16=[r2] mov r18=IA64_GRANULE_SHIFT<<2 ;; ptr.i r16,r18 ;; srlz.i ;; // 4. Purge DTR for stack. mov r16=IA64_KR(CURRENT_STACK) ;; shl r16=r16,IA64_GRANULE_SHIFT movl r19=PAGE_OFFSET ;; add r16=r19,r16 mov r18=IA64_GRANULE_SHIFT<<2 ;; ptr.d r16,r18 ;; srlz.i ;; // Now branch away to caller. br.sptk.many b1 ;; //EndMain////////////////////////////////////////////////////////////////////// //StartMain//////////////////////////////////////////////////////////////////// ia64_os_mca_dispatch: mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack LOAD_PHYSICAL(p0,r2,1f) // return address mov r19=1 // All MCA events are treated as monarch (for now) br.sptk ia64_state_save // save the state that is not in minstate 1: GET_IA64_MCA_DATA(r2) // Using MCA stack, struct ia64_sal_os_state, variable proc_state_param ;; add r3=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SOS_OFFSET+SOS(PROC_STATE_PARAM), r2 ;; ld8 r18=[r3] // Get processor state parameter on existing PALE_CHECK. 
;; tbit.nz p6,p7=r18,60 (p7) br.spnt done_tlb_purge_and_reload // The following code purges TC and TR entries. Then reload all TC entries. // Purge percpu data TC entries. begin_tlb_purge_and_reload: movl r18=ia64_reload_tr;; LOAD_PHYSICAL(p0,r18,ia64_reload_tr);; mov b1=r18;; br.sptk.many ia64_do_tlb_purge;; ia64_reload_tr: // Finally reload the TR registers. // 1. Reload DTR/ITR registers for kernel. mov r18=KERNEL_TR_PAGE_SHIFT<<2 movl r17=KERNEL_START ;; mov cr.itir=r18 mov cr.ifa=r17 mov r16=IA64_TR_KERNEL mov r19=ip movl r18=PAGE_KERNEL ;; dep r17=0,r19,0, KERNEL_TR_PAGE_SHIFT ;; or r18=r17,r18 ;; itr.i itr[r16]=r18 ;; itr.d dtr[r16]=r18 ;; srlz.i srlz.d ;; // 3. Reload ITR for PAL code. GET_THIS_PADDR(r2, ia64_mca_pal_pte) ;; ld8 r18=[r2] // load PAL PTE ;; GET_THIS_PADDR(r2, ia64_mca_pal_base) ;; ld8 r16=[r2] // load PAL vaddr mov r19=IA64_GRANULE_SHIFT<<2 ;; mov cr.itir=r19 mov cr.ifa=r16 mov r20=IA64_TR_PALCODE ;; itr.i itr[r20]=r18 ;; srlz.i ;; // 4. Reload DTR for stack. mov r16=IA64_KR(CURRENT_STACK) ;; shl r16=r16,IA64_GRANULE_SHIFT movl r19=PAGE_OFFSET ;; add r18=r19,r16 movl r20=PAGE_KERNEL ;; add r16=r20,r16 mov r19=IA64_GRANULE_SHIFT<<2 ;; mov cr.itir=r19 mov cr.ifa=r18 mov r20=IA64_TR_CURRENT_STACK ;; itr.d dtr[r20]=r16 GET_THIS_PADDR(r2, ia64_mca_tr_reload) mov r18 = 1 ;; srlz.d ;; st8 [r2] =r18 ;; done_tlb_purge_and_reload: // switch to per cpu MCA stack mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack LOAD_PHYSICAL(p0,r2,1f) // return address br.sptk ia64_new_stack 1: // everything saved, now we can set the kernel registers mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack LOAD_PHYSICAL(p0,r2,1f) // return address br.sptk ia64_set_kernel_registers 1: // This must be done in physical mode GET_IA64_MCA_DATA(r2) ;; mov r7=r2 // Enter virtual mode from physical mode VIRTUAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_begin, r4) // This code returns to SAL via SOS r2, in general SAL has no unwind // data. 
To get a clean termination when backtracing the C MCA/INIT // handler, set a dummy return address of 0 in this routine. That // requires that ia64_os_mca_virtual_begin be a global function. ENTRY(ia64_os_mca_virtual_begin) .prologue .save rp,r0 .body mov ar.rsc=3 // set eager mode for C handler mov r2=r7 // see GET_IA64_MCA_DATA above ;; // Call virtual mode handler alloc r14=ar.pfs,0,0,3,0 ;; DATA_PA_TO_VA(r2,r7) ;; add out0=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_PT_REGS_OFFSET, r2 add out1=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SWITCH_STACK_OFFSET, r2 add out2=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SOS_OFFSET, r2 br.call.sptk.many b0=ia64_mca_handler // Revert back to physical mode before going back to SAL PHYSICAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_end, r4) ia64_os_mca_virtual_end: END(ia64_os_mca_virtual_begin) // switch back to previous stack alloc r14=ar.pfs,0,0,0,0 // remove the MCA handler frame mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack LOAD_PHYSICAL(p0,r2,1f) // return address br.sptk ia64_old_stack 1: mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack LOAD_PHYSICAL(p0,r2,1f) // return address br.sptk ia64_state_restore // restore the SAL state 1: mov b0=r12 // SAL_CHECK return address br b0 //EndMain////////////////////////////////////////////////////////////////////// //StartMain//////////////////////////////////////////////////////////////////// // // NOP init handler for kdump. In panic situation, we may receive INIT // while kernel transition. Since we initialize registers on leave from // current kernel, no longer monarch/slave handlers of current kernel in // virtual mode are called safely. // We can unregister these init handlers from SAL, however then the INIT // will result in warmboot by SAL and we cannot retrieve the crashdump. // Therefore register this NOP function to SAL, to prevent entering virtual // mode and resulting warmboot by SAL. 
// ia64_os_init_on_kdump: mov r8=r0 // IA64_INIT_RESUME mov r9=r10 // SAL_GP mov r22=r17 // *minstate ;; mov r10=r0 // return to same context mov b0=r12 // SAL_CHECK return address br b0 // // SAL to OS entry point for INIT on all processors. This has been defined for // registration purposes with SAL as a part of ia64_mca_init. Monarch and // slave INIT have identical processing, except for the value of the // sos->monarch flag in r19. // ia64_os_init_dispatch_monarch: mov r19=1 // Bow, bow, ye lower middle classes! br.sptk ia64_os_init_dispatch ia64_os_init_dispatch_slave: mov r19=0 // <igor>yeth, mathter</igor> ia64_os_init_dispatch: mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack LOAD_PHYSICAL(p0,r2,1f) // return address br.sptk ia64_state_save // save the state that is not in minstate 1: // switch to per cpu INIT stack mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack LOAD_PHYSICAL(p0,r2,1f) // return address br.sptk ia64_new_stack 1: // everything saved, now we can set the kernel registers mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack LOAD_PHYSICAL(p0,r2,1f) // return address br.sptk ia64_set_kernel_registers 1: // This must be done in physical mode GET_IA64_MCA_DATA(r2) ;; mov r7=r2 // Enter virtual mode from physical mode VIRTUAL_MODE_ENTER(r2, r3, ia64_os_init_virtual_begin, r4) // This code returns to SAL via SOS r2, in general SAL has no unwind // data. To get a clean termination when backtracing the C MCA/INIT // handler, set a dummy return address of 0 in this routine. That // requires that ia64_os_init_virtual_begin be a global function. 
ENTRY(ia64_os_init_virtual_begin) .prologue .save rp,r0 .body mov ar.rsc=3 // set eager mode for C handler mov r2=r7 // see GET_IA64_MCA_DATA above ;; // Call virtual mode handler alloc r14=ar.pfs,0,0,3,0 ;; DATA_PA_TO_VA(r2,r7) ;; add out0=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_PT_REGS_OFFSET, r2 add out1=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_SWITCH_STACK_OFFSET, r2 add out2=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_SOS_OFFSET, r2 br.call.sptk.many b0=ia64_init_handler // Revert back to physical mode before going back to SAL PHYSICAL_MODE_ENTER(r2, r3, ia64_os_init_virtual_end, r4) ia64_os_init_virtual_end: END(ia64_os_init_virtual_begin) mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack LOAD_PHYSICAL(p0,r2,1f) // return address br.sptk ia64_state_restore // restore the SAL state 1: // switch back to previous stack alloc r14=ar.pfs,0,0,0,0 // remove the INIT handler frame mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack LOAD_PHYSICAL(p0,r2,1f) // return address br.sptk ia64_old_stack 1: mov b0=r12 // SAL_CHECK return address br b0 //EndMain////////////////////////////////////////////////////////////////////// // common defines for the stubs #define ms r4 #define regs r5 #define temp1 r2 /* careful, it overlaps with input registers */ #define temp2 r3 /* careful, it overlaps with input registers */ #define temp3 r7 #define temp4 r14 //++ // Name: // ia64_state_save() // // Stub Description: // // Save the state that is not in minstate. This is sensitive to the layout of // struct ia64_sal_os_state in mca.h. // // r2 contains the return address, r3 contains either // IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET. // // The OS to SAL section of struct ia64_sal_os_state is set to a default // value of cold boot (MCA) or warm boot (INIT) and return to the same // context. ia64_sal_os_state is also used to hold some registers that // need to be saved and restored across the stack switches. 
// // Most input registers to this stub come from PAL/SAL // r1 os gp, physical // r8 pal_proc entry point // r9 sal_proc entry point // r10 sal gp // r11 MCA - rendevzous state, INIT - reason code // r12 sal return address // r17 pal min_state // r18 processor state parameter // r19 monarch flag, set by the caller of this routine // // In addition to the SAL to OS state, this routine saves all the // registers that appear in struct pt_regs and struct switch_stack, // excluding those that are already in the PAL minstate area. This // results in a partial pt_regs and switch_stack, the C code copies the // remaining registers from PAL minstate to pt_regs and switch_stack. The // resulting structures contain all the state of the original process when // MCA/INIT occurred. // //-- ia64_state_save: add regs=MCA_SOS_OFFSET, r3 add ms=MCA_SOS_OFFSET+8, r3 mov b0=r2 // save return address cmp.eq p1,p2=IA64_MCA_CPU_MCA_STACK_OFFSET, r3 ;; GET_IA64_MCA_DATA(temp2) ;; add temp1=temp2, regs // struct ia64_sal_os_state on MCA or INIT stack add temp2=temp2, ms // struct ia64_sal_os_state+8 on MCA or INIT stack ;; mov regs=temp1 // save the start of sos st8 [temp1]=r1,16 // os_gp st8 [temp2]=r8,16 // pal_proc ;; st8 [temp1]=r9,16 // sal_proc st8 [temp2]=r11,16 // rv_rc mov r11=cr.iipa ;; st8 [temp1]=r18 // proc_state_param st8 [temp2]=r19 // monarch mov r6=IA64_KR(CURRENT) add temp1=SOS(SAL_RA), regs add temp2=SOS(SAL_GP), regs ;; st8 [temp1]=r12,16 // sal_ra st8 [temp2]=r10,16 // sal_gp mov r12=cr.isr ;; st8 [temp1]=r17,16 // pal_min_state st8 [temp2]=r6,16 // prev_IA64_KR_CURRENT mov r6=IA64_KR(CURRENT_STACK) ;; st8 [temp1]=r6,16 // prev_IA64_KR_CURRENT_STACK st8 [temp2]=r0,16 // prev_task, starts off as NULL mov r6=cr.ifa ;; st8 [temp1]=r12,16 // cr.isr st8 [temp2]=r6,16 // cr.ifa mov r12=cr.itir ;; st8 [temp1]=r12,16 // cr.itir st8 [temp2]=r11,16 // cr.iipa mov r12=cr.iim ;; st8 [temp1]=r12 // cr.iim (p1) mov r12=IA64_MCA_COLD_BOOT (p2) mov r12=IA64_INIT_WARM_BOOT mov 
r6=cr.iha add temp1=SOS(OS_STATUS), regs ;; st8 [temp2]=r6 // cr.iha add temp2=SOS(CONTEXT), regs st8 [temp1]=r12 // os_status, default is cold boot mov r6=IA64_MCA_SAME_CONTEXT ;; st8 [temp2]=r6 // context, default is same context // Save the pt_regs data that is not in minstate. The previous code // left regs at sos. add regs=MCA_PT_REGS_OFFSET-MCA_SOS_OFFSET, regs ;; add temp1=PT(B6), regs mov temp3=b6 mov temp4=b7 add temp2=PT(B7), regs ;; st8 [temp1]=temp3,PT(AR_CSD)-PT(B6) // save b6 st8 [temp2]=temp4,PT(AR_SSD)-PT(B7) // save b7 mov temp3=ar.csd mov temp4=ar.ssd cover // must be last in group ;; st8 [temp1]=temp3,PT(AR_UNAT)-PT(AR_CSD) // save ar.csd st8 [temp2]=temp4,PT(AR_PFS)-PT(AR_SSD) // save ar.ssd mov temp3=ar.unat mov temp4=ar.pfs ;; st8 [temp1]=temp3,PT(AR_RNAT)-PT(AR_UNAT) // save ar.unat st8 [temp2]=temp4,PT(AR_BSPSTORE)-PT(AR_PFS) // save ar.pfs mov temp3=ar.rnat mov temp4=ar.bspstore ;; st8 [temp1]=temp3,PT(LOADRS)-PT(AR_RNAT) // save ar.rnat st8 [temp2]=temp4,PT(AR_FPSR)-PT(AR_BSPSTORE) // save ar.bspstore mov temp3=ar.bsp ;; sub temp3=temp3, temp4 // ar.bsp - ar.bspstore mov temp4=ar.fpsr ;; shl temp3=temp3,16 // compute ar.rsc to be used for "loadrs" ;; st8 [temp1]=temp3,PT(AR_CCV)-PT(LOADRS) // save loadrs st8 [temp2]=temp4,PT(F6)-PT(AR_FPSR) // save ar.fpsr mov temp3=ar.ccv ;; st8 [temp1]=temp3,PT(F7)-PT(AR_CCV) // save ar.ccv stf.spill [temp2]=f6,PT(F8)-PT(F6) ;; stf.spill [temp1]=f7,PT(F9)-PT(F7) stf.spill [temp2]=f8,PT(F10)-PT(F8) ;; stf.spill [temp1]=f9,PT(F11)-PT(F9) stf.spill [temp2]=f10 ;; stf.spill [temp1]=f11 // Save the switch_stack data that is not in minstate nor pt_regs. The // previous code left regs at pt_regs. 
add regs=MCA_SWITCH_STACK_OFFSET-MCA_PT_REGS_OFFSET, regs ;; add temp1=SW(F2), regs add temp2=SW(F3), regs ;; stf.spill [temp1]=f2,32 stf.spill [temp2]=f3,32 ;; stf.spill [temp1]=f4,32 stf.spill [temp2]=f5,32 ;; stf.spill [temp1]=f12,32 stf.spill [temp2]=f13,32 ;; stf.spill [temp1]=f14,32 stf.spill [temp2]=f15,32 ;; stf.spill [temp1]=f16,32 stf.spill [temp2]=f17,32 ;; stf.spill [temp1]=f18,32 stf.spill [temp2]=f19,32 ;; stf.spill [temp1]=f20,32 stf.spill [temp2]=f21,32 ;; stf.spill [temp1]=f22,32 stf.spill [temp2]=f23,32 ;; stf.spill [temp1]=f24,32 stf.spill [temp2]=f25,32 ;; stf.spill [temp1]=f26,32 stf.spill [temp2]=f27,32 ;; stf.spill [temp1]=f28,32 stf.spill [temp2]=f29,32 ;; stf.spill [temp1]=f30,SW(B2)-SW(F30) stf.spill [temp2]=f31,SW(B3)-SW(F31) mov temp3=b2 mov temp4=b3 ;; st8 [temp1]=temp3,16 // save b2 st8 [temp2]=temp4,16 // save b3 mov temp3=b4 mov temp4=b5 ;; st8 [temp1]=temp3,SW(AR_LC)-SW(B4) // save b4 st8 [temp2]=temp4 // save b5 mov temp3=ar.lc ;; st8 [temp1]=temp3 // save ar.lc // FIXME: Some proms are incorrectly accessing the minstate area as // cached data. The C code uses region 6, uncached virtual. Ensure // that there is no cache data lying around for the first 1K of the // minstate area. // Remove this code in September 2006, that gives platforms a year to // fix their proms and get their customers updated. 
add r1=32*1,r17 add r2=32*2,r17 add r3=32*3,r17 add r4=32*4,r17 add r5=32*5,r17 add r6=32*6,r17 add r7=32*7,r17 ;; fc r17 fc r1 fc r2 fc r3 fc r4 fc r5 fc r6 fc r7 add r17=32*8,r17 add r1=32*8,r1 add r2=32*8,r2 add r3=32*8,r3 add r4=32*8,r4 add r5=32*8,r5 add r6=32*8,r6 add r7=32*8,r7 ;; fc r17 fc r1 fc r2 fc r3 fc r4 fc r5 fc r6 fc r7 add r17=32*8,r17 add r1=32*8,r1 add r2=32*8,r2 add r3=32*8,r3 add r4=32*8,r4 add r5=32*8,r5 add r6=32*8,r6 add r7=32*8,r7 ;; fc r17 fc r1 fc r2 fc r3 fc r4 fc r5 fc r6 fc r7 add r17=32*8,r17 add r1=32*8,r1 add r2=32*8,r2 add r3=32*8,r3 add r4=32*8,r4 add r5=32*8,r5 add r6=32*8,r6 add r7=32*8,r7 ;; fc r17 fc r1 fc r2 fc r3 fc r4 fc r5 fc r6 fc r7 br.sptk b0 //EndStub////////////////////////////////////////////////////////////////////// //++ // Name: // ia64_state_restore() // // Stub Description: // // Restore the SAL/OS state. This is sensitive to the layout of struct // ia64_sal_os_state in mca.h. // // r2 contains the return address, r3 contains either // IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET. // // In addition to the SAL to OS state, this routine restores all the // registers that appear in struct pt_regs and struct switch_stack, // excluding those in the PAL minstate area. // //-- ia64_state_restore: // Restore the switch_stack data that is not in minstate nor pt_regs. 
add regs=MCA_SWITCH_STACK_OFFSET, r3 mov b0=r2 // save return address ;; GET_IA64_MCA_DATA(temp2) ;; add regs=temp2, regs ;; add temp1=SW(F2), regs add temp2=SW(F3), regs ;; ldf.fill f2=[temp1],32 ldf.fill f3=[temp2],32 ;; ldf.fill f4=[temp1],32 ldf.fill f5=[temp2],32 ;; ldf.fill f12=[temp1],32 ldf.fill f13=[temp2],32 ;; ldf.fill f14=[temp1],32 ldf.fill f15=[temp2],32 ;; ldf.fill f16=[temp1],32 ldf.fill f17=[temp2],32 ;; ldf.fill f18=[temp1],32 ldf.fill f19=[temp2],32 ;; ldf.fill f20=[temp1],32 ldf.fill f21=[temp2],32 ;; ldf.fill f22=[temp1],32 ldf.fill f23=[temp2],32 ;; ldf.fill f24=[temp1],32 ldf.fill f25=[temp2],32 ;; ldf.fill f26=[temp1],32 ldf.fill f27=[temp2],32 ;; ldf.fill f28=[temp1],32 ldf.fill f29=[temp2],32 ;; ldf.fill f30=[temp1],SW(B2)-SW(F30) ldf.fill f31=[temp2],SW(B3)-SW(F31) ;; ld8 temp3=[temp1],16 // restore b2 ld8 temp4=[temp2],16 // restore b3 ;; mov b2=temp3 mov b3=temp4 ld8 temp3=[temp1],SW(AR_LC)-SW(B4) // restore b4 ld8 temp4=[temp2] // restore b5 ;; mov b4=temp3 mov b5=temp4 ld8 temp3=[temp1] // restore ar.lc ;; mov ar.lc=temp3 // Restore the pt_regs data that is not in minstate. The previous code // left regs at switch_stack. add regs=MCA_PT_REGS_OFFSET-MCA_SWITCH_STACK_OFFSET, regs ;; add temp1=PT(B6), regs add temp2=PT(B7), regs ;; ld8 temp3=[temp1],PT(AR_CSD)-PT(B6) // restore b6 ld8 temp4=[temp2],PT(AR_SSD)-PT(B7) // restore b7 ;; mov b6=temp3 mov b7=temp4 ld8 temp3=[temp1],PT(AR_UNAT)-PT(AR_CSD) // restore ar.csd ld8 temp4=[temp2],PT(AR_PFS)-PT(AR_SSD) // restore ar.ssd ;; mov ar.csd=temp3 mov ar.ssd=temp4 ld8 temp3=[temp1] // restore ar.unat add temp1=PT(AR_CCV)-PT(AR_UNAT), temp1 ld8 temp4=[temp2],PT(AR_FPSR)-PT(AR_PFS) // restore ar.pfs ;; mov ar.unat=temp3 mov ar.pfs=temp4 // ar.rnat, ar.bspstore, loadrs are restore in ia64_old_stack. 
ld8 temp3=[temp1],PT(F6)-PT(AR_CCV) // restore ar.ccv ld8 temp4=[temp2],PT(F7)-PT(AR_FPSR) // restore ar.fpsr ;; mov ar.ccv=temp3 mov ar.fpsr=temp4 ldf.fill f6=[temp1],PT(F8)-PT(F6) ldf.fill f7=[temp2],PT(F9)-PT(F7) ;; ldf.fill f8=[temp1],PT(F10)-PT(F8) ldf.fill f9=[temp2],PT(F11)-PT(F9) ;; ldf.fill f10=[temp1] ldf.fill f11=[temp2] // Restore the SAL to OS state. The previous code left regs at pt_regs. add regs=MCA_SOS_OFFSET-MCA_PT_REGS_OFFSET, regs ;; add temp1=SOS(SAL_RA), regs add temp2=SOS(SAL_GP), regs ;; ld8 r12=[temp1],16 // sal_ra ld8 r9=[temp2],16 // sal_gp ;; ld8 r22=[temp1],16 // pal_min_state, virtual ld8 r13=[temp2],16 // prev_IA64_KR_CURRENT ;; ld8 r16=[temp1],16 // prev_IA64_KR_CURRENT_STACK ld8 r20=[temp2],16 // prev_task ;; ld8 temp3=[temp1],16 // cr.isr ld8 temp4=[temp2],16 // cr.ifa ;; mov cr.isr=temp3 mov cr.ifa=temp4 ld8 temp3=[temp1],16 // cr.itir ld8 temp4=[temp2],16 // cr.iipa ;; mov cr.itir=temp3 mov cr.iipa=temp4 ld8 temp3=[temp1] // cr.iim ld8 temp4=[temp2] // cr.iha add temp1=SOS(OS_STATUS), regs add temp2=SOS(CONTEXT), regs ;; mov cr.iim=temp3 mov cr.iha=temp4 dep r22=0,r22,62,1 // pal_min_state, physical, uncached mov IA64_KR(CURRENT)=r13 ld8 r8=[temp1] // os_status ld8 r10=[temp2] // context /* Wire IA64_TR_CURRENT_STACK to the stack that we are resuming to. To * avoid any dependencies on the algorithm in ia64_switch_to(), just * purge any existing CURRENT_STACK mapping and insert the new one. * * r16 contains prev_IA64_KR_CURRENT_STACK, r13 contains * prev_IA64_KR_CURRENT, these values may have been changed by the C * code. Do not use r8, r9, r10, r22, they contain values ready for * the return to SAL. 
*/ mov r15=IA64_KR(CURRENT_STACK) // physical granule mapped by IA64_TR_CURRENT_STACK ;; shl r15=r15,IA64_GRANULE_SHIFT ;; dep r15=-1,r15,61,3 // virtual granule mov r18=IA64_GRANULE_SHIFT<<2 // for cr.itir.ps ;; ptr.d r15,r18 ;; srlz.d extr.u r19=r13,61,3 // r13 = prev_IA64_KR_CURRENT shl r20=r16,IA64_GRANULE_SHIFT // r16 = prev_IA64_KR_CURRENT_STACK movl r21=PAGE_KERNEL // page properties ;; mov IA64_KR(CURRENT_STACK)=r16 cmp.ne p6,p0=RGN_KERNEL,r19 // new stack is in the kernel region? or r21=r20,r21 // construct PA | page properties (p6) br.spnt 1f // the dreaded cpu 0 idle task in region 5:( ;; mov cr.itir=r18 mov cr.ifa=r13 mov r20=IA64_TR_CURRENT_STACK ;; itr.d dtr[r20]=r21 ;; srlz.d 1: br.sptk b0 //EndStub////////////////////////////////////////////////////////////////////// //++ // Name: // ia64_new_stack() // // Stub Description: // // Switch to the MCA/INIT stack. // // r2 contains the return address, r3 contains either // IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET. // // On entry RBS is still on the original stack, this routine switches RBS // to use the MCA/INIT stack. // // On entry, sos->pal_min_state is physical, on exit it is virtual. // //-- ia64_new_stack: add regs=MCA_PT_REGS_OFFSET, r3 add temp2=MCA_SOS_OFFSET+SOS(PAL_MIN_STATE), r3 mov b0=r2 // save return address GET_IA64_MCA_DATA(temp1) invala ;; add temp2=temp2, temp1 // struct ia64_sal_os_state.pal_min_state on MCA or INIT stack add regs=regs, temp1 // struct pt_regs on MCA or INIT stack ;; // Address of minstate area provided by PAL is physical, uncacheable. // Convert to Linux virtual address in region 6 for C code. 
ld8 ms=[temp2] // pal_min_state, physical ;; dep temp1=-1,ms,62,2 // set region 6 mov temp3=IA64_RBS_OFFSET-MCA_PT_REGS_OFFSET ;; st8 [temp2]=temp1 // pal_min_state, virtual add temp4=temp3, regs // start of bspstore on new stack ;; mov ar.bspstore=temp4 // switch RBS to MCA/INIT stack ;; flushrs // must be first in group br.sptk b0 //EndStub////////////////////////////////////////////////////////////////////// //++ // Name: // ia64_old_stack() // // Stub Description: // // Switch to the old stack. // // r2 contains the return address, r3 contains either // IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET. // // On entry, pal_min_state is virtual, on exit it is physical. // // On entry RBS is on the MCA/INIT stack, this routine switches RBS // back to the previous stack. // // The psr is set to all zeroes. SAL return requires either all zeroes or // just psr.mc set. Leaving psr.mc off allows INIT to be issued if this // code does not perform correctly. // // The dirty registers at the time of the event were flushed to the // MCA/INIT stack in ia64_pt_regs_save(). Restore the dirty registers // before reverting to the previous bspstore. //-- ia64_old_stack: add regs=MCA_PT_REGS_OFFSET, r3 mov b0=r2 // save return address GET_IA64_MCA_DATA(temp2) LOAD_PHYSICAL(p0,temp1,1f) ;; mov cr.ipsr=r0 mov cr.ifs=r0 mov cr.iip=temp1 ;; invala rfi 1: add regs=regs, temp2 // struct pt_regs on MCA or INIT stack ;; add temp1=PT(LOADRS), regs ;; ld8 temp2=[temp1],PT(AR_BSPSTORE)-PT(LOADRS) // restore loadrs ;; ld8 temp3=[temp1],PT(AR_RNAT)-PT(AR_BSPSTORE) // restore ar.bspstore mov ar.rsc=temp2 ;; loadrs ld8 temp4=[temp1] // restore ar.rnat ;; mov ar.bspstore=temp3 // back to old stack ;; mov ar.rnat=temp4 ;; br.sptk b0 //EndStub////////////////////////////////////////////////////////////////////// //++ // Name: // ia64_set_kernel_registers() // // Stub Description: // // Set the registers that are required by the C code in order to run on an // MCA/INIT stack. 
// // r2 contains the return address, r3 contains either // IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET. // //-- ia64_set_kernel_registers: add temp3=MCA_SP_OFFSET, r3 mov b0=r2 // save return address GET_IA64_MCA_DATA(temp1) ;; add r12=temp1, temp3 // kernel stack pointer on MCA/INIT stack add r13=temp1, r3 // set current to start of MCA/INIT stack add r20=temp1, r3 // physical start of MCA/INIT stack ;; DATA_PA_TO_VA(r12,temp2) DATA_PA_TO_VA(r13,temp3) ;; mov IA64_KR(CURRENT)=r13 /* Wire IA64_TR_CURRENT_STACK to the MCA/INIT handler stack. To avoid * any dependencies on the algorithm in ia64_switch_to(), just purge * any existing CURRENT_STACK mapping and insert the new one. */ mov r16=IA64_KR(CURRENT_STACK) // physical granule mapped by IA64_TR_CURRENT_STACK ;; shl r16=r16,IA64_GRANULE_SHIFT ;; dep r16=-1,r16,61,3 // virtual granule mov r18=IA64_GRANULE_SHIFT<<2 // for cr.itir.ps ;; ptr.d r16,r18 ;; srlz.d shr.u r16=r20,IA64_GRANULE_SHIFT // r20 = physical start of MCA/INIT stack movl r21=PAGE_KERNEL // page properties ;; mov IA64_KR(CURRENT_STACK)=r16 or r21=r20,r21 // construct PA | page properties ;; mov cr.itir=r18 mov cr.ifa=r13 mov r20=IA64_TR_CURRENT_STACK movl r17=FPSR_DEFAULT ;; mov.m ar.fpsr=r17 // set ar.fpsr to kernel default value ;; itr.d dtr[r20]=r21 ;; srlz.d br.sptk b0 //EndStub////////////////////////////////////////////////////////////////////// #undef ms #undef regs #undef temp1 #undef temp2 #undef temp3 #undef temp4 // Support function for mca.c, it is here to avoid using inline asm. Given the // address of an rnat slot, if that address is below the current ar.bspstore // then return the contents of that slot, otherwise return the contents of // ar.rnat. 
GLOBAL_ENTRY(ia64_get_rnat) alloc r14=ar.pfs,1,0,0,0 mov ar.rsc=0 ;; mov r14=ar.bspstore ;; cmp.lt p6,p7=in0,r14 ;; (p6) ld8 r8=[in0] (p7) mov r8=ar.rnat mov ar.rsc=3 br.ret.sptk.many rp END(ia64_get_rnat) // void ia64_set_psr_mc(void) // // Set psr.mc bit to mask MCA/INIT. GLOBAL_ENTRY(ia64_set_psr_mc) rsm psr.i | psr.ic // disable interrupts ;; srlz.d ;; mov r14 = psr // get psr{36:35,31:0} movl r15 = 1f ;; dep r14 = -1, r14, PSR_MC, 1 // set psr.mc ;; dep r14 = -1, r14, PSR_IC, 1 // set psr.ic ;; dep r14 = -1, r14, PSR_BN, 1 // keep bank1 in use ;; mov cr.ipsr = r14 mov cr.ifs = r0 mov cr.iip = r15 ;; rfi 1: br.ret.sptk.many rp END(ia64_set_psr_mc)
aixcc-public/challenge-001-exemplar-source
6,200
arch/ia64/lib/clear_user.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * This routine clears to zero a linear memory buffer in user space. * * Inputs: * in0: address of buffer * in1: length of buffer in bytes * Outputs: * r8: number of bytes that didn't get cleared due to a fault * * Copyright (C) 1998, 1999, 2001 Hewlett-Packard Co * Stephane Eranian <eranian@hpl.hp.com> */ #include <asm/asmmacro.h> #include <asm/export.h> // // arguments // #define buf r32 #define len r33 // // local registers // #define cnt r16 #define buf2 r17 #define saved_lc r18 #define saved_pfs r19 #define tmp r20 #define len2 r21 #define len3 r22 // // Theory of operations: // - we check whether or not the buffer is small, i.e., less than 17 // in which case we do the byte by byte loop. // // - Otherwise we go progressively from 1 byte store to 8byte store in // the head part, the body is a 16byte store loop and we finish we the // tail for the last 15 bytes. // The good point about this breakdown is that the long buffer handling // contains only 2 branches. // // The reason for not using shifting & masking for both the head and the // tail is to stay semantically correct. This routine is not supposed // to write bytes outside of the buffer. While most of the time this would // be ok, we can't tolerate a mistake. A classical example is the case // of multithreaded code were to the extra bytes touched is actually owned // by another thread which runs concurrently to ours. Another, less likely, // example is with device drivers where reading an I/O mapped location may // have side effects (same thing for writing). 
// GLOBAL_ENTRY(__do_clear_user) .prologue .save ar.pfs, saved_pfs alloc saved_pfs=ar.pfs,2,0,0,0 cmp.eq p6,p0=r0,len // check for zero length .save ar.lc, saved_lc mov saved_lc=ar.lc // preserve ar.lc (slow) .body ;; // avoid WAW on CFM adds tmp=-1,len // br.ctop is repeat/until mov ret0=len // return value is length at this point (p6) br.ret.spnt.many rp ;; cmp.lt p6,p0=16,len // if len > 16 then long memset mov ar.lc=tmp // initialize lc for small count (p6) br.cond.dptk .long_do_clear ;; // WAR on ar.lc // // worst case 16 iterations, avg 8 iterations // // We could have played with the predicates to use the extra // M slot for 2 stores/iteration but the cost the initialization // the various counters compared to how long the loop is supposed // to last on average does not make this solution viable. // 1: EX( .Lexit1, st1 [buf]=r0,1 ) adds len=-1,len // countdown length using len br.cloop.dptk 1b ;; // avoid RAW on ar.lc // // .Lexit4: comes from byte by byte loop // len contains bytes left .Lexit1: mov ret0=len // faster than using ar.lc mov ar.lc=saved_lc br.ret.sptk.many rp // end of short clear_user // // At this point we know we have more than 16 bytes to copy // so we focus on alignment (no branches required) // // The use of len/len2 for countdown of the number of bytes left // instead of ret0 is due to the fact that the exception code // changes the values of r8. 
// .long_do_clear: tbit.nz p6,p0=buf,0 // odd alignment (for long_do_clear) ;; EX( .Lexit3, (p6) st1 [buf]=r0,1 ) // 1-byte aligned (p6) adds len=-1,len;; // sync because buf is modified tbit.nz p6,p0=buf,1 ;; EX( .Lexit3, (p6) st2 [buf]=r0,2 ) // 2-byte aligned (p6) adds len=-2,len;; tbit.nz p6,p0=buf,2 ;; EX( .Lexit3, (p6) st4 [buf]=r0,4 ) // 4-byte aligned (p6) adds len=-4,len;; tbit.nz p6,p0=buf,3 ;; EX( .Lexit3, (p6) st8 [buf]=r0,8 ) // 8-byte aligned (p6) adds len=-8,len;; shr.u cnt=len,4 // number of 128-bit (2x64bit) words ;; cmp.eq p6,p0=r0,cnt adds tmp=-1,cnt (p6) br.cond.dpnt .dotail // we have less than 16 bytes left ;; adds buf2=8,buf // setup second base pointer mov ar.lc=tmp ;; // // 16bytes/iteration core loop // // The second store can never generate a fault because // we come into the loop only when we are 16-byte aligned. // This means that if we cross a page then it will always be // in the first store and never in the second. // // // We need to keep track of the remaining length. A possible (optimistic) // way would be to use ar.lc and derive how many byte were left by // doing : left= 16*ar.lc + 16. this would avoid the addition at // every iteration. // However we need to keep the synchronization point. A template // M;;MB does not exist and thus we can keep the addition at no // extra cycle cost (use a nop slot anyway). It also simplifies the // (unlikely) error recovery code // 2: EX(.Lexit3, st8 [buf]=r0,16 ) ;; // needed to get len correct when error st8 [buf2]=r0,16 adds len=-16,len br.cloop.dptk 2b ;; mov ar.lc=saved_lc // // tail correction based on len only // // We alternate the use of len3,len2 to allow parallelism and correct // error handling. We also reuse p6/p7 to return correct value. // The addition of len2/len3 does not cost anything more compared to // the regular memset as we had empty slots. 
// .dotail: mov len2=len // for parallelization of error handling mov len3=len tbit.nz p6,p0=len,3 ;; EX( .Lexit2, (p6) st8 [buf]=r0,8 ) // at least 8 bytes (p6) adds len3=-8,len2 tbit.nz p7,p6=len,2 ;; EX( .Lexit2, (p7) st4 [buf]=r0,4 ) // at least 4 bytes (p7) adds len2=-4,len3 tbit.nz p6,p7=len,1 ;; EX( .Lexit2, (p6) st2 [buf]=r0,2 ) // at least 2 bytes (p6) adds len3=-2,len2 tbit.nz p7,p6=len,0 ;; EX( .Lexit2, (p7) st1 [buf]=r0 ) // only 1 byte left mov ret0=r0 // success br.ret.sptk.many rp // end of most likely path // // Outlined error handling code // // // .Lexit3: comes from core loop, need restore pr/lc // len contains bytes left // // // .Lexit2: // if p6 -> coming from st8 or st2 : len2 contains what's left // if p7 -> coming from st4 or st1 : len3 contains what's left // We must restore lc/pr even though might not have been used. .Lexit2: .pred.rel "mutex", p6, p7 (p6) mov len=len2 (p7) mov len=len3 ;; // // .Lexit4: comes from head, need not restore pr/lc // len contains bytes left // .Lexit3: mov ret0=len mov ar.lc=saved_lc br.ret.sptk.many rp END(__do_clear_user) EXPORT_SYMBOL(__do_clear_user)
aixcc-public/challenge-001-exemplar-source
2,805
arch/ia64/lib/flush.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Cache flushing routines. * * Copyright (C) 1999-2001, 2005 Hewlett-Packard Co * David Mosberger-Tang <davidm@hpl.hp.com> * * 05/28/05 Zoltan Menyhart Dynamic stride size */ #include <asm/asmmacro.h> #include <asm/export.h> /* * flush_icache_range(start,end) * * Make i-cache(s) coherent with d-caches. * * Must deal with range from start to end-1 but nothing else (need to * be careful not to touch addresses that may be unmapped). * * Note: "in0" and "in1" are preserved for debugging purposes. */ .section .kprobes.text,"ax" GLOBAL_ENTRY(flush_icache_range) .prologue alloc r2=ar.pfs,2,0,0,0 movl r3=ia64_i_cache_stride_shift mov r21=1 ;; ld8 r20=[r3] // r20: stride shift sub r22=in1,r0,1 // last byte address ;; shr.u r23=in0,r20 // start / (stride size) shr.u r22=r22,r20 // (last byte address) / (stride size) shl r21=r21,r20 // r21: stride size of the i-cache(s) ;; sub r8=r22,r23 // number of strides - 1 shl r24=r23,r20 // r24: addresses for "fc.i" = // "start" rounded down to stride boundary .save ar.lc,r3 mov r3=ar.lc // save ar.lc ;; .body mov ar.lc=r8 ;; /* * 32 byte aligned loop, even number of (actually 2) bundles */ .Loop: fc.i r24 // issuable on M0 only add r24=r21,r24 // we flush "stride size" bytes per iteration nop.i 0 br.cloop.sptk.few .Loop ;; sync.i ;; srlz.i ;; mov ar.lc=r3 // restore ar.lc br.ret.sptk.many rp END(flush_icache_range) EXPORT_SYMBOL_GPL(flush_icache_range) /* * clflush_cache_range(start,size) * * Flush cache lines from start to start+size-1. * * Must deal with range from start to start+size-1 but nothing else * (need to be careful not to touch addresses that may be * unmapped). * * Note: "in0" and "in1" are preserved for debugging purposes. 
*/ .section .kprobes.text,"ax" GLOBAL_ENTRY(clflush_cache_range) .prologue alloc r2=ar.pfs,2,0,0,0 movl r3=ia64_cache_stride_shift mov r21=1 add r22=in1,in0 ;; ld8 r20=[r3] // r20: stride shift sub r22=r22,r0,1 // last byte address ;; shr.u r23=in0,r20 // start / (stride size) shr.u r22=r22,r20 // (last byte address) / (stride size) shl r21=r21,r20 // r21: stride size of the i-cache(s) ;; sub r8=r22,r23 // number of strides - 1 shl r24=r23,r20 // r24: addresses for "fc" = // "start" rounded down to stride // boundary .save ar.lc,r3 mov r3=ar.lc // save ar.lc ;; .body mov ar.lc=r8 ;; /* * 32 byte aligned loop, even number of (actually 2) bundles */ .Loop_fc: fc r24 // issuable on M0 only add r24=r21,r24 // we flush "stride size" bytes per iteration nop.i 0 br.cloop.sptk.few .Loop_fc ;; sync.i ;; srlz.i ;; mov ar.lc=r3 // restore ar.lc br.ret.sptk.many rp END(clflush_cache_range)
aixcc-public/challenge-001-exemplar-source
6,974
arch/ia64/lib/memcpy.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * * Optimized version of the standard memcpy() function * * Inputs: * in0: destination address * in1: source address * in2: number of bytes to copy * Output: * no return value * * Copyright (C) 2000-2001 Hewlett-Packard Co * Stephane Eranian <eranian@hpl.hp.com> * David Mosberger-Tang <davidm@hpl.hp.com> */ #include <asm/asmmacro.h> #include <asm/export.h> GLOBAL_ENTRY(memcpy) # define MEM_LAT 21 /* latency to memory */ # define dst r2 # define src r3 # define retval r8 # define saved_pfs r9 # define saved_lc r10 # define saved_pr r11 # define cnt r16 # define src2 r17 # define t0 r18 # define t1 r19 # define t2 r20 # define t3 r21 # define t4 r22 # define src_end r23 # define N (MEM_LAT + 4) # define Nrot ((N + 7) & ~7) /* * First, check if everything (src, dst, len) is a multiple of eight. If * so, we handle everything with no taken branches (other than the loop * itself) and a small icache footprint. Otherwise, we jump off to * the more general copy routine handling arbitrary * sizes/alignment etc. */ .prologue .save ar.pfs, saved_pfs alloc saved_pfs=ar.pfs,3,Nrot,0,Nrot .save ar.lc, saved_lc mov saved_lc=ar.lc or t0=in0,in1 ;; or t0=t0,in2 .save pr, saved_pr mov saved_pr=pr .body cmp.eq p6,p0=in2,r0 // zero length? mov retval=in0 // return dst (p6) br.ret.spnt.many rp // zero length, return immediately ;; mov dst=in0 // copy because of rotation shr.u cnt=in2,3 // number of 8-byte words to copy mov pr.rot=1<<16 ;; adds cnt=-1,cnt // br.ctop is repeat/until cmp.gtu p7,p0=16,in2 // copying less than 16 bytes? 
mov ar.ec=N ;; and t0=0x7,t0 mov ar.lc=cnt ;; cmp.ne p6,p0=t0,r0 mov src=in1 // copy because of rotation (p7) br.cond.spnt.few .memcpy_short (p6) br.cond.spnt.few .memcpy_long ;; nop.m 0 ;; nop.m 0 nop.i 0 ;; nop.m 0 ;; .rotr val[N] .rotp p[N] .align 32 1: { .mib (p[0]) ld8 val[0]=[src],8 nop.i 0 brp.loop.imp 1b, 2f } 2: { .mfb (p[N-1])st8 [dst]=val[N-1],8 nop.f 0 br.ctop.dptk.few 1b } ;; mov ar.lc=saved_lc mov pr=saved_pr,-1 mov ar.pfs=saved_pfs br.ret.sptk.many rp /* * Small (<16 bytes) unaligned copying is done via a simple byte-at-the-time * copy loop. This performs relatively poorly on Itanium, but it doesn't * get used very often (gcc inlines small copies) and due to atomicity * issues, we want to avoid read-modify-write of entire words. */ .align 32 .memcpy_short: adds cnt=-1,in2 // br.ctop is repeat/until mov ar.ec=MEM_LAT brp.loop.imp 1f, 2f ;; mov ar.lc=cnt ;; nop.m 0 ;; nop.m 0 nop.i 0 ;; nop.m 0 ;; nop.m 0 ;; /* * It is faster to put a stop bit in the loop here because it makes * the pipeline shorter (and latency is what matters on short copies). */ .align 32 1: { .mib (p[0]) ld1 val[0]=[src],1 nop.i 0 brp.loop.imp 1b, 2f } ;; 2: { .mfb (p[MEM_LAT-1])st1 [dst]=val[MEM_LAT-1],1 nop.f 0 br.ctop.dptk.few 1b } ;; mov ar.lc=saved_lc mov pr=saved_pr,-1 mov ar.pfs=saved_pfs br.ret.sptk.many rp /* * Large (>= 16 bytes) copying is done in a fancy way. Latency isn't * an overriding concern here, but throughput is. We first do * sub-word copying until the destination is aligned, then we check * if the source is also aligned. If so, we do a simple load/store-loop * until there are less than 8 bytes left over and then we do the tail, * by storing the last few bytes using sub-word copying. If the source * is not aligned, we branch off to the non-congruent loop. * * stage: op: * 0 ld * : * MEM_LAT+3 shrp * MEM_LAT+4 st * * On Itanium, the pipeline itself runs without stalls. 
However, br.ctop * seems to introduce an unavoidable bubble in the pipeline so the overall * latency is 2 cycles/iteration. This gives us a _copy_ throughput * of 4 byte/cycle. Still not bad. */ # undef N # undef Nrot # define N (MEM_LAT + 5) /* number of stages */ # define Nrot ((N+1 + 2 + 7) & ~7) /* number of rotating regs */ #define LOG_LOOP_SIZE 6 .memcpy_long: alloc t3=ar.pfs,3,Nrot,0,Nrot // resize register frame and t0=-8,src // t0 = src & ~7 and t2=7,src // t2 = src & 7 ;; ld8 t0=[t0] // t0 = 1st source word adds src2=7,src // src2 = (src + 7) sub t4=r0,dst // t4 = -dst ;; and src2=-8,src2 // src2 = (src + 7) & ~7 shl t2=t2,3 // t2 = 8*(src & 7) shl t4=t4,3 // t4 = 8*(dst & 7) ;; ld8 t1=[src2] // t1 = 1st source word if src is 8-byte aligned, 2nd otherwise sub t3=64,t2 // t3 = 64-8*(src & 7) shr.u t0=t0,t2 ;; add src_end=src,in2 shl t1=t1,t3 mov pr=t4,0x38 // (p5,p4,p3)=(dst & 7) ;; or t0=t0,t1 mov cnt=r0 adds src_end=-1,src_end ;; (p3) st1 [dst]=t0,1 (p3) shr.u t0=t0,8 (p3) adds cnt=1,cnt ;; (p4) st2 [dst]=t0,2 (p4) shr.u t0=t0,16 (p4) adds cnt=2,cnt ;; (p5) st4 [dst]=t0,4 (p5) adds cnt=4,cnt and src_end=-8,src_end // src_end = last word of source buffer ;; // At this point, dst is aligned to 8 bytes and there at least 16-7=9 bytes left to copy: 1:{ add src=cnt,src // make src point to remainder of source buffer sub cnt=in2,cnt // cnt = number of bytes left to copy mov t4=ip } ;; and src2=-8,src // align source pointer adds t4=.memcpy_loops-1b,t4 mov ar.ec=N and t0=7,src // t0 = src & 7 shr.u t2=cnt,3 // t2 = number of 8-byte words left to copy shl cnt=cnt,3 // move bits 0-2 to 3-5 ;; .rotr val[N+1], w[2] .rotp p[N] cmp.ne p6,p0=t0,r0 // is src aligned, too? shl t0=t0,LOG_LOOP_SIZE // t0 = 8*(src & 7) adds t2=-1,t2 // br.ctop is repeat/until ;; add t4=t0,t4 mov pr=cnt,0x38 // set (p5,p4,p3) to # of bytes last-word bytes to copy mov ar.lc=t2 ;; nop.m 0 ;; nop.m 0 nop.i 0 ;; nop.m 0 ;; (p6) ld8 val[1]=[src2],8 // prime the pump... 
mov b6=t4 br.sptk.few b6 ;; .memcpy_tail: // At this point, (p5,p4,p3) are set to the number of bytes left to copy (which is // less than 8) and t0 contains the last few bytes of the src buffer: (p5) st4 [dst]=t0,4 (p5) shr.u t0=t0,32 mov ar.lc=saved_lc ;; (p4) st2 [dst]=t0,2 (p4) shr.u t0=t0,16 mov ar.pfs=saved_pfs ;; (p3) st1 [dst]=t0 mov pr=saved_pr,-1 br.ret.sptk.many rp /////////////////////////////////////////////////////// .align 64 #define COPY(shift,index) \ 1: { .mib \ (p[0]) ld8 val[0]=[src2],8; \ (p[MEM_LAT+3]) shrp w[0]=val[MEM_LAT+3],val[MEM_LAT+4-index],shift; \ brp.loop.imp 1b, 2f \ }; \ 2: { .mfb \ (p[MEM_LAT+4]) st8 [dst]=w[1],8; \ nop.f 0; \ br.ctop.dptk.few 1b; \ }; \ ;; \ ld8 val[N-1]=[src_end]; /* load last word (may be same as val[N]) */ \ ;; \ shrp t0=val[N-1],val[N-index],shift; \ br .memcpy_tail .memcpy_loops: COPY(0, 1) /* no point special casing this---it doesn't go any faster without shrp */ COPY(8, 0) COPY(16, 0) COPY(24, 0) COPY(32, 0) COPY(40, 0) COPY(48, 0) COPY(56, 0) END(memcpy) EXPORT_SYMBOL(memcpy)
aixcc-public/challenge-001-exemplar-source
1,990
arch/ia64/lib/clear_page.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 1999-2002 Hewlett-Packard Co * Stephane Eranian <eranian@hpl.hp.com> * David Mosberger-Tang <davidm@hpl.hp.com> * Copyright (C) 2002 Ken Chen <kenneth.w.chen@intel.com> * * 1/06/01 davidm Tuned for Itanium. * 2/12/02 kchen Tuned for both Itanium and McKinley * 3/08/02 davidm Some more tweaking */ #include <asm/asmmacro.h> #include <asm/page.h> #include <asm/export.h> #ifdef CONFIG_ITANIUM # define L3_LINE_SIZE 64 // Itanium L3 line size # define PREFETCH_LINES 9 // magic number #else # define L3_LINE_SIZE 128 // McKinley L3 line size # define PREFETCH_LINES 12 // magic number #endif #define saved_lc r2 #define dst_fetch r3 #define dst1 r8 #define dst2 r9 #define dst3 r10 #define dst4 r11 #define dst_last r31 GLOBAL_ENTRY(clear_page) .prologue .regstk 1,0,0,0 mov r16 = PAGE_SIZE/L3_LINE_SIZE-1 // main loop count, -1=repeat/until .save ar.lc, saved_lc mov saved_lc = ar.lc .body mov ar.lc = (PREFETCH_LINES - 1) mov dst_fetch = in0 adds dst1 = 16, in0 adds dst2 = 32, in0 ;; .fetch: stf.spill.nta [dst_fetch] = f0, L3_LINE_SIZE adds dst3 = 48, in0 // executing this multiple times is harmless br.cloop.sptk.few .fetch ;; addl dst_last = (PAGE_SIZE - PREFETCH_LINES*L3_LINE_SIZE), dst_fetch mov ar.lc = r16 // one L3 line per iteration adds dst4 = 64, in0 ;; #ifdef CONFIG_ITANIUM // Optimized for Itanium 1: stf.spill.nta [dst1] = f0, 64 stf.spill.nta [dst2] = f0, 64 cmp.lt p8,p0=dst_fetch, dst_last ;; #else // Optimized for McKinley 1: stf.spill.nta [dst1] = f0, 64 stf.spill.nta [dst2] = f0, 64 stf.spill.nta [dst3] = f0, 64 stf.spill.nta [dst4] = f0, 128 cmp.lt p8,p0=dst_fetch, dst_last ;; stf.spill.nta [dst1] = f0, 64 stf.spill.nta [dst2] = f0, 64 #endif stf.spill.nta [dst3] = f0, 64 (p8) stf.spill.nta [dst_fetch] = f0, L3_LINE_SIZE br.cloop.sptk.few 1b ;; mov ar.lc = saved_lc // restore lc br.ret.sptk.many rp END(clear_page) EXPORT_SYMBOL(clear_page)
aixcc-public/challenge-001-exemplar-source
5,959
arch/ia64/lib/copy_page_mck.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * McKinley-optimized version of copy_page(). * * Copyright (C) 2002 Hewlett-Packard Co * David Mosberger <davidm@hpl.hp.com> * * Inputs: * in0: address of target page * in1: address of source page * Output: * no return value * * General idea: * - use regular loads and stores to prefetch data to avoid consuming M-slot just for * lfetches => good for in-cache performance * - avoid l2 bank-conflicts by not storing into the same 16-byte bank within a single * cycle * * Principle of operation: * First, note that L1 has a line-size of 64 bytes and L2 a line-size of 128 bytes. * To avoid secondary misses in L2, we prefetch both source and destination with a line-size * of 128 bytes. When both of these lines are in the L2 and the first half of the * source line is in L1, we start copying the remaining words. The second half of the * source line is prefetched in an earlier iteration, so that by the time we start * accessing it, it's also present in the L1. * * We use a software-pipelined loop to control the overall operation. The pipeline * has 2*PREFETCH_DIST+K stages. The first PREFETCH_DIST stages are used for prefetching * source cache-lines. The second PREFETCH_DIST stages are used for prefetching destination * cache-lines, the last K stages are used to copy the cache-line words not copied by * the prefetches. The four relevant points in the pipelined are called A, B, C, D: * p[A] is TRUE if a source-line should be prefetched, p[B] is TRUE if a destination-line * should be prefetched, p[C] is TRUE if the second half of an L2 line should be brought * into L1D and p[D] is TRUE if a cacheline needs to be copied. * * This all sounds very complicated, but thanks to the modulo-scheduled loop support, * the resulting code is very regular and quite easy to follow (once you get the idea). * * As a secondary optimization, the first 2*PREFETCH_DIST iterations are implemented * as the separate .prefetch_loop. 
Logically, this loop performs exactly like the * main-loop (.line_copy), but has all known-to-be-predicated-off instructions removed, * so that each loop iteration is faster (again, good for cached case). * * When reading the code, it helps to keep the following picture in mind: * * word 0 word 1 * +------+------+--- * | v[x] | t1 | ^ * | t2 | t3 | | * | t4 | t5 | | * | t6 | t7 | | 128 bytes * | n[y] | t9 | | (L2 cache line) * | t10 | t11 | | * | t12 | t13 | | * | t14 | t15 | v * +------+------+--- * * Here, v[x] is copied by the (memory) prefetch. n[y] is loaded at p[C] * to fetch the second-half of the L2 cache line into L1, and the tX words are copied in * an order that avoids bank conflicts. */ #include <asm/asmmacro.h> #include <asm/page.h> #include <asm/export.h> #define PREFETCH_DIST 8 // McKinley sustains 16 outstanding L2 misses (8 ld, 8 st) #define src0 r2 #define src1 r3 #define dst0 r9 #define dst1 r10 #define src_pre_mem r11 #define dst_pre_mem r14 #define src_pre_l2 r15 #define dst_pre_l2 r16 #define t1 r17 #define t2 r18 #define t3 r19 #define t4 r20 #define t5 t1 // alias! #define t6 t2 // alias! #define t7 t3 // alias! #define t9 t5 // alias! #define t10 t4 // alias! #define t11 t7 // alias! #define t12 t6 // alias! #define t14 t10 // alias! 
#define t13 r21 #define t15 r22 #define saved_lc r23 #define saved_pr r24 #define A 0 #define B (PREFETCH_DIST) #define C (B + PREFETCH_DIST) #define D (C + 3) #define N (D + 1) #define Nrot ((N + 7) & ~7) GLOBAL_ENTRY(copy_page) .prologue alloc r8 = ar.pfs, 2, Nrot-2, 0, Nrot .rotr v[2*PREFETCH_DIST], n[D-C+1] .rotp p[N] .save ar.lc, saved_lc mov saved_lc = ar.lc .save pr, saved_pr mov saved_pr = pr .body mov src_pre_mem = in1 mov pr.rot = 0x10000 mov ar.ec = 1 // special unrolled loop mov dst_pre_mem = in0 mov ar.lc = 2*PREFETCH_DIST - 1 add src_pre_l2 = 8*8, in1 add dst_pre_l2 = 8*8, in0 add src0 = 8, in1 // first t1 src add src1 = 3*8, in1 // first t3 src add dst0 = 8, in0 // first t1 dst add dst1 = 3*8, in0 // first t3 dst mov t1 = (PAGE_SIZE/128) - (2*PREFETCH_DIST) - 1 nop.m 0 nop.i 0 ;; // same as .line_copy loop, but with all predicated-off instructions removed: .prefetch_loop: (p[A]) ld8 v[A] = [src_pre_mem], 128 // M0 (p[B]) st8 [dst_pre_mem] = v[B], 128 // M2 br.ctop.sptk .prefetch_loop ;; cmp.eq p16, p0 = r0, r0 // reset p16 to 1 (br.ctop cleared it to zero) mov ar.lc = t1 // with 64KB pages, t1 is too big to fit in 8 bits! 
mov ar.ec = N // # of stages in pipeline ;; .line_copy: (p[D]) ld8 t2 = [src0], 3*8 // M0 (p[D]) ld8 t4 = [src1], 3*8 // M1 (p[B]) st8 [dst_pre_mem] = v[B], 128 // M2 prefetch dst from memory (p[D]) st8 [dst_pre_l2] = n[D-C], 128 // M3 prefetch dst from L2 ;; (p[A]) ld8 v[A] = [src_pre_mem], 128 // M0 prefetch src from memory (p[C]) ld8 n[0] = [src_pre_l2], 128 // M1 prefetch src from L2 (p[D]) st8 [dst0] = t1, 8 // M2 (p[D]) st8 [dst1] = t3, 8 // M3 ;; (p[D]) ld8 t5 = [src0], 8 (p[D]) ld8 t7 = [src1], 3*8 (p[D]) st8 [dst0] = t2, 3*8 (p[D]) st8 [dst1] = t4, 3*8 ;; (p[D]) ld8 t6 = [src0], 3*8 (p[D]) ld8 t10 = [src1], 8 (p[D]) st8 [dst0] = t5, 8 (p[D]) st8 [dst1] = t7, 3*8 ;; (p[D]) ld8 t9 = [src0], 3*8 (p[D]) ld8 t11 = [src1], 3*8 (p[D]) st8 [dst0] = t6, 3*8 (p[D]) st8 [dst1] = t10, 8 ;; (p[D]) ld8 t12 = [src0], 8 (p[D]) ld8 t14 = [src1], 8 (p[D]) st8 [dst0] = t9, 3*8 (p[D]) st8 [dst1] = t11, 3*8 ;; (p[D]) ld8 t13 = [src0], 4*8 (p[D]) ld8 t15 = [src1], 4*8 (p[D]) st8 [dst0] = t12, 8 (p[D]) st8 [dst1] = t14, 8 ;; (p[D-1])ld8 t1 = [src0], 8 (p[D-1])ld8 t3 = [src1], 8 (p[D]) st8 [dst0] = t13, 4*8 (p[D]) st8 [dst1] = t15, 4*8 br.ctop.sptk .line_copy ;; mov ar.lc = saved_lc mov pr = saved_pr, -1 br.ret.sptk.many rp END(copy_page) EXPORT_SYMBOL(copy_page)
aixcc-public/challenge-001-exemplar-source
2,204
arch/ia64/lib/idiv64.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 1999-2000 Hewlett-Packard Co * Copyright (C) 1999-2000 David Mosberger-Tang <davidm@hpl.hp.com> * * 64-bit integer division. * * This code is based on the application note entitled "Divide, Square Root * and Remainder Algorithms for the IA-64 Architecture". This document * is available as Intel document number 248725-002 or via the web at * http://developer.intel.com/software/opensource/numerics/ * * For more details on the theory behind these algorithms, see "IA-64 * and Elementary Functions" by Peter Markstein; HP Professional Books * (http://www.goodreads.com/book/show/2019887.Ia_64_and_Elementary_Functions) */ #include <asm/asmmacro.h> #include <asm/export.h> #ifdef MODULO # define OP mod #else # define OP div #endif #ifdef UNSIGNED # define SGN u # define INT_TO_FP(a,b) fcvt.xuf.s1 a=b # define FP_TO_INT(a,b) fcvt.fxu.trunc.s1 a=b #else # define SGN # define INT_TO_FP(a,b) fcvt.xf a=b # define FP_TO_INT(a,b) fcvt.fx.trunc.s1 a=b #endif #define PASTE1(a,b) a##b #define PASTE(a,b) PASTE1(a,b) #define NAME PASTE(PASTE(__,SGN),PASTE(OP,di3)) GLOBAL_ENTRY(NAME) .regstk 2,0,0,0 // Transfer inputs to FP registers. setf.sig f8 = in0 setf.sig f9 = in1 ;; // Convert the inputs to FP, to avoid FP software-assist faults. 
INT_TO_FP(f8, f8) INT_TO_FP(f9, f9) ;; frcpa.s1 f11, p6 = f8, f9 // y0 = frcpa(b) ;; (p6) fmpy.s1 f7 = f8, f11 // q0 = a*y0 (p6) fnma.s1 f6 = f9, f11, f1 // e0 = -b*y0 + 1 ;; (p6) fma.s1 f10 = f7, f6, f7 // q1 = q0*e0 + q0 (p6) fmpy.s1 f7 = f6, f6 // e1 = e0*e0 ;; #ifdef MODULO sub in1 = r0, in1 // in1 = -b #endif (p6) fma.s1 f10 = f10, f7, f10 // q2 = q1*e1 + q1 (p6) fma.s1 f6 = f11, f6, f11 // y1 = y0*e0 + y0 ;; (p6) fma.s1 f6 = f6, f7, f6 // y2 = y1*e1 + y1 (p6) fnma.s1 f7 = f9, f10, f8 // r = -b*q2 + a ;; #ifdef MODULO setf.sig f8 = in0 // f8 = a setf.sig f9 = in1 // f9 = -b #endif (p6) fma.s1 f11 = f7, f6, f10 // q3 = r*y2 + q2 ;; FP_TO_INT(f11, f11) // q = trunc(q3) ;; #ifdef MODULO xma.l f11 = f11, f9, f8 // r = q*(-b) + a ;; #endif getf.sig r8 = f11 // transfer result to result register br.ret.sptk.many rp END(NAME) EXPORT_SYMBOL(NAME)
aixcc-public/challenge-001-exemplar-source
17,849
arch/ia64/lib/memcpy_mck.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Itanium 2-optimized version of memcpy and copy_user function * * Inputs: * in0: destination address * in1: source address * in2: number of bytes to copy * Output: * for memcpy: return dest * for copy_user: return 0 if success, * or number of byte NOT copied if error occurred. * * Copyright (C) 2002 Intel Corp. * Copyright (C) 2002 Ken Chen <kenneth.w.chen@intel.com> */ #include <asm/asmmacro.h> #include <asm/page.h> #include <asm/export.h> #define EK(y...) EX(y) /* McKinley specific optimization */ #define retval r8 #define saved_pfs r31 #define saved_lc r10 #define saved_pr r11 #define saved_in0 r14 #define saved_in1 r15 #define saved_in2 r16 #define src0 r2 #define src1 r3 #define dst0 r17 #define dst1 r18 #define cnt r9 /* r19-r30 are temp for each code section */ #define PREFETCH_DIST 8 #define src_pre_mem r19 #define dst_pre_mem r20 #define src_pre_l2 r21 #define dst_pre_l2 r22 #define t1 r23 #define t2 r24 #define t3 r25 #define t4 r26 #define t5 t1 // alias! #define t6 t2 // alias! #define t7 t3 // alias! #define n8 r27 #define t9 t5 // alias! #define t10 t4 // alias! #define t11 t7 // alias! #define t12 t6 // alias! #define t14 t10 // alias! 
#define t13 r28 #define t15 r29 #define tmp r30 /* defines for long_copy block */ #define A 0 #define B (PREFETCH_DIST) #define C (B + PREFETCH_DIST) #define D (C + 1) #define N (D + 1) #define Nrot ((N + 7) & ~7) /* alias */ #define in0 r32 #define in1 r33 #define in2 r34 GLOBAL_ENTRY(memcpy) and r28=0x7,in0 and r29=0x7,in1 mov f6=f0 mov retval=in0 br.cond.sptk .common_code ;; END(memcpy) EXPORT_SYMBOL(memcpy) GLOBAL_ENTRY(__copy_user) .prologue // check dest alignment and r28=0x7,in0 and r29=0x7,in1 mov f6=f1 mov saved_in0=in0 // save dest pointer mov saved_in1=in1 // save src pointer mov retval=r0 // initialize return value ;; .common_code: cmp.gt p15,p0=8,in2 // check for small size cmp.ne p13,p0=0,r28 // check dest alignment cmp.ne p14,p0=0,r29 // check src alignment add src0=0,in1 sub r30=8,r28 // for .align_dest mov saved_in2=in2 // save len ;; add dst0=0,in0 add dst1=1,in0 // dest odd index cmp.le p6,p0 = 1,r30 // for .align_dest (p15) br.cond.dpnt .memcpy_short (p13) br.cond.dpnt .align_dest (p14) br.cond.dpnt .unaligned_src ;; // both dest and src are aligned on 8-byte boundary .aligned_src: .save ar.pfs, saved_pfs alloc saved_pfs=ar.pfs,3,Nrot-3,0,Nrot .save pr, saved_pr mov saved_pr=pr shr.u cnt=in2,7 // this much cache line ;; cmp.lt p6,p0=2*PREFETCH_DIST,cnt cmp.lt p7,p8=1,cnt .save ar.lc, saved_lc mov saved_lc=ar.lc .body add cnt=-1,cnt add src_pre_mem=0,in1 // prefetch src pointer add dst_pre_mem=0,in0 // prefetch dest pointer ;; (p7) mov ar.lc=cnt // prefetch count (p8) mov ar.lc=r0 (p6) br.cond.dpnt .long_copy ;; .prefetch: lfetch.fault [src_pre_mem], 128 lfetch.fault.excl [dst_pre_mem], 128 br.cloop.dptk.few .prefetch ;; .medium_copy: and tmp=31,in2 // copy length after iteration shr.u r29=in2,5 // number of 32-byte iteration add dst1=8,dst0 // 2nd dest pointer ;; add cnt=-1,r29 // ctop iteration adjustment cmp.eq p10,p0=r29,r0 // do we really need to loop? 
add src1=8,src0 // 2nd src pointer cmp.le p6,p0=8,tmp ;; cmp.le p7,p0=16,tmp mov ar.lc=cnt // loop setup cmp.eq p16,p17 = r0,r0 mov ar.ec=2 (p10) br.dpnt.few .aligned_src_tail ;; TEXT_ALIGN(32) 1: EX(.ex_handler, (p16) ld8 r34=[src0],16) EK(.ex_handler, (p16) ld8 r38=[src1],16) EX(.ex_handler, (p17) st8 [dst0]=r33,16) EK(.ex_handler, (p17) st8 [dst1]=r37,16) ;; EX(.ex_handler, (p16) ld8 r32=[src0],16) EK(.ex_handler, (p16) ld8 r36=[src1],16) EX(.ex_handler, (p16) st8 [dst0]=r34,16) EK(.ex_handler, (p16) st8 [dst1]=r38,16) br.ctop.dptk.few 1b ;; .aligned_src_tail: EX(.ex_handler, (p6) ld8 t1=[src0]) mov ar.lc=saved_lc mov ar.pfs=saved_pfs EX(.ex_hndlr_s, (p7) ld8 t2=[src1],8) cmp.le p8,p0=24,tmp and r21=-8,tmp ;; EX(.ex_hndlr_s, (p8) ld8 t3=[src1]) EX(.ex_handler, (p6) st8 [dst0]=t1) // store byte 1 and in2=7,tmp // remaining length EX(.ex_hndlr_d, (p7) st8 [dst1]=t2,8) // store byte 2 add src0=src0,r21 // setting up src pointer add dst0=dst0,r21 // setting up dest pointer ;; EX(.ex_handler, (p8) st8 [dst1]=t3) // store byte 3 mov pr=saved_pr,-1 br.dptk.many .memcpy_short ;; /* code taken from copy_page_mck */ .long_copy: .rotr v[2*PREFETCH_DIST] .rotp p[N] mov src_pre_mem = src0 mov pr.rot = 0x10000 mov ar.ec = 1 // special unrolled loop mov dst_pre_mem = dst0 add src_pre_l2 = 8*8, src0 add dst_pre_l2 = 8*8, dst0 ;; add src0 = 8, src_pre_mem // first t1 src mov ar.lc = 2*PREFETCH_DIST - 1 shr.u cnt=in2,7 // number of lines add src1 = 3*8, src_pre_mem // first t3 src add dst0 = 8, dst_pre_mem // first t1 dst add dst1 = 3*8, dst_pre_mem // first t3 dst ;; and tmp=127,in2 // remaining bytes after this block add cnt = -(2*PREFETCH_DIST) - 1, cnt // same as .line_copy loop, but with all predicated-off instructions removed: .prefetch_loop: EX(.ex_hndlr_lcpy_1, (p[A]) ld8 v[A] = [src_pre_mem], 128) // M0 EK(.ex_hndlr_lcpy_1, (p[B]) st8 [dst_pre_mem] = v[B], 128) // M2 br.ctop.sptk .prefetch_loop ;; cmp.eq p16, p0 = r0, r0 // reset p16 to 1 mov ar.lc = cnt mov ar.ec = N // 
# of stages in pipeline ;; .line_copy: EX(.ex_handler, (p[D]) ld8 t2 = [src0], 3*8) // M0 EK(.ex_handler, (p[D]) ld8 t4 = [src1], 3*8) // M1 EX(.ex_handler_lcpy, (p[B]) st8 [dst_pre_mem] = v[B], 128) // M2 prefetch dst from memory EK(.ex_handler_lcpy, (p[D]) st8 [dst_pre_l2] = n8, 128) // M3 prefetch dst from L2 ;; EX(.ex_handler_lcpy, (p[A]) ld8 v[A] = [src_pre_mem], 128) // M0 prefetch src from memory EK(.ex_handler_lcpy, (p[C]) ld8 n8 = [src_pre_l2], 128) // M1 prefetch src from L2 EX(.ex_handler, (p[D]) st8 [dst0] = t1, 8) // M2 EK(.ex_handler, (p[D]) st8 [dst1] = t3, 8) // M3 ;; EX(.ex_handler, (p[D]) ld8 t5 = [src0], 8) EK(.ex_handler, (p[D]) ld8 t7 = [src1], 3*8) EX(.ex_handler, (p[D]) st8 [dst0] = t2, 3*8) EK(.ex_handler, (p[D]) st8 [dst1] = t4, 3*8) ;; EX(.ex_handler, (p[D]) ld8 t6 = [src0], 3*8) EK(.ex_handler, (p[D]) ld8 t10 = [src1], 8) EX(.ex_handler, (p[D]) st8 [dst0] = t5, 8) EK(.ex_handler, (p[D]) st8 [dst1] = t7, 3*8) ;; EX(.ex_handler, (p[D]) ld8 t9 = [src0], 3*8) EK(.ex_handler, (p[D]) ld8 t11 = [src1], 3*8) EX(.ex_handler, (p[D]) st8 [dst0] = t6, 3*8) EK(.ex_handler, (p[D]) st8 [dst1] = t10, 8) ;; EX(.ex_handler, (p[D]) ld8 t12 = [src0], 8) EK(.ex_handler, (p[D]) ld8 t14 = [src1], 8) EX(.ex_handler, (p[D]) st8 [dst0] = t9, 3*8) EK(.ex_handler, (p[D]) st8 [dst1] = t11, 3*8) ;; EX(.ex_handler, (p[D]) ld8 t13 = [src0], 4*8) EK(.ex_handler, (p[D]) ld8 t15 = [src1], 4*8) EX(.ex_handler, (p[D]) st8 [dst0] = t12, 8) EK(.ex_handler, (p[D]) st8 [dst1] = t14, 8) ;; EX(.ex_handler, (p[C]) ld8 t1 = [src0], 8) EK(.ex_handler, (p[C]) ld8 t3 = [src1], 8) EX(.ex_handler, (p[D]) st8 [dst0] = t13, 4*8) EK(.ex_handler, (p[D]) st8 [dst1] = t15, 4*8) br.ctop.sptk .line_copy ;; add dst0=-8,dst0 add src0=-8,src0 mov in2=tmp .restore sp br.sptk.many .medium_copy ;; #define BLOCK_SIZE 128*32 #define blocksize r23 #define curlen r24 // dest is on 8-byte boundary, src is not. We need to do // ld8-ld8, shrp, then st8. Max 8 byte copy per cycle. 
.unaligned_src: .prologue .save ar.pfs, saved_pfs alloc saved_pfs=ar.pfs,3,5,0,8 .save ar.lc, saved_lc mov saved_lc=ar.lc .save pr, saved_pr mov saved_pr=pr .body .4k_block: mov saved_in0=dst0 // need to save all input arguments mov saved_in2=in2 mov blocksize=BLOCK_SIZE ;; cmp.lt p6,p7=blocksize,in2 mov saved_in1=src0 ;; (p6) mov in2=blocksize ;; shr.u r21=in2,7 // this much cache line shr.u r22=in2,4 // number of 16-byte iteration and curlen=15,in2 // copy length after iteration and r30=7,src0 // source alignment ;; cmp.lt p7,p8=1,r21 add cnt=-1,r21 ;; add src_pre_mem=0,src0 // prefetch src pointer add dst_pre_mem=0,dst0 // prefetch dest pointer and src0=-8,src0 // 1st src pointer (p7) mov ar.lc = cnt (p8) mov ar.lc = r0 ;; TEXT_ALIGN(32) 1: lfetch.fault [src_pre_mem], 128 lfetch.fault.excl [dst_pre_mem], 128 br.cloop.dptk.few 1b ;; shladd dst1=r22,3,dst0 // 2nd dest pointer shladd src1=r22,3,src0 // 2nd src pointer cmp.eq p8,p9=r22,r0 // do we really need to loop? cmp.le p6,p7=8,curlen; // have at least 8 byte remaining? add cnt=-1,r22 // ctop iteration adjustment ;; EX(.ex_handler, (p9) ld8 r33=[src0],8) // loop primer EK(.ex_handler, (p9) ld8 r37=[src1],8) (p8) br.dpnt.few .noloop ;; // The jump address is calculated based on src alignment. The COPYU // macro below need to confine its size to power of two, so an entry // can be caulated using shl instead of an expensive multiply. The // size is then hard coded by the following #define to match the // actual size. This make it somewhat tedious when COPYU macro gets // changed and this need to be adjusted to match. 
#define LOOP_SIZE 6 1: mov r29=ip // jmp_table thread mov ar.lc=cnt ;; add r29=.jump_table - 1b - (.jmp1-.jump_table), r29 shl r28=r30, LOOP_SIZE // jmp_table thread mov ar.ec=2 // loop setup ;; add r29=r29,r28 // jmp_table thread cmp.eq p16,p17=r0,r0 ;; mov b6=r29 // jmp_table thread ;; br.cond.sptk.few b6 // for 8-15 byte case // We will skip the loop, but need to replicate the side effect // that the loop produces. .noloop: EX(.ex_handler, (p6) ld8 r37=[src1],8) add src0=8,src0 (p6) shl r25=r30,3 ;; EX(.ex_handler, (p6) ld8 r27=[src1]) (p6) shr.u r28=r37,r25 (p6) sub r26=64,r25 ;; (p6) shl r27=r27,r26 ;; (p6) or r21=r28,r27 .unaligned_src_tail: /* check if we have more than blocksize to copy, if so go back */ cmp.gt p8,p0=saved_in2,blocksize ;; (p8) add dst0=saved_in0,blocksize (p8) add src0=saved_in1,blocksize (p8) sub in2=saved_in2,blocksize (p8) br.dpnt .4k_block ;; /* we have up to 15 byte to copy in the tail. * part of work is already done in the jump table code * we are at the following state. * src side: * * xxxxxx xx <----- r21 has xxxxxxxx already * -------- -------- -------- * 0 8 16 * ^ * | * src1 * * dst * -------- -------- -------- * ^ * | * dst1 */ EX(.ex_handler, (p6) st8 [dst1]=r21,8) // more than 8 byte to copy (p6) add curlen=-8,curlen // update length mov ar.pfs=saved_pfs ;; mov ar.lc=saved_lc mov pr=saved_pr,-1 mov in2=curlen // remaining length mov dst0=dst1 // dest pointer add src0=src1,r30 // forward by src alignment ;; // 7 byte or smaller. 
.memcpy_short: cmp.le p8,p9 = 1,in2 cmp.le p10,p11 = 2,in2 cmp.le p12,p13 = 3,in2 cmp.le p14,p15 = 4,in2 add src1=1,src0 // second src pointer add dst1=1,dst0 // second dest pointer ;; EX(.ex_handler_short, (p8) ld1 t1=[src0],2) EK(.ex_handler_short, (p10) ld1 t2=[src1],2) (p9) br.ret.dpnt rp // 0 byte copy ;; EX(.ex_handler_short, (p8) st1 [dst0]=t1,2) EK(.ex_handler_short, (p10) st1 [dst1]=t2,2) (p11) br.ret.dpnt rp // 1 byte copy EX(.ex_handler_short, (p12) ld1 t3=[src0],2) EK(.ex_handler_short, (p14) ld1 t4=[src1],2) (p13) br.ret.dpnt rp // 2 byte copy ;; cmp.le p6,p7 = 5,in2 cmp.le p8,p9 = 6,in2 cmp.le p10,p11 = 7,in2 EX(.ex_handler_short, (p12) st1 [dst0]=t3,2) EK(.ex_handler_short, (p14) st1 [dst1]=t4,2) (p15) br.ret.dpnt rp // 3 byte copy ;; EX(.ex_handler_short, (p6) ld1 t5=[src0],2) EK(.ex_handler_short, (p8) ld1 t6=[src1],2) (p7) br.ret.dpnt rp // 4 byte copy ;; EX(.ex_handler_short, (p6) st1 [dst0]=t5,2) EK(.ex_handler_short, (p8) st1 [dst1]=t6,2) (p9) br.ret.dptk rp // 5 byte copy EX(.ex_handler_short, (p10) ld1 t7=[src0],2) (p11) br.ret.dptk rp // 6 byte copy ;; EX(.ex_handler_short, (p10) st1 [dst0]=t7,2) br.ret.dptk rp // done all cases /* Align dest to nearest 8-byte boundary. We know we have at * least 7 bytes to copy, enough to crawl to 8-byte boundary. * Actual number of byte to crawl depend on the dest alignment. 
* 7 byte or less is taken care at .memcpy_short * src0 - source even index * src1 - source odd index * dst0 - dest even index * dst1 - dest odd index * r30 - distance to 8-byte boundary */ .align_dest: add src1=1,in1 // source odd index cmp.le p7,p0 = 2,r30 // for .align_dest cmp.le p8,p0 = 3,r30 // for .align_dest EX(.ex_handler_short, (p6) ld1 t1=[src0],2) cmp.le p9,p0 = 4,r30 // for .align_dest cmp.le p10,p0 = 5,r30 ;; EX(.ex_handler_short, (p7) ld1 t2=[src1],2) EK(.ex_handler_short, (p8) ld1 t3=[src0],2) cmp.le p11,p0 = 6,r30 EX(.ex_handler_short, (p6) st1 [dst0] = t1,2) cmp.le p12,p0 = 7,r30 ;; EX(.ex_handler_short, (p9) ld1 t4=[src1],2) EK(.ex_handler_short, (p10) ld1 t5=[src0],2) EX(.ex_handler_short, (p7) st1 [dst1] = t2,2) EK(.ex_handler_short, (p8) st1 [dst0] = t3,2) ;; EX(.ex_handler_short, (p11) ld1 t6=[src1],2) EK(.ex_handler_short, (p12) ld1 t7=[src0],2) cmp.eq p6,p7=r28,r29 EX(.ex_handler_short, (p9) st1 [dst1] = t4,2) EK(.ex_handler_short, (p10) st1 [dst0] = t5,2) sub in2=in2,r30 ;; EX(.ex_handler_short, (p11) st1 [dst1] = t6,2) EK(.ex_handler_short, (p12) st1 [dst0] = t7) add dst0=in0,r30 // setup arguments add src0=in1,r30 (p6) br.cond.dptk .aligned_src (p7) br.cond.dpnt .unaligned_src ;; /* main loop body in jump table format */ #define COPYU(shift) \ 1: \ EX(.ex_handler, (p16) ld8 r32=[src0],8); /* 1 */ \ EK(.ex_handler, (p16) ld8 r36=[src1],8); \ (p17) shrp r35=r33,r34,shift;; /* 1 */ \ EX(.ex_handler, (p6) ld8 r22=[src1]); /* common, prime for tail section */ \ nop.m 0; \ (p16) shrp r38=r36,r37,shift; \ EX(.ex_handler, (p17) st8 [dst0]=r35,8); /* 1 */ \ EK(.ex_handler, (p17) st8 [dst1]=r39,8); \ br.ctop.dptk.few 1b;; \ (p7) add src1=-8,src1; /* back out for <8 byte case */ \ shrp r21=r22,r38,shift; /* speculative work */ \ br.sptk.few .unaligned_src_tail /* branch out of jump table */ \ ;; TEXT_ALIGN(32) .jump_table: COPYU(8) // unaligned cases .jmp1: COPYU(16) COPYU(24) COPYU(32) COPYU(40) COPYU(48) COPYU(56) #undef A #undef B #undef C #undef 
D /* * Due to lack of local tag support in gcc 2.x assembler, it is not clear which * instruction failed in the bundle. The exception algorithm is that we * first figure out the faulting address, then detect if there is any * progress made on the copy, if so, redo the copy from last known copied * location up to the faulting address (exclusive). In the copy_from_user * case, remaining byte in kernel buffer will be zeroed. * * Take copy_from_user as an example, in the code there are multiple loads * in a bundle and those multiple loads could span over two pages, the * faulting address is calculated as page_round_down(max(src0, src1)). * This is based on knowledge that if we can access one byte in a page, we * can access any byte in that page. * * predicate used in the exception handler: * p6-p7: direction * p10-p11: src faulting addr calculation * p12-p13: dst faulting addr calculation */ #define A r19 #define B r20 #define C r21 #define D r22 #define F r28 #define saved_retval loc0 #define saved_rtlink loc1 #define saved_pfs_stack loc2 .ex_hndlr_s: add src0=8,src0 br.sptk .ex_handler ;; .ex_hndlr_d: add dst0=8,dst0 br.sptk .ex_handler ;; .ex_hndlr_lcpy_1: mov src1=src_pre_mem mov dst1=dst_pre_mem cmp.gtu p10,p11=src_pre_mem,saved_in1 cmp.gtu p12,p13=dst_pre_mem,saved_in0 ;; (p10) add src0=8,saved_in1 (p11) mov src0=saved_in1 (p12) add dst0=8,saved_in0 (p13) mov dst0=saved_in0 br.sptk .ex_handler .ex_handler_lcpy: // in line_copy block, the preload addresses should always ahead // of the other two src/dst pointers. Furthermore, src1/dst1 should // always ahead of src0/dst0. mov src1=src_pre_mem mov dst1=dst_pre_mem .ex_handler: mov pr=saved_pr,-1 // first restore pr, lc, and pfs mov ar.lc=saved_lc mov ar.pfs=saved_pfs ;; .ex_handler_short: // fault occurred in these sections didn't change pr, lc, pfs cmp.ltu p6,p7=saved_in0, saved_in1 // get the copy direction cmp.ltu p10,p11=src0,src1 cmp.ltu p12,p13=dst0,dst1 fcmp.eq p8,p0=f6,f0 // is it memcpy? 
mov tmp = dst0 ;; (p11) mov src1 = src0 // pick the larger of the two (p13) mov dst0 = dst1 // make dst0 the smaller one (p13) mov dst1 = tmp // and dst1 the larger one ;; (p6) dep F = r0,dst1,0,PAGE_SHIFT // usr dst round down to page boundary (p7) dep F = r0,src1,0,PAGE_SHIFT // usr src round down to page boundary ;; (p6) cmp.le p14,p0=dst0,saved_in0 // no progress has been made on store (p7) cmp.le p14,p0=src0,saved_in1 // no progress has been made on load mov retval=saved_in2 (p8) ld1 tmp=[src1] // force an oops for memcpy call (p8) st1 [dst1]=r0 // force an oops for memcpy call (p14) br.ret.sptk.many rp /* * The remaining byte to copy is calculated as: * * A = (faulting_addr - orig_src) -> len to faulting ld address * or * (faulting_addr - orig_dst) -> len to faulting st address * B = (cur_dst - orig_dst) -> len copied so far * C = A - B -> len need to be copied * D = orig_len - A -> len need to be left along */ (p6) sub A = F, saved_in0 (p7) sub A = F, saved_in1 clrrrb ;; alloc saved_pfs_stack=ar.pfs,3,3,3,0 cmp.lt p8,p0=A,r0 sub B = dst0, saved_in0 // how many byte copied so far ;; (p8) mov A = 0; // A shouldn't be negative, cap it ;; sub C = A, B sub D = saved_in2, A ;; cmp.gt p8,p0=C,r0 // more than 1 byte? mov r8=0 mov saved_retval = D mov saved_rtlink = b0 add out0=saved_in0, B add out1=saved_in1, B mov out2=C (p8) br.call.sptk.few b0=__copy_user // recursive call ;; add saved_retval=saved_retval,r8 // above might return non-zero value ;; mov retval=saved_retval mov ar.pfs=saved_pfs_stack mov b0=saved_rtlink br.ret.sptk.many rp /* end of McKinley specific optimization */ END(__copy_user) EXPORT_SYMBOL(__copy_user)
aixcc-public/challenge-001-exemplar-source
2,807
arch/ia64/lib/ip_fast_csum.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Optmized version of the ip_fast_csum() function * Used for calculating IP header checksum * * Return: 16bit checksum, complemented * * Inputs: * in0: address of buffer to checksum (char *) * in1: length of the buffer (int) * * Copyright (C) 2002, 2006 Intel Corp. * Copyright (C) 2002, 2006 Ken Chen <kenneth.w.chen@intel.com> */ #include <asm/asmmacro.h> #include <asm/export.h> /* * Since we know that most likely this function is called with buf aligned * on 4-byte boundary and 20 bytes in length, we can execution rather quickly * versus calling generic version of do_csum, which has lots of overhead in * handling various alignments and sizes. However, due to lack of constrains * put on the function input argument, cases with alignment not on 4-byte or * size not equal to 20 bytes will be handled by the generic do_csum function. */ #define in0 r32 #define in1 r33 #define in2 r34 #define in3 r35 #define in4 r36 #define ret0 r8 GLOBAL_ENTRY(ip_fast_csum) .prologue .body cmp.ne p6,p7=5,in1 // size other than 20 byte? and r14=3,in0 // is it aligned on 4-byte? 
add r15=4,in0 // second source pointer ;; cmp.ne.or.andcm p6,p7=r14,r0 ;; (p7) ld4 r20=[in0],8 (p7) ld4 r21=[r15],8 (p6) br.spnt .generic ;; ld4 r22=[in0],8 ld4 r23=[r15],8 ;; ld4 r24=[in0] add r20=r20,r21 add r22=r22,r23 ;; add r20=r20,r22 ;; add r20=r20,r24 ;; shr.u ret0=r20,16 // now need to add the carry zxt2 r20=r20 ;; add r20=ret0,r20 ;; shr.u ret0=r20,16 // add carry again zxt2 r20=r20 ;; add r20=ret0,r20 ;; shr.u ret0=r20,16 zxt2 r20=r20 ;; add r20=ret0,r20 mov r9=0xffff ;; andcm ret0=r9,r20 .restore sp // reset frame state br.ret.sptk.many b0 ;; .generic: .prologue .save ar.pfs, r35 alloc r35=ar.pfs,2,2,2,0 .save rp, r34 mov r34=b0 .body dep.z out1=in1,2,30 mov out0=in0 ;; br.call.sptk.many b0=do_csum ;; andcm ret0=-1,ret0 mov ar.pfs=r35 mov b0=r34 br.ret.sptk.many b0 END(ip_fast_csum) EXPORT_SYMBOL(ip_fast_csum) GLOBAL_ENTRY(csum_ipv6_magic) ld4 r20=[in0],4 ld4 r21=[in1],4 zxt4 in2=in2 ;; ld4 r22=[in0],4 ld4 r23=[in1],4 dep r15=in3,in2,32,16 ;; ld4 r24=[in0],4 ld4 r25=[in1],4 mux1 r15=r15,@rev add r16=r20,r21 add r17=r22,r23 zxt4 in4=in4 ;; ld4 r26=[in0],4 ld4 r27=[in1],4 shr.u r15=r15,16 add r18=r24,r25 add r8=r16,r17 ;; add r19=r26,r27 add r8=r8,r18 ;; add r8=r8,r19 add r15=r15,in4 ;; add r8=r8,r15 ;; shr.u r10=r8,32 // now fold sum into short zxt4 r11=r8 ;; add r8=r10,r11 ;; shr.u r10=r8,16 // yeah, keep it rolling zxt2 r11=r8 ;; add r8=r10,r11 ;; shr.u r10=r8,16 // three times lucky zxt2 r11=r8 ;; add r8=r10,r11 mov r9=0xffff ;; andcm r8=r9,r8 br.ret.sptk.many b0 END(csum_ipv6_magic) EXPORT_SYMBOL(csum_ipv6_magic)
aixcc-public/challenge-001-exemplar-source
2,201
arch/ia64/lib/copy_page.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * * Optimized version of the standard copy_page() function * * Inputs: * in0: address of target page * in1: address of source page * Output: * no return value * * Copyright (C) 1999, 2001 Hewlett-Packard Co * Stephane Eranian <eranian@hpl.hp.com> * David Mosberger <davidm@hpl.hp.com> * * 4/06/01 davidm Tuned to make it perform well both for cached and uncached copies. */ #include <asm/asmmacro.h> #include <asm/page.h> #include <asm/export.h> #define PIPE_DEPTH 3 #define EPI p[PIPE_DEPTH-1] #define lcount r16 #define saved_pr r17 #define saved_lc r18 #define saved_pfs r19 #define src1 r20 #define src2 r21 #define tgt1 r22 #define tgt2 r23 #define srcf r24 #define tgtf r25 #define tgt_last r26 #define Nrot ((8*PIPE_DEPTH+7)&~7) GLOBAL_ENTRY(copy_page) .prologue .save ar.pfs, saved_pfs alloc saved_pfs=ar.pfs,3,Nrot-3,0,Nrot .rotr t1[PIPE_DEPTH], t2[PIPE_DEPTH], t3[PIPE_DEPTH], t4[PIPE_DEPTH], \ t5[PIPE_DEPTH], t6[PIPE_DEPTH], t7[PIPE_DEPTH], t8[PIPE_DEPTH] .rotp p[PIPE_DEPTH] .save ar.lc, saved_lc mov saved_lc=ar.lc mov ar.ec=PIPE_DEPTH mov lcount=PAGE_SIZE/64-1 .save pr, saved_pr mov saved_pr=pr mov pr.rot=1<<16 .body mov src1=in1 adds src2=8,in1 mov tgt_last = PAGE_SIZE ;; adds tgt2=8,in0 add srcf=512,in1 mov ar.lc=lcount mov tgt1=in0 add tgtf=512,in0 add tgt_last = tgt_last, in0 ;; 1: (p[0]) ld8 t1[0]=[src1],16 (EPI) st8 [tgt1]=t1[PIPE_DEPTH-1],16 (p[0]) ld8 t2[0]=[src2],16 (EPI) st8 [tgt2]=t2[PIPE_DEPTH-1],16 cmp.ltu p6,p0 = tgtf, tgt_last ;; (p[0]) ld8 t3[0]=[src1],16 (EPI) st8 [tgt1]=t3[PIPE_DEPTH-1],16 (p[0]) ld8 t4[0]=[src2],16 (EPI) st8 [tgt2]=t4[PIPE_DEPTH-1],16 ;; (p[0]) ld8 t5[0]=[src1],16 (EPI) st8 [tgt1]=t5[PIPE_DEPTH-1],16 (p[0]) ld8 t6[0]=[src2],16 (EPI) st8 [tgt2]=t6[PIPE_DEPTH-1],16 ;; (p[0]) ld8 t7[0]=[src1],16 (EPI) st8 [tgt1]=t7[PIPE_DEPTH-1],16 (p[0]) ld8 t8[0]=[src2],16 (EPI) st8 [tgt2]=t8[PIPE_DEPTH-1],16 (p6) lfetch [srcf], 64 (p6) lfetch [tgtf], 64 br.ctop.sptk.few 1b ;; mov 
pr=saved_pr,0xffffffffffff0000 // restore predicates mov ar.pfs=saved_pfs mov ar.lc=saved_lc br.ret.sptk.many rp END(copy_page) EXPORT_SYMBOL(copy_page)
aixcc-public/challenge-001-exemplar-source
2,205
arch/ia64/lib/idiv32.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2000 Hewlett-Packard Co * Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com> * * 32-bit integer division. * * This code is based on the application note entitled "Divide, Square Root * and Remainder Algorithms for the IA-64 Architecture". This document * is available as Intel document number 248725-002 or via the web at * http://developer.intel.com/software/opensource/numerics/ * * For more details on the theory behind these algorithms, see "IA-64 * and Elementary Functions" by Peter Markstein; HP Professional Books * (http://www.goodreads.com/book/show/2019887.Ia_64_and_Elementary_Functions) */ #include <asm/asmmacro.h> #include <asm/export.h> #ifdef MODULO # define OP mod #else # define OP div #endif #ifdef UNSIGNED # define SGN u # define EXTEND zxt4 # define INT_TO_FP(a,b) fcvt.xuf.s1 a=b # define FP_TO_INT(a,b) fcvt.fxu.trunc.s1 a=b #else # define SGN # define EXTEND sxt4 # define INT_TO_FP(a,b) fcvt.xf a=b # define FP_TO_INT(a,b) fcvt.fx.trunc.s1 a=b #endif #define PASTE1(a,b) a##b #define PASTE(a,b) PASTE1(a,b) #define NAME PASTE(PASTE(__,SGN),PASTE(OP,si3)) GLOBAL_ENTRY(NAME) .regstk 2,0,0,0 // Transfer inputs to FP registers. mov r2 = 0xffdd // r2 = -34 + 65535 (fp reg format bias) EXTEND in0 = in0 // in0 = a EXTEND in1 = in1 // in1 = b ;; setf.sig f8 = in0 setf.sig f9 = in1 #ifdef MODULO sub in1 = r0, in1 // in1 = -b #endif ;; // Convert the inputs to FP, to avoid FP software-assist faults. 
INT_TO_FP(f8, f8) INT_TO_FP(f9, f9) ;; setf.exp f7 = r2 // f7 = 2^-34 frcpa.s1 f6, p6 = f8, f9 // y0 = frcpa(b) ;; (p6) fmpy.s1 f8 = f8, f6 // q0 = a*y0 (p6) fnma.s1 f6 = f9, f6, f1 // e0 = -b*y0 + 1 ;; #ifdef MODULO setf.sig f9 = in1 // f9 = -b #endif (p6) fma.s1 f8 = f6, f8, f8 // q1 = e0*q0 + q0 (p6) fma.s1 f6 = f6, f6, f7 // e1 = e0*e0 + 2^-34 ;; #ifdef MODULO setf.sig f7 = in0 #endif (p6) fma.s1 f6 = f6, f8, f8 // q2 = e1*q1 + q1 ;; FP_TO_INT(f6, f6) // q = trunc(q2) ;; #ifdef MODULO xma.l f6 = f6, f9, f7 // r = q*(-b) + a ;; #endif getf.sig r8 = f6 // transfer result to result register br.ret.sptk.many rp END(NAME) EXPORT_SYMBOL(NAME)
aixcc-public/challenge-001-exemplar-source
10,298
arch/ia64/lib/do_csum.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * * Optmized version of the standard do_csum() function * * Return: a 64bit quantity containing the 16bit Internet checksum * * Inputs: * in0: address of buffer to checksum (char *) * in1: length of the buffer (int) * * Copyright (C) 1999, 2001-2002 Hewlett-Packard Co * Stephane Eranian <eranian@hpl.hp.com> * * 02/04/22 Ken Chen <kenneth.w.chen@intel.com> * Data locality study on the checksum buffer. * More optimization cleanup - remove excessive stop bits. * 02/04/08 David Mosberger <davidm@hpl.hp.com> * More cleanup and tuning. * 01/04/18 Jun Nakajima <jun.nakajima@intel.com> * Clean up and optimize and the software pipeline, loading two * back-to-back 8-byte words per loop. Clean up the initialization * for the loop. Support the cases where load latency = 1 or 2. * Set CONFIG_IA64_LOAD_LATENCY to 1 or 2 (default). */ #include <asm/asmmacro.h> // // Theory of operations: // The goal is to go as quickly as possible to the point where // we can checksum 16 bytes/loop. Before reaching that point we must // take care of incorrect alignment of first byte. // // The code hereafter also takes care of the "tail" part of the buffer // before entering the core loop, if any. The checksum is a sum so it // allows us to commute operations. So we do the "head" and "tail" // first to finish at full speed in the body. Once we get the head and // tail values, we feed them into the pipeline, very handy initialization. // // Of course we deal with the special case where the whole buffer fits // into one 8 byte word. In this case we have only one entry in the pipeline. // // We use a (LOAD_LATENCY+2)-stage pipeline in the loop to account for // possible load latency and also to accommodate for head and tail. // // The end of the function deals with folding the checksum from 64bits // down to 16bits taking care of the carry. 
// // This version avoids synchronization in the core loop by also using a // pipeline for the accumulation of the checksum in resultx[] (x=1,2). // // wordx[] (x=1,2) // |---| // | | 0 : new value loaded in pipeline // |---| // | | - : in transit data // |---| // | | LOAD_LATENCY : current value to add to checksum // |---| // | | LOAD_LATENCY+1 : previous value added to checksum // |---| (previous iteration) // // resultx[] (x=1,2) // |---| // | | 0 : initial value // |---| // | | LOAD_LATENCY-1 : new checksum // |---| // | | LOAD_LATENCY : previous value of checksum // |---| // | | LOAD_LATENCY+1 : final checksum when out of the loop // |---| // // // See RFC1071 "Computing the Internet Checksum" for various techniques for // calculating the Internet checksum. // // NOT YET DONE: // - Maybe another algorithm which would take care of the folding at the // end in a different manner // - Work with people more knowledgeable than me on the network stack // to figure out if we could not split the function depending on the // type of packet or alignment we get. Like the ip_fast_csum() routine // where we know we have at least 20bytes worth of data to checksum. // - Do a better job of handling small packets. // - Note on prefetching: it was found that under various load, i.e. ftp read/write, // nfs read/write, the L1 cache hit rate is at 60% and L2 cache hit rate is at 99.8% // on the data that buffer points to (partly because the checksum is often preceded by // a copy_from_user()). This finding indiate that lfetch will not be beneficial since // the data is already in the cache. 
// #define saved_pfs r11 #define hmask r16 #define tmask r17 #define first1 r18 #define firstval r19 #define firstoff r20 #define last r21 #define lastval r22 #define lastoff r23 #define saved_lc r24 #define saved_pr r25 #define tmp1 r26 #define tmp2 r27 #define tmp3 r28 #define carry1 r29 #define carry2 r30 #define first2 r31 #define buf in0 #define len in1 #define LOAD_LATENCY 2 // XXX fix me #if (LOAD_LATENCY != 1) && (LOAD_LATENCY != 2) # error "Only 1 or 2 is supported/tested for LOAD_LATENCY." #endif #define PIPE_DEPTH (LOAD_LATENCY+2) #define ELD p[LOAD_LATENCY] // end of load #define ELD_1 p[LOAD_LATENCY+1] // and next stage // unsigned long do_csum(unsigned char *buf,long len) GLOBAL_ENTRY(do_csum) .prologue .save ar.pfs, saved_pfs alloc saved_pfs=ar.pfs,2,16,0,16 .rotr word1[4], word2[4],result1[LOAD_LATENCY+2],result2[LOAD_LATENCY+2] .rotp p[PIPE_DEPTH], pC1[2], pC2[2] mov ret0=r0 // in case we have zero length cmp.lt p0,p6=r0,len // check for zero length or negative (32bit len) ;; add tmp1=buf,len // last byte's address .save pr, saved_pr mov saved_pr=pr // preserve predicates (rotation) (p6) br.ret.spnt.many rp // return if zero or negative length mov hmask=-1 // initialize head mask tbit.nz p15,p0=buf,0 // is buf an odd address? and first1=-8,buf // 8-byte align down address of first1 element and firstoff=7,buf // how many bytes off for first1 element mov tmask=-1 // initialize tail mask ;; adds tmp2=-1,tmp1 // last-1 and lastoff=7,tmp1 // how many bytes off for last element ;; sub tmp1=8,lastoff // complement to lastoff and last=-8,tmp2 // address of word containing last byte ;; sub tmp3=last,first1 // tmp3=distance from first1 to last .save ar.lc, saved_lc mov saved_lc=ar.lc // save lc cmp.eq p8,p9=last,first1 // everything fits in one word ? 
ld8 firstval=[first1],8 // load, ahead of time, "first1" word and tmp1=7, tmp1 // make sure that if tmp1==8 -> tmp1=0 shl tmp2=firstoff,3 // number of bits ;; (p9) ld8 lastval=[last] // load, ahead of time, "last" word, if needed shl tmp1=tmp1,3 // number of bits (p9) adds tmp3=-8,tmp3 // effectively loaded ;; (p8) mov lastval=r0 // we don't need lastval if first1==last shl hmask=hmask,tmp2 // build head mask, mask off [0,first1off[ shr.u tmask=tmask,tmp1 // build tail mask, mask off ]8,lastoff] ;; .body #define count tmp3 (p8) and hmask=hmask,tmask // apply tail mask to head mask if 1 word only (p9) and word2[0]=lastval,tmask // mask last it as appropriate shr.u count=count,3 // how many 8-byte? ;; // If count is odd, finish this 8-byte word so that we can // load two back-to-back 8-byte words per loop thereafter. and word1[0]=firstval,hmask // and mask it as appropriate tbit.nz p10,p11=count,0 // if (count is odd) ;; (p8) mov result1[0]=word1[0] (p9) add result1[0]=word1[0],word2[0] ;; cmp.ltu p6,p0=result1[0],word1[0] // check the carry cmp.eq.or.andcm p8,p0=0,count // exit if zero 8-byte ;; (p6) adds result1[0]=1,result1[0] (p8) br.cond.dptk .do_csum_exit // if (within an 8-byte word) (p11) br.cond.dptk .do_csum16 // if (count is even) // Here count is odd. ld8 word1[1]=[first1],8 // load an 8-byte word cmp.eq p9,p10=1,count // if (count == 1) adds count=-1,count // loaded an 8-byte word ;; add result1[0]=result1[0],word1[1] ;; cmp.ltu p6,p0=result1[0],word1[1] ;; (p6) adds result1[0]=1,result1[0] (p9) br.cond.sptk .do_csum_exit // if (count == 1) exit // Fall through to calculate the checksum, feeding result1[0] as // the initial value in result1[0]. // // Calculate the checksum loading two 8-byte words per loop. 
// .do_csum16: add first2=8,first1 shr.u count=count,1 // we do 16 bytes per loop ;; adds count=-1,count mov carry1=r0 mov carry2=r0 brp.loop.imp 1f,2f ;; mov ar.ec=PIPE_DEPTH mov ar.lc=count // set lc mov pr.rot=1<<16 // result1[0] must be initialized in advance. mov result2[0]=r0 ;; .align 32 1: (ELD_1) cmp.ltu pC1[0],p0=result1[LOAD_LATENCY],word1[LOAD_LATENCY+1] (pC1[1])adds carry1=1,carry1 (ELD_1) cmp.ltu pC2[0],p0=result2[LOAD_LATENCY],word2[LOAD_LATENCY+1] (pC2[1])adds carry2=1,carry2 (ELD) add result1[LOAD_LATENCY-1]=result1[LOAD_LATENCY],word1[LOAD_LATENCY] (ELD) add result2[LOAD_LATENCY-1]=result2[LOAD_LATENCY],word2[LOAD_LATENCY] 2: (p[0]) ld8 word1[0]=[first1],16 (p[0]) ld8 word2[0]=[first2],16 br.ctop.sptk 1b ;; // Since len is a 32-bit value, carry cannot be larger than a 64-bit value. (pC1[1])adds carry1=1,carry1 // since we miss the last one (pC2[1])adds carry2=1,carry2 ;; add result1[LOAD_LATENCY+1]=result1[LOAD_LATENCY+1],carry1 add result2[LOAD_LATENCY+1]=result2[LOAD_LATENCY+1],carry2 ;; cmp.ltu p6,p0=result1[LOAD_LATENCY+1],carry1 cmp.ltu p7,p0=result2[LOAD_LATENCY+1],carry2 ;; (p6) adds result1[LOAD_LATENCY+1]=1,result1[LOAD_LATENCY+1] (p7) adds result2[LOAD_LATENCY+1]=1,result2[LOAD_LATENCY+1] ;; add result1[0]=result1[LOAD_LATENCY+1],result2[LOAD_LATENCY+1] ;; cmp.ltu p6,p0=result1[0],result2[LOAD_LATENCY+1] ;; (p6) adds result1[0]=1,result1[0] ;; .do_csum_exit: // // now fold 64 into 16 bits taking care of carry // that's not very good because it has lots of sequentiality // mov tmp3=0xffff zxt4 tmp1=result1[0] shr.u tmp2=result1[0],32 ;; add result1[0]=tmp1,tmp2 ;; and tmp1=result1[0],tmp3 shr.u tmp2=result1[0],16 ;; add result1[0]=tmp1,tmp2 ;; and tmp1=result1[0],tmp3 shr.u tmp2=result1[0],16 ;; add result1[0]=tmp1,tmp2 ;; and tmp1=result1[0],tmp3 shr.u tmp2=result1[0],16 ;; add ret0=tmp1,tmp2 mov pr=saved_pr,0xffffffffffff0000 ;; // if buf was odd then swap bytes mov ar.pfs=saved_pfs // restore ar.ec (p15) mux1 ret0=ret0,@rev // reverse 
word ;; mov ar.lc=saved_lc (p15) shr.u ret0=ret0,64-16 // + shift back to position = swap bytes br.ret.sptk.many rp // I (Jun Nakajima) wrote an equivalent code (see below), but it was // not much better than the original. So keep the original there so that // someone else can challenge. // // shr.u word1[0]=result1[0],32 // zxt4 result1[0]=result1[0] // ;; // add result1[0]=result1[0],word1[0] // ;; // zxt2 result2[0]=result1[0] // extr.u word1[0]=result1[0],16,16 // shr.u carry1=result1[0],32 // ;; // add result2[0]=result2[0],word1[0] // ;; // add result2[0]=result2[0],carry1 // ;; // extr.u ret0=result2[0],16,16 // ;; // add ret0=ret0,result2[0] // ;; // zxt2 ret0=ret0 // mov ar.pfs=saved_pfs // restore ar.ec // mov pr=saved_pr,0xffffffffffff0000 // ;; // // if buf was odd then swap bytes // mov ar.lc=saved_lc //(p15) mux1 ret0=ret0,@rev // reverse word // ;; //(p15) shr.u ret0=ret0,64-16 // + shift back to position = swap bytes // br.ret.sptk.many rp END(do_csum)
aixcc-public/challenge-001-exemplar-source
9,339
arch/ia64/lib/memset.S
/* SPDX-License-Identifier: GPL-2.0 */ /* Optimized version of the standard memset() function. Copyright (c) 2002 Hewlett-Packard Co/CERN Sverre Jarp <Sverre.Jarp@cern.ch> Return: dest Inputs: in0: dest in1: value in2: count The algorithm is fairly straightforward: set byte by byte until we we get to a 16B-aligned address, then loop on 128 B chunks using an early store as prefetching, then loop on 32B chucks, then clear remaining words, finally clear remaining bytes. Since a stf.spill f0 can store 16B in one go, we use this instruction to get peak speed when value = 0. */ #include <asm/asmmacro.h> #include <asm/export.h> #undef ret #define dest in0 #define value in1 #define cnt in2 #define tmp r31 #define save_lc r30 #define ptr0 r29 #define ptr1 r28 #define ptr2 r27 #define ptr3 r26 #define ptr9 r24 #define loopcnt r23 #define linecnt r22 #define bytecnt r21 #define fvalue f6 // This routine uses only scratch predicate registers (p6 - p15) #define p_scr p6 // default register for same-cycle branches #define p_nz p7 #define p_zr p8 #define p_unalgn p9 #define p_y p11 #define p_n p12 #define p_yy p13 #define p_nn p14 #define MIN1 15 #define MIN1P1HALF 8 #define LINE_SIZE 128 #define LSIZE_SH 7 // shift amount #define PREF_AHEAD 8 GLOBAL_ENTRY(memset) { .mmi .prologue alloc tmp = ar.pfs, 3, 0, 0, 0 lfetch.nt1 [dest] // .save ar.lc, save_lc mov.i save_lc = ar.lc .body } { .mmi mov ret0 = dest // return value cmp.ne p_nz, p_zr = value, r0 // use stf.spill if value is zero cmp.eq p_scr, p0 = cnt, r0 ;; } { .mmi and ptr2 = -(MIN1+1), dest // aligned address and tmp = MIN1, dest // prepare to check for correct alignment tbit.nz p_y, p_n = dest, 0 // Do we have an odd address? 
(M_B_U) } { .mib mov ptr1 = dest mux1 value = value, @brcst // create 8 identical bytes in word (p_scr) br.ret.dpnt.many rp // return immediately if count = 0 ;; } { .mib cmp.ne p_unalgn, p0 = tmp, r0 // } { .mib sub bytecnt = (MIN1+1), tmp // NB: # of bytes to move is 1 higher than loopcnt cmp.gt p_scr, p0 = 16, cnt // is it a minimalistic task? (p_scr) br.cond.dptk.many .move_bytes_unaligned // go move just a few (M_B_U) ;; } { .mmi (p_unalgn) add ptr1 = (MIN1+1), ptr2 // after alignment (p_unalgn) add ptr2 = MIN1P1HALF, ptr2 // after alignment (p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 3 // should we do a st8 ? ;; } { .mib (p_y) add cnt = -8, cnt // (p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 2 // should we do a st4 ? } { .mib (p_y) st8 [ptr2] = value,-4 // (p_n) add ptr2 = 4, ptr2 // ;; } { .mib (p_yy) add cnt = -4, cnt // (p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 1 // should we do a st2 ? } { .mib (p_yy) st4 [ptr2] = value,-2 // (p_nn) add ptr2 = 2, ptr2 // ;; } { .mmi mov tmp = LINE_SIZE+1 // for compare (p_y) add cnt = -2, cnt // (p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 0 // should we do a st1 ? } { .mmi setf.sig fvalue=value // transfer value to FLP side (p_y) st2 [ptr2] = value,-1 // (p_n) add ptr2 = 1, ptr2 // ;; } { .mmi (p_yy) st1 [ptr2] = value // cmp.gt p_scr, p0 = tmp, cnt // is it a minimalistic task? 
} { .mbb (p_yy) add cnt = -1, cnt // (p_scr) br.cond.dpnt.many .fraction_of_line // go move just a few ;; } { .mib nop.m 0 shr.u linecnt = cnt, LSIZE_SH (p_zr) br.cond.dptk.many .l1b // Jump to use stf.spill ;; } TEXT_ALIGN(32) // --------------------- // L1A: store ahead into cache lines; fill later { .mmi and tmp = -(LINE_SIZE), cnt // compute end of range mov ptr9 = ptr1 // used for prefetching and cnt = (LINE_SIZE-1), cnt // remainder } { .mmi mov loopcnt = PREF_AHEAD-1 // default prefetch loop cmp.gt p_scr, p0 = PREF_AHEAD, linecnt // check against actual value ;; } { .mmi (p_scr) add loopcnt = -1, linecnt // add ptr2 = 8, ptr1 // start of stores (beyond prefetch stores) add ptr1 = tmp, ptr1 // first address beyond total range ;; } { .mmi add tmp = -1, linecnt // next loop count mov.i ar.lc = loopcnt // ;; } .pref_l1a: { .mib stf8 [ptr9] = fvalue, 128 // Do stores one cache line apart nop.i 0 br.cloop.dptk.few .pref_l1a ;; } { .mmi add ptr0 = 16, ptr2 // Two stores in parallel mov.i ar.lc = tmp // ;; } .l1ax: { .mmi stf8 [ptr2] = fvalue, 8 stf8 [ptr0] = fvalue, 8 ;; } { .mmi stf8 [ptr2] = fvalue, 24 stf8 [ptr0] = fvalue, 24 ;; } { .mmi stf8 [ptr2] = fvalue, 8 stf8 [ptr0] = fvalue, 8 ;; } { .mmi stf8 [ptr2] = fvalue, 24 stf8 [ptr0] = fvalue, 24 ;; } { .mmi stf8 [ptr2] = fvalue, 8 stf8 [ptr0] = fvalue, 8 ;; } { .mmi stf8 [ptr2] = fvalue, 24 stf8 [ptr0] = fvalue, 24 ;; } { .mmi stf8 [ptr2] = fvalue, 8 stf8 [ptr0] = fvalue, 32 cmp.lt p_scr, p0 = ptr9, ptr1 // do we need more prefetching? ;; } { .mmb stf8 [ptr2] = fvalue, 24 (p_scr) stf8 [ptr9] = fvalue, 128 br.cloop.dptk.few .l1ax ;; } { .mbb cmp.le p_scr, p0 = 8, cnt // just a few bytes left ? (p_scr) br.cond.dpnt.many .fraction_of_line // Branch no. 2 br.cond.dpnt.many .move_bytes_from_alignment // Branch no. 
3 ;; } TEXT_ALIGN(32) .l1b: // ------------------------------------ // L1B: store ahead into cache lines; fill later { .mmi and tmp = -(LINE_SIZE), cnt // compute end of range mov ptr9 = ptr1 // used for prefetching and cnt = (LINE_SIZE-1), cnt // remainder } { .mmi mov loopcnt = PREF_AHEAD-1 // default prefetch loop cmp.gt p_scr, p0 = PREF_AHEAD, linecnt // check against actual value ;; } { .mmi (p_scr) add loopcnt = -1, linecnt add ptr2 = 16, ptr1 // start of stores (beyond prefetch stores) add ptr1 = tmp, ptr1 // first address beyond total range ;; } { .mmi add tmp = -1, linecnt // next loop count mov.i ar.lc = loopcnt ;; } .pref_l1b: { .mib stf.spill [ptr9] = f0, 128 // Do stores one cache line apart nop.i 0 br.cloop.dptk.few .pref_l1b ;; } { .mmi add ptr0 = 16, ptr2 // Two stores in parallel mov.i ar.lc = tmp ;; } .l1bx: { .mmi stf.spill [ptr2] = f0, 32 stf.spill [ptr0] = f0, 32 ;; } { .mmi stf.spill [ptr2] = f0, 32 stf.spill [ptr0] = f0, 32 ;; } { .mmi stf.spill [ptr2] = f0, 32 stf.spill [ptr0] = f0, 64 cmp.lt p_scr, p0 = ptr9, ptr1 // do we need more prefetching? ;; } { .mmb stf.spill [ptr2] = f0, 32 (p_scr) stf.spill [ptr9] = f0, 128 br.cloop.dptk.few .l1bx ;; } { .mib cmp.gt p_scr, p0 = 8, cnt // just a few bytes left ? (p_scr) br.cond.dpnt.many .move_bytes_from_alignment // ;; } .fraction_of_line: { .mib add ptr2 = 16, ptr1 shr.u loopcnt = cnt, 5 // loopcnt = cnt / 32 ;; } { .mib cmp.eq p_scr, p0 = loopcnt, r0 add loopcnt = -1, loopcnt (p_scr) br.cond.dpnt.many .store_words ;; } { .mib and cnt = 0x1f, cnt // compute the remaining cnt mov.i ar.lc = loopcnt ;; } TEXT_ALIGN(32) .l2: // ------------------------------------ // L2A: store 32B in 2 cycles { .mmb stf8 [ptr1] = fvalue, 8 stf8 [ptr2] = fvalue, 8 ;; } { .mmb stf8 [ptr1] = fvalue, 24 stf8 [ptr2] = fvalue, 24 br.cloop.dptk.many .l2 ;; } .store_words: { .mib cmp.gt p_scr, p0 = 8, cnt // just a few bytes left ? 
(p_scr) br.cond.dpnt.many .move_bytes_from_alignment // Branch ;; } { .mmi stf8 [ptr1] = fvalue, 8 // store cmp.le p_y, p_n = 16, cnt add cnt = -8, cnt // subtract ;; } { .mmi (p_y) stf8 [ptr1] = fvalue, 8 // store (p_y) cmp.le.unc p_yy, p_nn = 16, cnt (p_y) add cnt = -8, cnt // subtract ;; } { .mmi // store (p_yy) stf8 [ptr1] = fvalue, 8 (p_yy) add cnt = -8, cnt // subtract ;; } .move_bytes_from_alignment: { .mib cmp.eq p_scr, p0 = cnt, r0 tbit.nz.unc p_y, p0 = cnt, 2 // should we terminate with a st4 ? (p_scr) br.cond.dpnt.few .restore_and_exit ;; } { .mib (p_y) st4 [ptr1] = value,4 tbit.nz.unc p_yy, p0 = cnt, 1 // should we terminate with a st2 ? ;; } { .mib (p_yy) st2 [ptr1] = value,2 tbit.nz.unc p_y, p0 = cnt, 0 // should we terminate with a st1 ? ;; } { .mib (p_y) st1 [ptr1] = value ;; } .restore_and_exit: { .mib nop.m 0 mov.i ar.lc = save_lc br.ret.sptk.many rp ;; } .move_bytes_unaligned: { .mmi .pred.rel "mutex",p_y, p_n .pred.rel "mutex",p_yy, p_nn (p_n) cmp.le p_yy, p_nn = 4, cnt (p_y) cmp.le p_yy, p_nn = 5, cnt (p_n) add ptr2 = 2, ptr1 } { .mmi (p_y) add ptr2 = 3, ptr1 (p_y) st1 [ptr1] = value, 1 // fill 1 (odd-aligned) byte [15, 14 (or less) left] (p_y) add cnt = -1, cnt ;; } { .mmi (p_yy) cmp.le.unc p_y, p0 = 8, cnt add ptr3 = ptr1, cnt // prepare last store mov.i ar.lc = save_lc } { .mmi (p_yy) st2 [ptr1] = value, 4 // fill 2 (aligned) bytes (p_yy) st2 [ptr2] = value, 4 // fill 2 (aligned) bytes [11, 10 (o less) left] (p_yy) add cnt = -4, cnt ;; } { .mmi (p_y) cmp.le.unc p_yy, p0 = 8, cnt add ptr3 = -1, ptr3 // last store tbit.nz p_scr, p0 = cnt, 1 // will there be a st2 at the end ? } { .mmi (p_y) st2 [ptr1] = value, 4 // fill 2 (aligned) bytes (p_y) st2 [ptr2] = value, 4 // fill 2 (aligned) bytes [7, 6 (or less) left] (p_y) add cnt = -4, cnt ;; } { .mmi (p_yy) st2 [ptr1] = value, 4 // fill 2 (aligned) bytes (p_yy) st2 [ptr2] = value, 4 // fill 2 (aligned) bytes [3, 2 (or less) left] tbit.nz p_y, p0 = cnt, 0 // will there be a st1 at the end ? 
} { .mmi (p_yy) add cnt = -4, cnt ;; } { .mmb (p_scr) st2 [ptr1] = value // fill 2 (aligned) bytes (p_y) st1 [ptr3] = value // fill last byte (using ptr3) br.ret.sptk.many rp } END(memset) EXPORT_SYMBOL(memset)
aixcc-public/challenge-001-exemplar-source
17,117
arch/ia64/lib/copy_user.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * * Optimized version of the copy_user() routine. * It is used to copy date across the kernel/user boundary. * * The source and destination are always on opposite side of * the boundary. When reading from user space we must catch * faults on loads. When writing to user space we must catch * errors on stores. Note that because of the nature of the copy * we don't need to worry about overlapping regions. * * * Inputs: * in0 address of source buffer * in1 address of destination buffer * in2 number of bytes to copy * * Outputs: * ret0 0 in case of success. The number of bytes NOT copied in * case of error. * * Copyright (C) 2000-2001 Hewlett-Packard Co * Stephane Eranian <eranian@hpl.hp.com> * * Fixme: * - handle the case where we have more than 16 bytes and the alignment * are different. * - more benchmarking * - fix extraneous stop bit introduced by the EX() macro. */ #include <asm/asmmacro.h> #include <asm/export.h> // // Tuneable parameters // #define COPY_BREAK 16 // we do byte copy below (must be >=16) #define PIPE_DEPTH 21 // pipe depth #define EPI p[PIPE_DEPTH-1] // // arguments // #define dst in0 #define src in1 #define len in2 // // local registers // #define t1 r2 // rshift in bytes #define t2 r3 // lshift in bytes #define rshift r14 // right shift in bits #define lshift r15 // left shift in bits #define word1 r16 #define word2 r17 #define cnt r18 #define len2 r19 #define saved_lc r20 #define saved_pr r21 #define tmp r22 #define val r23 #define src1 r24 #define dst1 r25 #define src2 r26 #define dst2 r27 #define len1 r28 #define enddst r29 #define endsrc r30 #define saved_pfs r31 GLOBAL_ENTRY(__copy_user) .prologue .save ar.pfs, saved_pfs alloc saved_pfs=ar.pfs,3,((2*PIPE_DEPTH+7)&~7),0,((2*PIPE_DEPTH+7)&~7) .rotr val1[PIPE_DEPTH],val2[PIPE_DEPTH] .rotp p[PIPE_DEPTH] adds len2=-1,len // br.ctop is repeat/until mov ret0=r0 ;; // RAW of cfm when len=0 cmp.eq p8,p0=r0,len // check for zero length .save ar.lc, saved_lc 
mov saved_lc=ar.lc // preserve ar.lc (slow) (p8) br.ret.spnt.many rp // empty mempcy() ;; add enddst=dst,len // first byte after end of source add endsrc=src,len // first byte after end of destination .save pr, saved_pr mov saved_pr=pr // preserve predicates .body mov dst1=dst // copy because of rotation mov ar.ec=PIPE_DEPTH mov pr.rot=1<<16 // p16=true all others are false mov src1=src // copy because of rotation mov ar.lc=len2 // initialize lc for small count cmp.lt p10,p7=COPY_BREAK,len // if len > COPY_BREAK then long copy xor tmp=src,dst // same alignment test prepare (p10) br.cond.dptk .long_copy_user ;; // RAW pr.rot/p16 ? // // Now we do the byte by byte loop with software pipeline // // p7 is necessarily false by now 1: EX(.failure_in_pipe1,(p16) ld1 val1[0]=[src1],1) EX(.failure_out,(EPI) st1 [dst1]=val1[PIPE_DEPTH-1],1) br.ctop.dptk.few 1b ;; mov ar.lc=saved_lc mov pr=saved_pr,0xffffffffffff0000 mov ar.pfs=saved_pfs // restore ar.ec br.ret.sptk.many rp // end of short memcpy // // Not 8-byte aligned // .diff_align_copy_user: // At this point we know we have more than 16 bytes to copy // and also that src and dest do _not_ have the same alignment. and src2=0x7,src1 // src offset and dst2=0x7,dst1 // dst offset ;; // The basic idea is that we copy byte-by-byte at the head so // that we can reach 8-byte alignment for both src1 and dst1. // Then copy the body using software pipelined 8-byte copy, // shifting the two back-to-back words right and left, then copy // the tail by copying byte-by-byte. // // Fault handling. If the byte-by-byte at the head fails on the // load, then restart and finish the pipleline by copying zeros // to the dst1. Then copy zeros for the rest of dst1. // If 8-byte software pipeline fails on the load, do the same as // failure_in3 does. If the byte-by-byte at the tail fails, it is // handled simply by failure_in_pipe1. 
// // The case p14 represents the source has more bytes in the // the first word (by the shifted part), whereas the p15 needs to // copy some bytes from the 2nd word of the source that has the // tail of the 1st of the destination. // // // Optimization. If dst1 is 8-byte aligned (quite common), we don't need // to copy the head to dst1, to start 8-byte copy software pipeline. // We know src1 is not 8-byte aligned in this case. // cmp.eq p14,p15=r0,dst2 (p15) br.cond.spnt 1f ;; sub t1=8,src2 mov t2=src2 ;; shl rshift=t2,3 sub len1=len,t1 // set len1 ;; sub lshift=64,rshift ;; br.cond.spnt .word_copy_user ;; 1: cmp.leu p14,p15=src2,dst2 sub t1=dst2,src2 ;; .pred.rel "mutex", p14, p15 (p14) sub word1=8,src2 // (8 - src offset) (p15) sub t1=r0,t1 // absolute value (p15) sub word1=8,dst2 // (8 - dst offset) ;; // For the case p14, we don't need to copy the shifted part to // the 1st word of destination. sub t2=8,t1 (p14) sub word1=word1,t1 ;; sub len1=len,word1 // resulting len (p15) shl rshift=t1,3 // in bits (p14) shl rshift=t2,3 ;; (p14) sub len1=len1,t1 adds cnt=-1,word1 ;; sub lshift=64,rshift mov ar.ec=PIPE_DEPTH mov pr.rot=1<<16 // p16=true all others are false mov ar.lc=cnt ;; 2: EX(.failure_in_pipe2,(p16) ld1 val1[0]=[src1],1) EX(.failure_out,(EPI) st1 [dst1]=val1[PIPE_DEPTH-1],1) br.ctop.dptk.few 2b ;; clrrrb ;; .word_copy_user: cmp.gtu p9,p0=16,len1 (p9) br.cond.spnt 4f // if (16 > len1) skip 8-byte copy ;; shr.u cnt=len1,3 // number of 64-bit words ;; adds cnt=-1,cnt ;; .pred.rel "mutex", p14, p15 (p14) sub src1=src1,t2 (p15) sub src1=src1,t1 // // Now both src1 and dst1 point to an 8-byte aligned address. And // we have more than 8 bytes to copy. 
// mov ar.lc=cnt mov ar.ec=PIPE_DEPTH mov pr.rot=1<<16 // p16=true all others are false ;; 3: // // The pipleline consists of 3 stages: // 1 (p16): Load a word from src1 // 2 (EPI_1): Shift right pair, saving to tmp // 3 (EPI): Store tmp to dst1 // // To make it simple, use at least 2 (p16) loops to set up val1[n] // because we need 2 back-to-back val1[] to get tmp. // Note that this implies EPI_2 must be p18 or greater. // #define EPI_1 p[PIPE_DEPTH-2] #define SWITCH(pred, shift) cmp.eq pred,p0=shift,rshift #define CASE(pred, shift) \ (pred) br.cond.spnt .copy_user_bit##shift #define BODY(rshift) \ .copy_user_bit##rshift: \ 1: \ EX(.failure_out,(EPI) st8 [dst1]=tmp,8); \ (EPI_1) shrp tmp=val1[PIPE_DEPTH-2],val1[PIPE_DEPTH-1],rshift; \ EX(3f,(p16) ld8 val1[1]=[src1],8); \ (p16) mov val1[0]=r0; \ br.ctop.dptk 1b; \ ;; \ br.cond.sptk.many .diff_align_do_tail; \ 2: \ (EPI) st8 [dst1]=tmp,8; \ (EPI_1) shrp tmp=val1[PIPE_DEPTH-2],val1[PIPE_DEPTH-1],rshift; \ 3: \ (p16) mov val1[1]=r0; \ (p16) mov val1[0]=r0; \ br.ctop.dptk 2b; \ ;; \ br.cond.sptk.many .failure_in2 // // Since the instruction 'shrp' requires a fixed 128-bit value // specifying the bits to shift, we need to provide 7 cases // below. // SWITCH(p6, 8) SWITCH(p7, 16) SWITCH(p8, 24) SWITCH(p9, 32) SWITCH(p10, 40) SWITCH(p11, 48) SWITCH(p12, 56) ;; CASE(p6, 8) CASE(p7, 16) CASE(p8, 24) CASE(p9, 32) CASE(p10, 40) CASE(p11, 48) CASE(p12, 56) ;; BODY(8) BODY(16) BODY(24) BODY(32) BODY(40) BODY(48) BODY(56) ;; .diff_align_do_tail: .pred.rel "mutex", p14, p15 (p14) sub src1=src1,t1 (p14) adds dst1=-8,dst1 (p15) sub dst1=dst1,t1 ;; 4: // Tail correction. // // The problem with this piplelined loop is that the last word is not // loaded and thus parf of the last word written is not correct. // To fix that, we simply copy the tail byte by byte. 
sub len1=endsrc,src1,1 clrrrb ;; mov ar.ec=PIPE_DEPTH mov pr.rot=1<<16 // p16=true all others are false mov ar.lc=len1 ;; 5: EX(.failure_in_pipe1,(p16) ld1 val1[0]=[src1],1) EX(.failure_out,(EPI) st1 [dst1]=val1[PIPE_DEPTH-1],1) br.ctop.dptk.few 5b ;; mov ar.lc=saved_lc mov pr=saved_pr,0xffffffffffff0000 mov ar.pfs=saved_pfs br.ret.sptk.many rp // // Beginning of long mempcy (i.e. > 16 bytes) // .long_copy_user: tbit.nz p6,p7=src1,0 // odd alignment and tmp=7,tmp ;; cmp.eq p10,p8=r0,tmp mov len1=len // copy because of rotation (p8) br.cond.dpnt .diff_align_copy_user ;; // At this point we know we have more than 16 bytes to copy // and also that both src and dest have the same alignment // which may not be the one we want. So for now we must move // forward slowly until we reach 16byte alignment: no need to // worry about reaching the end of buffer. // EX(.failure_in1,(p6) ld1 val1[0]=[src1],1) // 1-byte aligned (p6) adds len1=-1,len1;; tbit.nz p7,p0=src1,1 ;; EX(.failure_in1,(p7) ld2 val1[1]=[src1],2) // 2-byte aligned (p7) adds len1=-2,len1;; tbit.nz p8,p0=src1,2 ;; // // Stop bit not required after ld4 because if we fail on ld4 // we have never executed the ld1, therefore st1 is not executed. // EX(.failure_in1,(p8) ld4 val2[0]=[src1],4) // 4-byte aligned ;; EX(.failure_out,(p6) st1 [dst1]=val1[0],1) tbit.nz p9,p0=src1,3 ;; // // Stop bit not required after ld8 because if we fail on ld8 // we have never executed the ld2, therefore st2 is not executed. 
// EX(.failure_in1,(p9) ld8 val2[1]=[src1],8) // 8-byte aligned EX(.failure_out,(p7) st2 [dst1]=val1[1],2) (p8) adds len1=-4,len1 ;; EX(.failure_out, (p8) st4 [dst1]=val2[0],4) (p9) adds len1=-8,len1;; shr.u cnt=len1,4 // number of 128-bit (2x64bit) words ;; EX(.failure_out, (p9) st8 [dst1]=val2[1],8) tbit.nz p6,p0=len1,3 cmp.eq p7,p0=r0,cnt adds tmp=-1,cnt // br.ctop is repeat/until (p7) br.cond.dpnt .dotail // we have less than 16 bytes left ;; adds src2=8,src1 adds dst2=8,dst1 mov ar.lc=tmp ;; // // 16bytes/iteration // 2: EX(.failure_in3,(p16) ld8 val1[0]=[src1],16) (p16) ld8 val2[0]=[src2],16 EX(.failure_out, (EPI) st8 [dst1]=val1[PIPE_DEPTH-1],16) (EPI) st8 [dst2]=val2[PIPE_DEPTH-1],16 br.ctop.dptk 2b ;; // RAW on src1 when fall through from loop // // Tail correction based on len only // // No matter where we come from (loop or test) the src1 pointer // is 16 byte aligned AND we have less than 16 bytes to copy. // .dotail: EX(.failure_in1,(p6) ld8 val1[0]=[src1],8) // at least 8 bytes tbit.nz p7,p0=len1,2 ;; EX(.failure_in1,(p7) ld4 val1[1]=[src1],4) // at least 4 bytes tbit.nz p8,p0=len1,1 ;; EX(.failure_in1,(p8) ld2 val2[0]=[src1],2) // at least 2 bytes tbit.nz p9,p0=len1,0 ;; EX(.failure_out, (p6) st8 [dst1]=val1[0],8) ;; EX(.failure_in1,(p9) ld1 val2[1]=[src1]) // only 1 byte left mov ar.lc=saved_lc ;; EX(.failure_out,(p7) st4 [dst1]=val1[1],4) mov pr=saved_pr,0xffffffffffff0000 ;; EX(.failure_out, (p8) st2 [dst1]=val2[0],2) mov ar.pfs=saved_pfs ;; EX(.failure_out, (p9) st1 [dst1]=val2[1]) br.ret.sptk.many rp // // Here we handle the case where the byte by byte copy fails // on the load. // Several factors make the zeroing of the rest of the buffer kind of // tricky: // - the pipeline: loads/stores are not in sync (pipeline) // // In the same loop iteration, the dst1 pointer does not directly // reflect where the faulty load was. 
// // - pipeline effect // When you get a fault on load, you may have valid data from // previous loads not yet store in transit. Such data must be // store normally before moving onto zeroing the rest. // // - single/multi dispersal independence. // // solution: // - we don't disrupt the pipeline, i.e. data in transit in // the software pipeline will be eventually move to memory. // We simply replace the load with a simple mov and keep the // pipeline going. We can't really do this inline because // p16 is always reset to 1 when lc > 0. // .failure_in_pipe1: sub ret0=endsrc,src1 // number of bytes to zero, i.e. not copied 1: (p16) mov val1[0]=r0 (EPI) st1 [dst1]=val1[PIPE_DEPTH-1],1 br.ctop.dptk 1b ;; mov pr=saved_pr,0xffffffffffff0000 mov ar.lc=saved_lc mov ar.pfs=saved_pfs br.ret.sptk.many rp // // This is the case where the byte by byte copy fails on the load // when we copy the head. We need to finish the pipeline and copy // zeros for the rest of the destination. Since this happens // at the top we still need to fill the body and tail. .failure_in_pipe2: sub ret0=endsrc,src1 // number of bytes to zero, i.e. not copied 2: (p16) mov val1[0]=r0 (EPI) st1 [dst1]=val1[PIPE_DEPTH-1],1 br.ctop.dptk 2b ;; sub len=enddst,dst1,1 // precompute len br.cond.dptk.many .failure_in1bis ;; // // Here we handle the head & tail part when we check for alignment. // The following code handles only the load failures. The // main diffculty comes from the fact that loads/stores are // scheduled. So when you fail on a load, the stores corresponding // to previous successful loads must be executed. // // However some simplifications are possible given the way // things work. // // 1) HEAD // Theory of operation: // // Page A | Page B // ---------|----- // 1|8 x // 1 2|8 x // 4|8 x // 1 4|8 x // 2 4|8 x // 1 2 4|8 x // |1 // |2 x // |4 x // // page_size >= 4k (2^12). (x means 4, 2, 1) // Here we suppose Page A exists and Page B does not. 
// // As we move towards eight byte alignment we may encounter faults. // The numbers on each page show the size of the load (current alignment). // // Key point: // - if you fail on 1, 2, 4 then you have never executed any smaller // size loads, e.g. failing ld4 means no ld1 nor ld2 executed // before. // // This allows us to simplify the cleanup code, because basically you // only have to worry about "pending" stores in the case of a failing // ld8(). Given the way the code is written today, this means only // worry about st2, st4. There we can use the information encapsulated // into the predicates. // // Other key point: // - if you fail on the ld8 in the head, it means you went straight // to it, i.e. 8byte alignment within an unexisting page. // Again this comes from the fact that if you crossed just for the ld8 then // you are 8byte aligned but also 16byte align, therefore you would // either go for the 16byte copy loop OR the ld8 in the tail part. // The combination ld1, ld2, ld4, ld8 where you fail on ld8 is impossible // because it would mean you had 15bytes to copy in which case you // would have defaulted to the byte by byte copy. // // // 2) TAIL // Here we now we have less than 16 bytes AND we are either 8 or 16 byte // aligned. // // Key point: // This means that we either: // - are right on a page boundary // OR // - are at more than 16 bytes from a page boundary with // at most 15 bytes to copy: no chance of crossing. // // This allows us to assume that if we fail on a load we haven't possibly // executed any of the previous (tail) ones, so we don't need to do // any stores. For instance, if we fail on ld2, this means we had // 2 or 3 bytes left to copy and we did not execute the ld8 nor ld4. // // This means that we are in a situation similar the a fault in the // head part. That's nice! // .failure_in1: sub ret0=endsrc,src1 // number of bytes to zero, i.e. 
not copied sub len=endsrc,src1,1 // // we know that ret0 can never be zero at this point // because we failed why trying to do a load, i.e. there is still // some work to do. // The failure_in1bis and length problem is taken care of at the // calling side. // ;; .failure_in1bis: // from (.failure_in3) mov ar.lc=len // Continue with a stupid byte store. ;; 5: st1 [dst1]=r0,1 br.cloop.dptk 5b ;; mov pr=saved_pr,0xffffffffffff0000 mov ar.lc=saved_lc mov ar.pfs=saved_pfs br.ret.sptk.many rp // // Here we simply restart the loop but instead // of doing loads we fill the pipeline with zeroes // We can't simply store r0 because we may have valid // data in transit in the pipeline. // ar.lc and ar.ec are setup correctly at this point // // we MUST use src1/endsrc here and not dst1/enddst because // of the pipeline effect. // .failure_in3: sub ret0=endsrc,src1 // number of bytes to zero, i.e. not copied ;; 2: (p16) mov val1[0]=r0 (p16) mov val2[0]=r0 (EPI) st8 [dst1]=val1[PIPE_DEPTH-1],16 (EPI) st8 [dst2]=val2[PIPE_DEPTH-1],16 br.ctop.dptk 2b ;; cmp.ne p6,p0=dst1,enddst // Do we need to finish the tail ? sub len=enddst,dst1,1 // precompute len (p6) br.cond.dptk .failure_in1bis ;; mov pr=saved_pr,0xffffffffffff0000 mov ar.lc=saved_lc mov ar.pfs=saved_pfs br.ret.sptk.many rp .failure_in2: sub ret0=endsrc,src1 cmp.ne p6,p0=dst1,enddst // Do we need to finish the tail ? sub len=enddst,dst1,1 // precompute len (p6) br.cond.dptk .failure_in1bis ;; mov pr=saved_pr,0xffffffffffff0000 mov ar.lc=saved_lc mov ar.pfs=saved_pfs br.ret.sptk.many rp // // handling of failures on stores: that's the easy part // .failure_out: sub ret0=enddst,dst1 mov pr=saved_pr,0xffffffffffff0000 mov ar.lc=saved_lc mov ar.pfs=saved_pfs br.ret.sptk.many rp END(__copy_user) EXPORT_SYMBOL(__copy_user)
aixcc-public/challenge-001-exemplar-source
1,220
arch/ia64/lib/strncpy_from_user.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Just like strncpy() except that if a fault occurs during copying, * -EFAULT is returned. * * Inputs: * in0: address of destination buffer * in1: address of string to be copied * in2: length of buffer in bytes * Outputs: * r8: -EFAULT in case of fault or number of bytes copied if no fault * * Copyright (C) 1998-2001 Hewlett-Packard Co * Copyright (C) 1998-2001 David Mosberger-Tang <davidm@hpl.hp.com> * * 00/03/06 D. Mosberger Fixed to return proper return value (bug found by * by Andreas Schwab <schwab@suse.de>). */ #include <asm/asmmacro.h> #include <asm/export.h> GLOBAL_ENTRY(__strncpy_from_user) alloc r2=ar.pfs,3,0,0,0 mov r8=0 mov r9=in1 ;; add r10=in1,in2 cmp.eq p6,p0=r0,in2 (p6) br.ret.spnt.many rp // XXX braindead copy loop---this needs to be optimized .Loop1: EX(.Lexit, ld1 r8=[in1],1) ;; EX(.Lexit, st1 [in0]=r8,1) cmp.ne p6,p7=r8,r0 ;; (p6) cmp.ne.unc p8,p0=in1,r10 (p8) br.cond.dpnt.few .Loop1 ;; (p6) mov r8=in2 // buffer filled up---return buffer length (p7) sub r8=in1,r9,1 // return string length (excluding NUL character) [.Lexit:] br.ret.sptk.many rp END(__strncpy_from_user) EXPORT_SYMBOL(__strncpy_from_user)
aixcc-public/challenge-001-exemplar-source
6,489
arch/ia64/lib/strlen.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * * Optimized version of the standard strlen() function * * * Inputs: * in0 address of string * * Outputs: * ret0 the number of characters in the string (0 if empty string) * does not count the \0 * * Copyright (C) 1999, 2001 Hewlett-Packard Co * Stephane Eranian <eranian@hpl.hp.com> * * 09/24/99 S.Eranian add speculation recovery code */ #include <asm/asmmacro.h> #include <asm/export.h> // // // This is an enhanced version of the basic strlen. it includes a combination // of compute zero index (czx), parallel comparisons, speculative loads and // loop unroll using rotating registers. // // General Ideas about the algorithm: // The goal is to look at the string in chunks of 8 bytes. // so we need to do a few extra checks at the beginning because the // string may not be 8-byte aligned. In this case we load the 8byte // quantity which includes the start of the string and mask the unused // bytes with 0xff to avoid confusing czx. // We use speculative loads and software pipelining to hide memory // latency and do read ahead safely. This way we defer any exception. // // Because we don't want the kernel to be relying on particular // settings of the DCR register, we provide recovery code in case // speculation fails. The recovery code is going to "redo" the work using // only normal loads. If we still get a fault then we generate a // kernel panic. Otherwise we return the strlen as usual. // // The fact that speculation may fail can be caused, for instance, by // the DCR.dm bit being set. In this case TLB misses are deferred, i.e., // a NaT bit will be set if the translation is not present. The normal // load, on the other hand, will cause the translation to be inserted // if the mapping exists. // // It should be noted that we execute recovery code only when we need // to use the data that has been speculatively loaded: we don't execute // recovery code on pure read ahead data. 
// // Remarks: // - the cmp r0,r0 is used as a fast way to initialize a predicate // register to 1. This is required to make sure that we get the parallel // compare correct. // // - we don't use the epilogue counter to exit the loop but we need to set // it to zero beforehand. // // - after the loop we must test for Nat values because neither the // czx nor cmp instruction raise a NaT consumption fault. We must be // careful not to look too far for a Nat for which we don't care. // For instance we don't need to look at a NaT in val2 if the zero byte // was in val1. // // - Clearly performance tuning is required. // // // #define saved_pfs r11 #define tmp r10 #define base r16 #define orig r17 #define saved_pr r18 #define src r19 #define mask r20 #define val r21 #define val1 r22 #define val2 r23 GLOBAL_ENTRY(strlen) .prologue .save ar.pfs, saved_pfs alloc saved_pfs=ar.pfs,11,0,0,8 // rotating must be multiple of 8 .rotr v[2], w[2] // declares our 4 aliases extr.u tmp=in0,0,3 // tmp=least significant 3 bits mov orig=in0 // keep trackof initial byte address dep src=0,in0,0,3 // src=8byte-aligned in0 address .save pr, saved_pr mov saved_pr=pr // preserve predicates (rotation) ;; .body ld8 v[1]=[src],8 // must not speculate: can fail here shl tmp=tmp,3 // multiply by 8bits/byte mov mask=-1 // our mask ;; ld8.s w[1]=[src],8 // speculatively load next cmp.eq p6,p0=r0,r0 // sets p6 to true for cmp.and sub tmp=64,tmp // how many bits to shift our mask on the right ;; shr.u mask=mask,tmp // zero enough bits to hold v[1] valuable part mov ar.ec=r0 // clear epilogue counter (saved in ar.pfs) ;; add base=-16,src // keep track of aligned base or v[1]=v[1],mask // now we have a safe initial byte pattern ;; 1: ld8.s v[0]=[src],8 // speculatively load next czx1.r val1=v[1] // search 0 byte from right czx1.r val2=w[1] // search 0 byte from right following 8bytes ;; ld8.s w[0]=[src],8 // speculatively load next to next cmp.eq.and p6,p0=8,val1 // p6 = p6 and val1==8 cmp.eq.and 
p6,p0=8,val2 // p6 = p6 and mask==8 (p6) br.wtop.dptk 1b // loop until p6 == 0 ;; // // We must return try the recovery code iff // val1_is_nat || (val1==8 && val2_is_nat) // // XXX Fixme // - there must be a better way of doing the test // cmp.eq p8,p9=8,val1 // p6 = val1 had zero (disambiguate) tnat.nz p6,p7=val1 // test NaT on val1 (p6) br.cond.spnt .recover // jump to recovery if val1 is NaT ;; // // if we come here p7 is true, i.e., initialized for // cmp // cmp.eq.and p7,p0=8,val1// val1==8? tnat.nz.and p7,p0=val2 // test NaT if val2 (p7) br.cond.spnt .recover // jump to recovery if val2 is NaT ;; (p8) mov val1=val2 // the other test got us out of the loop (p8) adds src=-16,src // correct position when 3 ahead (p9) adds src=-24,src // correct position when 4 ahead ;; sub ret0=src,orig // distance from base sub tmp=8,val1 // which byte in word mov pr=saved_pr,0xffffffffffff0000 ;; sub ret0=ret0,tmp // adjust mov ar.pfs=saved_pfs // because of ar.ec, restore no matter what br.ret.sptk.many rp // end of normal execution // // Outlined recovery code when speculation failed // // This time we don't use speculation and rely on the normal exception // mechanism. that's why the loop is not as good as the previous one // because read ahead is not possible // // IMPORTANT: // Please note that in the case of strlen() as opposed to strlen_user() // we don't use the exception mechanism, as this function is not // supposed to fail. If that happens it means we have a bug and the // code will cause of kernel fault. // // XXX Fixme // - today we restart from the beginning of the string instead // of trying to continue where we left off. // .recover: ld8 val=[base],8 // will fail if unrecoverable fault ;; or val=val,mask // remask first bytes cmp.eq p0,p6=r0,r0 // nullify first ld8 in loop ;; // // ar.ec is still zero here // 2: (p6) ld8 val=[base],8 // will fail if unrecoverable fault ;; czx1.r val1=val // search 0 byte from right ;; cmp.eq p6,p0=8,val1 // val1==8 ? 
(p6) br.wtop.dptk 2b // loop until p6 == 0 ;; // (avoid WAW on p63) sub ret0=base,orig // distance from base sub tmp=8,val1 mov pr=saved_pr,0xffffffffffff0000 ;; sub ret0=ret0,tmp // length=now - back -1 mov ar.pfs=saved_pfs // because of ar.ec, restore no matter what br.ret.sptk.many rp // end of successful recovery code END(strlen) EXPORT_SYMBOL(strlen)
aixcc-public/challenge-001-exemplar-source
3,226
arch/ia64/lib/xor.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * arch/ia64/lib/xor.S * * Optimized RAID-5 checksumming functions for IA-64. */ #include <asm/asmmacro.h> #include <asm/export.h> GLOBAL_ENTRY(xor_ia64_2) .prologue .fframe 0 .save ar.pfs, r31 alloc r31 = ar.pfs, 3, 0, 13, 16 .save ar.lc, r30 mov r30 = ar.lc .save pr, r29 mov r29 = pr ;; .body mov r8 = in1 mov ar.ec = 6 + 2 shr in0 = in0, 3 ;; adds in0 = -1, in0 mov r16 = in1 mov r17 = in2 ;; mov ar.lc = in0 mov pr.rot = 1 << 16 ;; .rotr s1[6+1], s2[6+1], d[2] .rotp p[6+2] 0: (p[0]) ld8.nta s1[0] = [r16], 8 (p[0]) ld8.nta s2[0] = [r17], 8 (p[6]) xor d[0] = s1[6], s2[6] (p[6+1])st8.nta [r8] = d[1], 8 nop.f 0 br.ctop.dptk.few 0b ;; mov ar.lc = r30 mov pr = r29, -1 br.ret.sptk.few rp END(xor_ia64_2) EXPORT_SYMBOL(xor_ia64_2) GLOBAL_ENTRY(xor_ia64_3) .prologue .fframe 0 .save ar.pfs, r31 alloc r31 = ar.pfs, 4, 0, 20, 24 .save ar.lc, r30 mov r30 = ar.lc .save pr, r29 mov r29 = pr ;; .body mov r8 = in1 mov ar.ec = 6 + 2 shr in0 = in0, 3 ;; adds in0 = -1, in0 mov r16 = in1 mov r17 = in2 ;; mov r18 = in3 mov ar.lc = in0 mov pr.rot = 1 << 16 ;; .rotr s1[6+1], s2[6+1], s3[6+1], d[2] .rotp p[6+2] 0: (p[0]) ld8.nta s1[0] = [r16], 8 (p[0]) ld8.nta s2[0] = [r17], 8 (p[6]) xor d[0] = s1[6], s2[6] ;; (p[0]) ld8.nta s3[0] = [r18], 8 (p[6+1])st8.nta [r8] = d[1], 8 (p[6]) xor d[0] = d[0], s3[6] br.ctop.dptk.few 0b ;; mov ar.lc = r30 mov pr = r29, -1 br.ret.sptk.few rp END(xor_ia64_3) EXPORT_SYMBOL(xor_ia64_3) GLOBAL_ENTRY(xor_ia64_4) .prologue .fframe 0 .save ar.pfs, r31 alloc r31 = ar.pfs, 5, 0, 27, 32 .save ar.lc, r30 mov r30 = ar.lc .save pr, r29 mov r29 = pr ;; .body mov r8 = in1 mov ar.ec = 6 + 2 shr in0 = in0, 3 ;; adds in0 = -1, in0 mov r16 = in1 mov r17 = in2 ;; mov r18 = in3 mov ar.lc = in0 mov pr.rot = 1 << 16 mov r19 = in4 ;; .rotr s1[6+1], s2[6+1], s3[6+1], s4[6+1], d[2] .rotp p[6+2] 0: (p[0]) ld8.nta s1[0] = [r16], 8 (p[0]) ld8.nta s2[0] = [r17], 8 (p[6]) xor d[0] = s1[6], s2[6] (p[0]) ld8.nta s3[0] = [r18], 8 (p[0]) 
ld8.nta s4[0] = [r19], 8 (p[6]) xor r20 = s3[6], s4[6] ;; (p[6+1])st8.nta [r8] = d[1], 8 (p[6]) xor d[0] = d[0], r20 br.ctop.dptk.few 0b ;; mov ar.lc = r30 mov pr = r29, -1 br.ret.sptk.few rp END(xor_ia64_4) EXPORT_SYMBOL(xor_ia64_4) GLOBAL_ENTRY(xor_ia64_5) .prologue .fframe 0 .save ar.pfs, r31 alloc r31 = ar.pfs, 6, 0, 34, 40 .save ar.lc, r30 mov r30 = ar.lc .save pr, r29 mov r29 = pr ;; .body mov r8 = in1 mov ar.ec = 6 + 2 shr in0 = in0, 3 ;; adds in0 = -1, in0 mov r16 = in1 mov r17 = in2 ;; mov r18 = in3 mov ar.lc = in0 mov pr.rot = 1 << 16 mov r19 = in4 mov r20 = in5 ;; .rotr s1[6+1], s2[6+1], s3[6+1], s4[6+1], s5[6+1], d[2] .rotp p[6+2] 0: (p[0]) ld8.nta s1[0] = [r16], 8 (p[0]) ld8.nta s2[0] = [r17], 8 (p[6]) xor d[0] = s1[6], s2[6] (p[0]) ld8.nta s3[0] = [r18], 8 (p[0]) ld8.nta s4[0] = [r19], 8 (p[6]) xor r21 = s3[6], s4[6] ;; (p[0]) ld8.nta s5[0] = [r20], 8 (p[6+1])st8.nta [r8] = d[1], 8 (p[6]) xor d[0] = d[0], r21 ;; (p[6]) xor d[0] = d[0], s5[6] nop.f 0 br.ctop.dptk.few 0b ;; mov ar.lc = r30 mov pr = r29, -1 br.ret.sptk.few rp END(xor_ia64_5) EXPORT_SYMBOL(xor_ia64_5)
ajeet17181/mplayer-android
7,886
libmpeg2/motion_comp_arm_s.S
@ motion_comp_arm_s.S @ Copyright (C) 2004 AGAWA Koji <i (AT) atty (DOT) jp> @ @ This file is part of mpeg2dec, a free MPEG-2 video stream decoder. @ See http://libmpeg2.sourceforge.net/ for updates. @ @ mpeg2dec is free software; you can redistribute it and/or modify @ it under the terms of the GNU General Public License as published by @ the Free Software Foundation; either version 2 of the License, or @ (at your option) any later version. @ @ mpeg2dec is distributed in the hope that it will be useful, @ but WITHOUT ANY WARRANTY; without even the implied warranty of @ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the @ GNU General Public License for more details. @ @ You should have received a copy of the GNU General Public License @ along with mpeg2dec; if not, write to the Free Software @ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA .text @ ---------------------------------------------------------------- .align .global MC_put_o_16_arm MC_put_o_16_arm: @@ void func(uint8_t * dest, const uint8_t * ref, int stride, int height) pld [r1] stmfd sp!, {r4-r11, lr} @ R14 is also called LR and r4, r1, #3 adr r5, MC_put_o_16_arm_align_jt add r5, r5, r4, lsl #2 ldr pc, [r5] MC_put_o_16_arm_align0: ldmia r1, {r4-r7} add r1, r1, r2 pld [r1] stmia r0, {r4-r7} subs r3, r3, #1 add r0, r0, r2 bne MC_put_o_16_arm_align0 ldmfd sp!, {r4-r11, pc} @@ update PC with LR content. .macro PROC shift ldmia r1, {r4-r8} add r1, r1, r2 mov r9, r4, lsr #(\shift) pld [r1] mov r10, r5, lsr #(\shift) orr r9, r9, r5, lsl #(32-\shift) mov r11, r6, lsr #(\shift) orr r10, r10, r6, lsl #(32-\shift) mov r12, r7, lsr #(\shift) orr r11, r11, r7, lsl #(32-\shift) orr r12, r12, r8, lsl #(32-\shift) stmia r0, {r9-r12} subs r3, r3, #1 add r0, r0, r2 .endm MC_put_o_16_arm_align1: and r1, r1, #0xFFFFFFFC 1: PROC(8) bne 1b ldmfd sp!, {r4-r11, pc} @@ update PC with LR content. 
MC_put_o_16_arm_align2: and r1, r1, #0xFFFFFFFC 1: PROC(16) bne 1b ldmfd sp!, {r4-r11, pc} @@ update PC with LR content. MC_put_o_16_arm_align3: and r1, r1, #0xFFFFFFFC 1: PROC(24) bne 1b ldmfd sp!, {r4-r11, pc} @@ update PC with LR content. MC_put_o_16_arm_align_jt: .word MC_put_o_16_arm_align0 .word MC_put_o_16_arm_align1 .word MC_put_o_16_arm_align2 .word MC_put_o_16_arm_align3 @ ---------------------------------------------------------------- .align .global MC_put_o_8_arm MC_put_o_8_arm: @@ void func(uint8_t * dest, const uint8_t * ref, int stride, int height) pld [r1] stmfd sp!, {r4-r10, lr} @ R14 is also called LR and r4, r1, #3 adr r5, MC_put_o_8_arm_align_jt add r5, r5, r4, lsl #2 ldr pc, [r5] MC_put_o_8_arm_align0: ldmia r1, {r4-r5} add r1, r1, r2 pld [r1] stmia r0, {r4-r5} add r0, r0, r2 subs r3, r3, #1 bne MC_put_o_8_arm_align0 ldmfd sp!, {r4-r10, pc} @@ update PC with LR content. .macro PROC8 shift ldmia r1, {r4-r6} add r1, r1, r2 mov r9, r4, lsr #(\shift) pld [r1] mov r10, r5, lsr #(\shift) orr r9, r9, r5, lsl #(32-\shift) orr r10, r10, r6, lsl #(32-\shift) stmia r0, {r9-r10} subs r3, r3, #1 add r0, r0, r2 .endm MC_put_o_8_arm_align1: and r1, r1, #0xFFFFFFFC 1: PROC8(8) bne 1b ldmfd sp!, {r4-r10, pc} @@ update PC with LR content. MC_put_o_8_arm_align2: and r1, r1, #0xFFFFFFFC 1: PROC8(16) bne 1b ldmfd sp!, {r4-r10, pc} @@ update PC with LR content. MC_put_o_8_arm_align3: and r1, r1, #0xFFFFFFFC 1: PROC8(24) bne 1b ldmfd sp!, {r4-r10, pc} @@ update PC with LR content. 
MC_put_o_8_arm_align_jt: .word MC_put_o_8_arm_align0 .word MC_put_o_8_arm_align1 .word MC_put_o_8_arm_align2 .word MC_put_o_8_arm_align3 @ ---------------------------------------------------------------- .macro AVG_PW rW1, rW2 mov \rW2, \rW2, lsl #24 orr \rW2, \rW2, \rW1, lsr #8 eor r9, \rW1, \rW2 and \rW2, \rW1, \rW2 and r10, r9, r12 add \rW2, \rW2, r10, lsr #1 and r10, r9, r11 add \rW2, \rW2, r10 .endm .align .global MC_put_x_16_arm MC_put_x_16_arm: @@ void func(uint8_t * dest, const uint8_t * ref, int stride, int height) pld [r1] stmfd sp!, {r4-r11,lr} @ R14 is also called LR and r4, r1, #3 adr r5, MC_put_x_16_arm_align_jt ldr r11, [r5] mvn r12, r11 add r5, r5, r4, lsl #2 ldr pc, [r5, #4] .macro ADJ_ALIGN_QW shift, R0, R1, R2, R3, R4 mov \R0, \R0, lsr #(\shift) orr \R0, \R0, \R1, lsl #(32 - \shift) mov \R1, \R1, lsr #(\shift) orr \R1, \R1, \R2, lsl #(32 - \shift) mov \R2, \R2, lsr #(\shift) orr \R2, \R2, \R3, lsl #(32 - \shift) mov \R3, \R3, lsr #(\shift) orr \R3, \R3, \R4, lsl #(32 - \shift) mov \R4, \R4, lsr #(\shift) @ and \R4, \R4, #0xFF .endm MC_put_x_16_arm_align0: ldmia r1, {r4-r8} add r1, r1, r2 pld [r1] AVG_PW r7, r8 AVG_PW r6, r7 AVG_PW r5, r6 AVG_PW r4, r5 stmia r0, {r5-r8} subs r3, r3, #1 add r0, r0, r2 bne MC_put_x_16_arm_align0 ldmfd sp!, {r4-r11,pc} @@ update PC with LR content. MC_put_x_16_arm_align1: and r1, r1, #0xFFFFFFFC 1: ldmia r1, {r4-r8} add r1, r1, r2 pld [r1] ADJ_ALIGN_QW 8, r4, r5, r6, r7, r8 AVG_PW r7, r8 AVG_PW r6, r7 AVG_PW r5, r6 AVG_PW r4, r5 stmia r0, {r5-r8} subs r3, r3, #1 add r0, r0, r2 bne 1b ldmfd sp!, {r4-r11,pc} @@ update PC with LR content. MC_put_x_16_arm_align2: and r1, r1, #0xFFFFFFFC 1: ldmia r1, {r4-r8} add r1, r1, r2 pld [r1] ADJ_ALIGN_QW 16, r4, r5, r6, r7, r8 AVG_PW r7, r8 AVG_PW r6, r7 AVG_PW r5, r6 AVG_PW r4, r5 stmia r0, {r5-r8} subs r3, r3, #1 add r0, r0, r2 bne 1b ldmfd sp!, {r4-r11,pc} @@ update PC with LR content. 
MC_put_x_16_arm_align3: and r1, r1, #0xFFFFFFFC 1: ldmia r1, {r4-r8} add r1, r1, r2 pld [r1] ADJ_ALIGN_QW 24, r4, r5, r6, r7, r8 AVG_PW r7, r8 AVG_PW r6, r7 AVG_PW r5, r6 AVG_PW r4, r5 stmia r0, {r5-r8} subs r3, r3, #1 add r0, r0, r2 bne 1b ldmfd sp!, {r4-r11,pc} @@ update PC with LR content. MC_put_x_16_arm_align_jt: .word 0x01010101 .word MC_put_x_16_arm_align0 .word MC_put_x_16_arm_align1 .word MC_put_x_16_arm_align2 .word MC_put_x_16_arm_align3 @ ---------------------------------------------------------------- .align .global MC_put_x_8_arm MC_put_x_8_arm: @@ void func(uint8_t * dest, const uint8_t * ref, int stride, int height) pld [r1] stmfd sp!, {r4-r11,lr} @ R14 is also called LR and r4, r1, #3 adr r5, MC_put_x_8_arm_align_jt ldr r11, [r5] mvn r12, r11 add r5, r5, r4, lsl #2 ldr pc, [r5, #4] .macro ADJ_ALIGN_DW shift, R0, R1, R2 mov \R0, \R0, lsr #(\shift) orr \R0, \R0, \R1, lsl #(32 - \shift) mov \R1, \R1, lsr #(\shift) orr \R1, \R1, \R2, lsl #(32 - \shift) mov \R2, \R2, lsr #(\shift) @ and \R4, \R4, #0xFF .endm MC_put_x_8_arm_align0: ldmia r1, {r4-r6} add r1, r1, r2 pld [r1] AVG_PW r5, r6 AVG_PW r4, r5 stmia r0, {r5-r6} subs r3, r3, #1 add r0, r0, r2 bne MC_put_x_8_arm_align0 ldmfd sp!, {r4-r11,pc} @@ update PC with LR content. MC_put_x_8_arm_align1: and r1, r1, #0xFFFFFFFC 1: ldmia r1, {r4-r6} add r1, r1, r2 pld [r1] ADJ_ALIGN_DW 8, r4, r5, r6 AVG_PW r5, r6 AVG_PW r4, r5 stmia r0, {r5-r6} subs r3, r3, #1 add r0, r0, r2 bne 1b ldmfd sp!, {r4-r11,pc} @@ update PC with LR content. MC_put_x_8_arm_align2: and r1, r1, #0xFFFFFFFC 1: ldmia r1, {r4-r6} add r1, r1, r2 pld [r1] ADJ_ALIGN_DW 16, r4, r5, r6 AVG_PW r5, r6 AVG_PW r4, r5 stmia r0, {r5-r6} subs r3, r3, #1 add r0, r0, r2 bne 1b ldmfd sp!, {r4-r11,pc} @@ update PC with LR content. 
MC_put_x_8_arm_align3: and r1, r1, #0xFFFFFFFC 1: ldmia r1, {r4-r6} add r1, r1, r2 pld [r1] ADJ_ALIGN_DW 24, r4, r5, r6 AVG_PW r5, r6 AVG_PW r4, r5 stmia r0, {r5-r6} subs r3, r3, #1 add r0, r0, r2 bne 1b ldmfd sp!, {r4-r11,pc} @@ update PC with LR content. MC_put_x_8_arm_align_jt: .word 0x01010101 .word MC_put_x_8_arm_align0 .word MC_put_x_8_arm_align1 .word MC_put_x_8_arm_align2 .word MC_put_x_8_arm_align3
aixcc-public/challenge-001-exemplar-source
4,476
arch/arc/kernel/head.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * ARC CPU startup Code * * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) * * Vineetg: Dec 2007 * -Check if we are running on Simulator or on real hardware * to skip certain things during boot on simulator */ #include <linux/linkage.h> #include <asm/asm-offsets.h> #include <asm/entry.h> #include <asm/arcregs.h> #include <asm/cache.h> #include <asm/dsp-impl.h> #include <asm/irqflags.h> .macro CPU_EARLY_SETUP ; Setting up Vectror Table (in case exception happens in early boot sr @_int_vec_base_lds, [AUX_INTR_VEC_BASE] ; Disable I-cache/D-cache if kernel so configured lr r5, [ARC_REG_IC_BCR] breq r5, 0, 1f ; I$ doesn't exist lr r5, [ARC_REG_IC_CTRL] #ifdef CONFIG_ARC_HAS_ICACHE bclr r5, r5, 0 ; 0 - Enable, 1 is Disable #else bset r5, r5, 0 ; I$ exists, but is not used #endif sr r5, [ARC_REG_IC_CTRL] 1: lr r5, [ARC_REG_DC_BCR] breq r5, 0, 1f ; D$ doesn't exist lr r5, [ARC_REG_DC_CTRL] bclr r5, r5, 6 ; Invalidate (discard w/o wback) #ifdef CONFIG_ARC_HAS_DCACHE bclr r5, r5, 0 ; Enable (+Inv) #else bset r5, r5, 0 ; Disable (+Inv) #endif sr r5, [ARC_REG_DC_CTRL] 1: #ifdef CONFIG_ISA_ARCV2 ; Unaligned access is disabled at reset, so re-enable early as ; gcc 7.3.1 (ARC GNU 2018.03) onwards generates unaligned access ; by default lr r5, [status32] #ifdef CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS bset r5, r5, STATUS_AD_BIT #else ; Although disabled at reset, bootloader might have enabled it bclr r5, r5, STATUS_AD_BIT #endif kflag r5 #ifdef CONFIG_ARC_LPB_DISABLE lr r5, [ARC_REG_LPB_BUILD] breq r5, 0, 1f ; LPB doesn't exist mov r5, 1 sr r5, [ARC_REG_LPB_CTRL] 1: #endif /* CONFIG_ARC_LPB_DISABLE */ /* On HSDK, CCMs need to remapped super early */ #ifdef CONFIG_ARC_SOC_HSDK mov r6, 0x60000000 lr r5, [ARC_REG_ICCM_BUILD] breq r5, 0, 1f sr r6, [ARC_REG_AUX_ICCM] 1: lr r5, [ARC_REG_DCCM_BUILD] breq r5, 0, 2f sr r6, [ARC_REG_AUX_DCCM] 2: #endif /* CONFIG_ARC_SOC_HSDK */ #endif /* CONFIG_ISA_ARCV2 */ ; Config 
DSP_CTRL properly, so kernel may use integer multiply, ; multiply-accumulate, and divide operations DSP_EARLY_INIT .endm .section .init.text, "ax",@progbits ;---------------------------------------------------------------- ; Default Reset Handler (jumped into from Reset vector) ; - Don't clobber r0,r1,r2 as they might have u-boot provided args ; - Platforms can override this weak version if needed ;---------------------------------------------------------------- WEAK(res_service) j stext END(res_service) ;---------------------------------------------------------------- ; Kernel Entry point ;---------------------------------------------------------------- ENTRY(stext) CPU_EARLY_SETUP #ifdef CONFIG_SMP GET_CPU_ID r5 cmp r5, 0 mov.nz r0, r5 bz .Lmaster_proceed ; Non-Masters wait for Master to boot enough and bring them up ; when they resume, tail-call to entry point mov blink, @first_lines_of_secondary j arc_platform_smp_wait_to_boot .Lmaster_proceed: #endif ; Clear BSS before updating any globals ; XXX: use ZOL here mov r5, __bss_start sub r6, __bss_stop, r5 lsr.f lp_count, r6, 2 lpnz 1f st.ab 0, [r5, 4] 1: ; Uboot - kernel ABI ; r0 = [0] No uboot interaction, [1] cmdline in r2, [2] DTB in r2 ; r1 = magic number (always zero as of now) ; r2 = pointer to uboot provided cmdline or external DTB in mem ; These are handled later in handle_uboot_args() st r0, [@uboot_tag] st r1, [@uboot_magic] st r2, [@uboot_arg] ; setup "current" tsk and optionally cache it in dedicated r25 mov r9, @init_task SET_CURR_TASK_ON_CPU r9, r0 ; r9 = tsk, r0 = scratch ; setup stack (fp, sp) mov fp, 0 ; tsk->thread_info is really a PAGE, whose bottom hoists stack GET_TSK_STACK_BASE r9, sp ; r9 = tsk, sp = stack base(output) j start_kernel ; "C" entry point END(stext) #ifdef CONFIG_SMP ;---------------------------------------------------------------- ; First lines of code run by secondary before jumping to 'C' ;---------------------------------------------------------------- .section .text, 
"ax",@progbits ENTRY(first_lines_of_secondary) ; setup per-cpu idle task as "current" on this CPU ld r0, [@secondary_idle_tsk] SET_CURR_TASK_ON_CPU r0, r1 ; setup stack (fp, sp) mov fp, 0 ; set it's stack base to tsk->thread_info bottom GET_TSK_STACK_BASE r0, sp j start_kernel_secondary END(first_lines_of_secondary) #endif
aixcc-public/challenge-001-exemplar-source
12,205
arch/arc/kernel/entry-compact.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Low Level Interrupts/Traps/Exceptions(non-TLB) Handling for ARCompact ISA * * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com) * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) * * vineetg: May 2011 * -Userspace unaligned access emulation * * vineetg: Feb 2011 (ptrace low level code fixes) * -traced syscall return code (r0) was not saved into pt_regs for restoring * into user reg-file when traded task rets to user space. * -syscalls needing arch-wrappers (mainly for passing sp as pt_regs) * were not invoking post-syscall trace hook (jumping directly into * ret_from_system_call) * * vineetg: Nov 2010: * -Vector table jumps (@8 bytes) converted into branches (@4 bytes) * -To maintain the slot size of 8 bytes/vector, added nop, which is * not executed at runtime. * * vineetg: Nov 2009 (Everything needed for TIF_RESTORE_SIGMASK) * -do_signal()invoked upon TIF_RESTORE_SIGMASK as well * -Wrappers for sys_{,rt_}sigsuspend() no longer needed as they don't * need ptregs anymore * * Vineetg: Oct 2009 * -In a rare scenario, Process gets a Priv-V exception and gets scheduled * out. Since we don't do FAKE RTIE for Priv-V, CPU exception state remains * active (AE bit enabled). This causes a double fault for a subseq valid * exception. Thus FAKE RTIE needed in low level Priv-Violation handler. * Instr Error could also cause similar scenario, so same there as well. * * Vineetg: March 2009 (Supporting 2 levels of Interrupts) * * Vineetg: Aug 28th 2008: Bug #94984 * -Zero Overhead Loop Context shd be cleared when entering IRQ/EXcp/Trap * Normally CPU does this automatically, however when doing FAKE rtie, * we need to explicitly do this. The problem in macros * FAKE_RET_FROM_EXCPN and FAKE_RET_FROM_EXCPN_LOCK_IRQ was that this bit * was being "CLEARED" rather then "SET". 
Since it is Loop INHIBIT Bit, * setting it and not clearing it clears ZOL context * * Vineetg: May 16th, 2008 * - r25 now contains the Current Task when in kernel * * Vineetg: Dec 22, 2007 * Minor Surgery of Low Level ISR to make it SMP safe * - MMU_SCRATCH0 Reg used for freeing up r9 in Level 1 ISR * - _current_task is made an array of NR_CPUS * - Access of _current_task wrapped inside a macro so that if hardware * team agrees for a dedicated reg, no other code is touched * * Amit Bhor, Rahul Trivedi, Kanika Nema, Sameer Dhavale : Codito Tech 2004 */ #include <linux/errno.h> #include <linux/linkage.h> /* {ENTRY,EXIT} */ #include <asm/entry.h> #include <asm/irqflags.h> .cpu A7 ;############################ Vector Table ################################# .macro VECTOR lbl #if 1 /* Just in case, build breaks */ j \lbl #else b \lbl nop #endif .endm .section .vector, "ax",@progbits .align 4 /* Each entry in the vector table must occupy 2 words. Since it is a jump * across sections (.vector to .text) we are guaranteed that 'j somewhere' * will use the 'j limm' form of the instruction as long as somewhere is in * a section other than .vector. 
*/ ; ********* Critical System Events ********************** VECTOR res_service ; 0x0, Reset Vector (0x0) VECTOR mem_service ; 0x8, Mem exception (0x1) VECTOR instr_service ; 0x10, Instrn Error (0x2) ; ******************** Device ISRs ********************** #ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS VECTOR handle_interrupt_level2 #else VECTOR handle_interrupt_level1 #endif .rept 28 VECTOR handle_interrupt_level1 ; Other devices .endr /* FOR ARC600: timer = 0x3, uart = 0x8, emac = 0x10 */ ; ******************** Exceptions ********************** VECTOR EV_MachineCheck ; 0x100, Fatal Machine check (0x20) VECTOR EV_TLBMissI ; 0x108, Instruction TLB miss (0x21) VECTOR EV_TLBMissD ; 0x110, Data TLB miss (0x22) VECTOR EV_TLBProtV ; 0x118, Protection Violation (0x23) ; or Misaligned Access VECTOR EV_PrivilegeV ; 0x120, Privilege Violation (0x24) VECTOR EV_Trap ; 0x128, Trap exception (0x25) VECTOR EV_Extension ; 0x130, Extn Instruction Excp (0x26) .rept 24 VECTOR reserved ; Reserved Exceptions .endr ;##################### Scratch Mem for IRQ stack switching ############# ARCFP_DATA int1_saved_reg .align 32 .type int1_saved_reg, @object .size int1_saved_reg, 4 int1_saved_reg: .zero 4 /* Each Interrupt level needs its own scratch */ ARCFP_DATA int2_saved_reg .type int2_saved_reg, @object .size int2_saved_reg, 4 int2_saved_reg: .zero 4 ; --------------------------------------------- .section .text, "ax",@progbits reserved: flag 1 ; Unexpected event, halt ;##################### Interrupt Handling ############################## #ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS ; --------------------------------------------- ; Level 2 ISR: Can interrupt a Level 1 ISR ; --------------------------------------------- ENTRY(handle_interrupt_level2) INTERRUPT_PROLOGUE 2 ;------------------------------------------------------ ; if L2 IRQ interrupted a L1 ISR, disable preemption ; ; This is to avoid a potential L1-L2-L1 scenario ; -L1 IRQ taken ; -L2 interrupts L1 (before L1 ISR could run) ; -preemption 
off IRQ, user task in syscall picked to run ; -RTIE to userspace ; Returns from L2 context fine ; But both L1 and L2 re-enabled, so another L1 can be taken ; while prev L1 is still unserviced ; ;------------------------------------------------------ ; L2 interrupting L1 implies both L2 and L1 active ; However both A2 and A1 are NOT set in STATUS32, thus ; need to check STATUS32_L2 to determine if L1 was active ld r9, [sp, PT_status32] ; get statu32_l2 (saved in pt_regs) bbit0 r9, STATUS_A1_BIT, 1f ; L1 not active when L2 IRQ, so normal ; bump thread_info->preempt_count (Disable preemption) GET_CURR_THR_INFO_FROM_SP r10 ld r9, [r10, THREAD_INFO_PREEMPT_COUNT] add r9, r9, 1 st r9, [r10, THREAD_INFO_PREEMPT_COUNT] 1: ;------------------------------------------------------ ; setup params for Linux common ISR and invoke it ;------------------------------------------------------ lr r0, [icause2] and r0, r0, 0x1f bl.d @arch_do_IRQ mov r1, sp mov r8,0x2 sr r8, [AUX_IRQ_LV12] ; clear bit in Sticky Status Reg b ret_from_exception END(handle_interrupt_level2) #endif ; --------------------------------------------- ; User Mode Memory Bus Error Interrupt Handler ; (Kernel mode memory errors handled via separate exception vectors) ; --------------------------------------------- ENTRY(mem_service) INTERRUPT_PROLOGUE 2 mov r0, ilink2 mov r1, sp ; User process needs to be killed with SIGBUS, but first need to get ; out of the L2 interrupt context (drop to pure kernel mode) and jump ; off to "C" code where SIGBUS in enqueued lr r3, [status32] bclr r3, r3, STATUS_A2_BIT or r3, r3, (STATUS_E1_MASK|STATUS_E2_MASK) sr r3, [status32_l2] mov ilink2, 1f rtie 1: bl do_memory_error b ret_from_exception END(mem_service) ; --------------------------------------------- ; Level 1 ISR ; --------------------------------------------- ENTRY(handle_interrupt_level1) INTERRUPT_PROLOGUE 1 lr r0, [icause1] and r0, r0, 0x1f #ifdef CONFIG_TRACE_IRQFLAGS ; icause1 needs to be read early, before calling 
tracing, which ; can clobber scratch regs, hence use of stack to stash it push r0 TRACE_ASM_IRQ_DISABLE pop r0 #endif bl.d @arch_do_IRQ mov r1, sp mov r8,0x1 sr r8, [AUX_IRQ_LV12] ; clear bit in Sticky Status Reg b ret_from_exception END(handle_interrupt_level1) ;################### Non TLB Exception Handling ############################# ; --------------------------------------------- ; Protection Violation Exception Handler ; --------------------------------------------- ENTRY(EV_TLBProtV) EXCEPTION_PROLOGUE mov r2, r10 ; ECR set into r10 already lr r0, [efa] ; Faulting Data address (not part of pt_regs saved above) ; Exception auto-disables further Intr/exceptions. ; Re-enable them by pretending to return from exception ; (so rest of handler executes in pure K mode) FAKE_RET_FROM_EXCPN mov r1, sp ; Handle to pt_regs ;------ (5) Type of Protection Violation? ---------- ; ; ProtV Hardware Exception is triggered for Access Faults of 2 types ; -Access Violation : 00_23_(00|01|02|03)_00 ; x r w r+w ; -Unaligned Access : 00_23_04_00 ; bbit1 r2, ECR_C_BIT_PROTV_MISALIG_DATA, 4f ;========= (6a) Access Violation Processing ======== bl do_page_fault b ret_from_exception ;========== (6b) Non aligned access ============ 4: SAVE_CALLEE_SAVED_USER mov r2, sp ; callee_regs bl do_misaligned_access ; TBD: optimize - do this only if a callee reg was involved ; either a dst of emulated LD/ST or src with address-writeback RESTORE_CALLEE_SAVED_USER b ret_from_exception END(EV_TLBProtV) ; Wrapper for Linux page fault handler called from EV_TLBMiss* ; Very similar to ProtV handler case (6a) above, but avoids the extra checks ; for Misaligned access ; ENTRY(call_do_page_fault) EXCEPTION_PROLOGUE lr r0, [efa] ; Faulting Data address mov r1, sp FAKE_RET_FROM_EXCPN mov blink, ret_from_exception b do_page_fault END(call_do_page_fault) ;############# Common Handlers for ARCompact and ARCv2 ############## #include "entry.S" ;############# Return from Intr/Excp/Trap (ARC Specifics) 
############## ; ; Restore the saved sys context (common exit-path for EXCPN/IRQ/Trap) ; IRQ shd definitely not happen between now and rtie ; All 2 entry points to here already disable interrupts .Lrestore_regs: # Interrupts are actually disabled from this point on, but will get # reenabled after we return from interrupt/exception. # But irq tracer needs to be told now... TRACE_ASM_IRQ_ENABLE lr r10, [status32] ; Restore REG File. In case multiple Events outstanding, ; use the same priority as rtie: EXCPN, L2 IRQ, L1 IRQ, None ; Note that we use realtime STATUS32 (not pt_regs->status32) to ; decide that. and.f 0, r10, (STATUS_A1_MASK|STATUS_A2_MASK) bz .Lexcep_or_pure_K_ret ; Returning from Interrupts (Level 1 or 2) #ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS ; Level 2 interrupt return Path - from hardware standpoint bbit0 r10, STATUS_A2_BIT, not_level2_interrupt ;------------------------------------------------------------------ ; However the context returning might not have taken L2 intr itself ; e.g. Task'A' user-code -> L2 intr -> schedule -> 'B' user-code ret ; Special considerations needed for the context which took L2 intr ld r9, [sp, PT_event] ; Ensure this is L2 intr context brne r9, event_IRQ2, 149f ;------------------------------------------------------------------ ; if L2 IRQ interrupted an L1 ISR, we'd disabled preemption earlier ; so that sched doesn't move to new task, causing L1 to be delayed ; undeterministically. 
Now that we've achieved that, let's reset ; things to what they were, before returning from L2 context ;---------------------------------------------------------------- ld r9, [sp, PT_status32] ; get statu32_l2 (saved in pt_regs) bbit0 r9, STATUS_A1_BIT, 149f ; L1 not active when L2 IRQ, so normal ; decrement thread_info->preempt_count (re-enable preemption) GET_CURR_THR_INFO_FROM_SP r10 ld r9, [r10, THREAD_INFO_PREEMPT_COUNT] ; paranoid check, given A1 was active when A2 happened, preempt count ; must not be 0 because we would have incremented it. ; If this does happen we simply HALT as it means a BUG !!! cmp r9, 0 bnz 2f flag 1 2: sub r9, r9, 1 st r9, [r10, THREAD_INFO_PREEMPT_COUNT] 149: INTERRUPT_EPILOGUE 2 ; return from level 2 interrupt debug_marker_l2: rtie not_level2_interrupt: #endif INTERRUPT_EPILOGUE 1 ; return from level 1 interrupt debug_marker_l1: rtie .Lexcep_or_pure_K_ret: ;this case is for syscalls or Exceptions or pure kernel mode EXCEPTION_EPILOGUE debug_marker_syscall: rtie END(ret_from_exception)
aixcc-public/challenge-001-exemplar-source
7,289
arch/arc/kernel/entry-arcv2.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * ARCv2 ISA based core Low Level Intr/Traps/Exceptions(non-TLB) Handling * * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com) */ #include <linux/linkage.h> /* ARC_{EXTRY,EXIT} */ #include <asm/entry.h> /* SAVE_ALL_{INT1,INT2,TRAP...} */ #include <asm/errno.h> #include <asm/arcregs.h> #include <asm/irqflags.h> #include <asm/mmu.h> ; A maximum number of supported interrupts in the core interrupt controller. ; This number is not equal to the maximum interrupt number (256) because ; first 16 lines are reserved for exceptions and are not configurable. #define NR_CPU_IRQS 240 .cpu HS #define VECTOR .word ;############################ Vector Table ################################# .section .vector,"a",@progbits .align 4 # Initial 16 slots are Exception Vectors VECTOR res_service ; Reset Vector VECTOR mem_service ; Mem exception VECTOR instr_service ; Instrn Error VECTOR EV_MachineCheck ; Fatal Machine check VECTOR EV_TLBMissI ; Intruction TLB miss VECTOR EV_TLBMissD ; Data TLB miss VECTOR EV_TLBProtV ; Protection Violation VECTOR EV_PrivilegeV ; Privilege Violation VECTOR EV_SWI ; Software Breakpoint VECTOR EV_Trap ; Trap exception VECTOR EV_Extension ; Extn Instruction Exception VECTOR EV_DivZero ; Divide by Zero VECTOR EV_DCError ; Data Cache Error VECTOR EV_Misaligned ; Misaligned Data Access VECTOR reserved ; Reserved slots VECTOR reserved ; Reserved slots # Begin Interrupt Vectors VECTOR handle_interrupt ; (16) Timer0 VECTOR handle_interrupt ; unused (Timer1) VECTOR handle_interrupt ; unused (WDT) VECTOR handle_interrupt ; (19) Inter core Interrupt (IPI) VECTOR handle_interrupt ; (20) perf Interrupt VECTOR handle_interrupt ; (21) Software Triggered Intr (Self IPI) VECTOR handle_interrupt ; unused VECTOR handle_interrupt ; (23) unused # End of fixed IRQs .rept NR_CPU_IRQS - 8 VECTOR handle_interrupt .endr .section .text, "ax",@progbits reserved: flag 1 ; Unexpected event, halt ;##################### Interrupt Handling 
############################## ENTRY(handle_interrupt) INTERRUPT_PROLOGUE # irq control APIs local_irq_save/restore/disable/enable fiddle with # global interrupt enable bits in STATUS32 (.IE for 1 prio, .E[] for 2 prio) # However a taken interrupt doesn't clear these bits. Thus irqs_disabled() # query in hard ISR path would return false (since .IE is set) which would # trips genirq interrupt handling asserts. # # So do a "soft" disable of interrutps here. # # Note this disable is only for consistent book-keeping as further interrupts # will be disabled anyways even w/o this. Hardware tracks active interrupts # seperately in AUX_IRQ_ACT.active and will not take new interrupts # unless this one returns (or higher prio becomes pending in 2-prio scheme) IRQ_DISABLE ; icause is banked: one per priority level ; so a higher prio interrupt taken here won't clobber prev prio icause lr r0, [ICAUSE] mov blink, ret_from_exception b.d arch_do_IRQ mov r1, sp END(handle_interrupt) ;################### Non TLB Exception Handling ############################# ENTRY(EV_SWI) ; TODO: implement this EXCEPTION_PROLOGUE b ret_from_exception END(EV_SWI) ENTRY(EV_DivZero) ; TODO: implement this EXCEPTION_PROLOGUE b ret_from_exception END(EV_DivZero) ENTRY(EV_DCError) ; TODO: implement this EXCEPTION_PROLOGUE b ret_from_exception END(EV_DCError) ; --------------------------------------------- ; Memory Error Exception Handler ; - Unlike ARCompact, handles Bus errors for both User/Kernel mode, ; Instruction fetch or Data access, under a single Exception Vector ; --------------------------------------------- ENTRY(mem_service) EXCEPTION_PROLOGUE lr r0, [efa] mov r1, sp FAKE_RET_FROM_EXCPN bl do_memory_error b ret_from_exception END(mem_service) ENTRY(EV_Misaligned) EXCEPTION_PROLOGUE lr r0, [efa] ; Faulting Data address mov r1, sp FAKE_RET_FROM_EXCPN SAVE_CALLEE_SAVED_USER mov r2, sp ; callee_regs bl do_misaligned_access ; TBD: optimize - do this only if a callee reg was involved ; either a 
dst of emulated LD/ST or src with address-writeback RESTORE_CALLEE_SAVED_USER b ret_from_exception END(EV_Misaligned) ; --------------------------------------------- ; Protection Violation Exception Handler ; --------------------------------------------- ENTRY(EV_TLBProtV) EXCEPTION_PROLOGUE lr r0, [efa] ; Faulting Data address mov r1, sp ; pt_regs FAKE_RET_FROM_EXCPN mov blink, ret_from_exception b do_page_fault END(EV_TLBProtV) ; From Linux standpoint Slow Path I/D TLB Miss is same a ProtV as they ; need to call do_page_fault(). ; ECR in pt_regs provides whether access was R/W/X .global call_do_page_fault .set call_do_page_fault, EV_TLBProtV ;############# Common Handlers for ARCompact and ARCv2 ############## #include "entry.S" ;############# Return from Intr/Excp/Trap (ARCv2 ISA Specifics) ############## ; ; Restore the saved sys context (common exit-path for EXCPN/IRQ/Trap) ; IRQ shd definitely not happen between now and rtie ; All 2 entry points to here already disable interrupts .Lrestore_regs: restore_regs: # Interrpts are actually disabled from this point on, but will get # reenabled after we return from interrupt/exception. # But irq tracer needs to be told now... 
TRACE_ASM_IRQ_ENABLE ld r0, [sp, PT_status32] ; U/K mode at time of entry lr r10, [AUX_IRQ_ACT] bmsk r11, r10, 15 ; extract AUX_IRQ_ACT.active breq r11, 0, .Lexcept_ret ; No intr active, ret from Exception ;####### Return from Intr ####### .Lisr_ret: debug_marker_l1: ; bbit1.nt r0, STATUS_DE_BIT, .Lintr_ret_to_delay_slot btst r0, STATUS_DE_BIT ; Z flag set if bit clear bnz .Lintr_ret_to_delay_slot ; branch if STATUS_DE_BIT set ; Handle special case #1: (Entry via Exception, Return via IRQ) ; ; Exception in U mode, preempted in kernel, Intr taken (K mode), orig ; task now returning to U mode (riding the Intr) ; AUX_IRQ_ACTIVE won't have U bit set (since intr in K mode), hence SP ; won't be switched to correct U mode value (from AUX_SP) ; So force AUX_IRQ_ACT.U for such a case btst r0, STATUS_U_BIT ; Z flag set if K (Z clear for U) bset.nz r11, r11, AUX_IRQ_ACT_BIT_U ; NZ means U sr r11, [AUX_IRQ_ACT] INTERRUPT_EPILOGUE rtie ;####### Return from Exception / pure kernel mode ####### .Lexcept_ret: ; Expects r0 has PT_status32 debug_marker_syscall: EXCEPTION_EPILOGUE rtie ;####### Return from Intr to insn in delay slot ####### ; Handle special case #2: (Entry via Exception in Delay Slot, Return via IRQ) ; ; Intr returning to a Delay Slot (DS) insn ; (since IRQ NOT allowed in DS in ARCv2, this can only happen if orig ; entry was via Exception in DS which got preempted in kernel). ; ; IRQ RTIE won't reliably restore DE bit and/or BTA, needs workaround ; ; Solution is to drop out of interrupt context into pure kernel mode ; and return from pure kernel mode which does right things for delay slot .Lintr_ret_to_delay_slot: debug_marker_ds: ld r2, [@intr_to_DE_cnt] add r2, r2, 1 st r2, [@intr_to_DE_cnt] ; drop out of interrupt context (clear AUX_IRQ_ACT.active) bmskn r11, r10, 15 sr r11, [AUX_IRQ_ACT] b .Lexcept_ret END(ret_from_exception)
aixcc-public/challenge-001-exemplar-source
10,006
arch/arc/kernel/entry.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Common Low Level Interrupts/Traps/Exceptions(non-TLB) Handling for ARC * (included from entry-<isa>.S * * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com) * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) */ /*------------------------------------------------------------------ * Function ABI *------------------------------------------------------------------ * * Arguments r0 - r7 * Caller Saved Registers r0 - r12 * Callee Saved Registers r13- r25 * Global Pointer (gp) r26 * Frame Pointer (fp) r27 * Stack Pointer (sp) r28 * Branch link register (blink) r31 *------------------------------------------------------------------ */ ;################### Special Sys Call Wrappers ########################## ENTRY(sys_clone_wrapper) SAVE_CALLEE_SAVED_USER bl @sys_clone DISCARD_CALLEE_SAVED_USER GET_CURR_THR_INFO_FLAGS r10 and.f 0, r10, _TIF_SYSCALL_WORK bnz tracesys_exit b .Lret_from_system_call END(sys_clone_wrapper) ENTRY(sys_clone3_wrapper) SAVE_CALLEE_SAVED_USER bl @sys_clone3 DISCARD_CALLEE_SAVED_USER GET_CURR_THR_INFO_FLAGS r10 and.f 0, r10, _TIF_SYSCALL_WORK bnz tracesys_exit b .Lret_from_system_call END(sys_clone3_wrapper) ENTRY(ret_from_fork) ; when the forked child comes here from the __switch_to function ; r0 has the last task pointer. ; put last task in scheduler queue jl @schedule_tail ld r9, [sp, PT_status32] brne r9, 0, 1f jl.d [r14] ; kernel thread entry point mov r0, r13 ; (see PF_KTHREAD block in copy_thread) 1: ; Return to user space ; 1. Any forked task (Reach here via BRne above) ; 2. 
First ever init task (Reach here via return from JL above) ; This is the historic "kernel_execve" use-case, to return to init ; user mode, in a round about way since that is always done from ; a kernel thread which is executed via JL above but always returns ; out whenever kernel_execve (now inline do_fork()) is involved b ret_from_exception END(ret_from_fork) ;################### Non TLB Exception Handling ############################# ; --------------------------------------------- ; Instruction Error Exception Handler ; --------------------------------------------- ENTRY(instr_service) EXCEPTION_PROLOGUE lr r0, [efa] mov r1, sp FAKE_RET_FROM_EXCPN bl do_insterror_or_kprobe b ret_from_exception END(instr_service) ; --------------------------------------------- ; Machine Check Exception Handler ; --------------------------------------------- ENTRY(EV_MachineCheck) EXCEPTION_PROLOGUE lr r2, [ecr] lr r0, [efa] mov r1, sp ; MC excpetions disable MMU ARC_MMU_REENABLE r3 lsr r3, r2, 8 bmsk r3, r3, 7 brne r3, ECR_C_MCHK_DUP_TLB, 1f bl do_tlb_overlap_fault b ret_from_exception 1: ; DEAD END: can't do much, display Regs and HALT SAVE_CALLEE_SAVED_USER GET_CURR_TASK_FIELD_PTR TASK_THREAD, r10 st sp, [r10, THREAD_CALLEE_REG] j do_machine_check_fault END(EV_MachineCheck) ; --------------------------------------------- ; Privilege Violation Exception Handler ; --------------------------------------------- ENTRY(EV_PrivilegeV) EXCEPTION_PROLOGUE lr r0, [efa] mov r1, sp FAKE_RET_FROM_EXCPN bl do_privilege_fault b ret_from_exception END(EV_PrivilegeV) ; --------------------------------------------- ; Extension Instruction Exception Handler ; --------------------------------------------- ENTRY(EV_Extension) EXCEPTION_PROLOGUE lr r0, [efa] mov r1, sp FAKE_RET_FROM_EXCPN bl do_extension_fault b ret_from_exception END(EV_Extension) ;################ Trap Handling (Syscall, Breakpoint) ################## ; --------------------------------------------- ; syscall Tracing ; 
--------------------------------------------- tracesys: ; save EFA in case tracer wants the PC of traced task ; using ERET won't work since next-PC has already committed GET_CURR_TASK_FIELD_PTR TASK_THREAD, r11 st r12, [r11, THREAD_FAULT_ADDR] ; thread.fault_address ; PRE Sys Call Ptrace hook mov r0, sp ; pt_regs needed bl @syscall_trace_entry ; Tracing code now returns the syscall num (orig or modif) mov r8, r0 ; Do the Sys Call as we normally would. ; Validate the Sys Call number cmp r8, NR_syscalls - 1 mov.hi r0, -ENOSYS bhi tracesys_exit ; Restore the sys-call args. Mere invocation of the hook abv could have ; clobbered them (since they are in scratch regs). The tracer could also ; have deliberately changed the syscall args: r0-r7 ld r0, [sp, PT_r0] ld r1, [sp, PT_r1] ld r2, [sp, PT_r2] ld r3, [sp, PT_r3] ld r4, [sp, PT_r4] ld r5, [sp, PT_r5] ld r6, [sp, PT_r6] ld r7, [sp, PT_r7] ld.as r9, [sys_call_table, r8] jl [r9] ; Entry into Sys Call Handler tracesys_exit: st r0, [sp, PT_r0] ; sys call return value in pt_regs ;POST Sys Call Ptrace Hook mov r0, sp ; pt_regs needed bl @syscall_trace_exit b ret_from_exception ; NOT ret_from_system_call at is saves r0 which ; we'd done before calling post hook above ; --------------------------------------------- ; Breakpoint TRAP ; --------------------------------------------- trap_with_param: mov r0, r12 ; EFA in case ptracer/gdb wants stop_pc mov r1, sp ; Save callee regs in case gdb wants to have a look ; SP will grow up by size of CALLEE Reg-File ; NOTE: clobbers r12 SAVE_CALLEE_SAVED_USER ; save location of saved Callee Regs @ thread_struct->pc GET_CURR_TASK_FIELD_PTR TASK_THREAD, r10 st sp, [r10, THREAD_CALLEE_REG] ; Call the trap handler bl do_non_swi_trap ; unwind stack to discard Callee saved Regs DISCARD_CALLEE_SAVED_USER b ret_from_exception ; --------------------------------------------- ; syscall TRAP ; ABI: (r0-r7) upto 8 args, (r8) syscall number ; --------------------------------------------- ENTRY(EV_Trap) 
EXCEPTION_PROLOGUE lr r12, [efa] FAKE_RET_FROM_EXCPN ;============ TRAP 1 :breakpoints ; Check ECR for trap with arg (PROLOGUE ensures r10 has ECR) bmsk.f 0, r10, 7 bnz trap_with_param ;============ TRAP (no param): syscall top level ; If syscall tracing ongoing, invoke pre-post-hooks GET_CURR_THR_INFO_FLAGS r10 and.f 0, r10, _TIF_SYSCALL_WORK bnz tracesys ; this never comes back ;============ Normal syscall case ; syscall num shd not exceed the total system calls avail cmp r8, NR_syscalls - 1 mov.hi r0, -ENOSYS bhi .Lret_from_system_call ; Offset into the syscall_table and call handler ld.as r9,[sys_call_table, r8] jl [r9] ; Entry into Sys Call Handler .Lret_from_system_call: st r0, [sp, PT_r0] ; sys call return value in pt_regs ; fall through to ret_from_exception END(EV_Trap) ;############# Return from Intr/Excp/Trap (Linux Specifics) ############## ; ; If ret to user mode do we need to handle signals, schedule() et al. ENTRY(ret_from_exception) ; Pre-{IRQ,Trap,Exception} K/U mode from pt_regs->status32 ld r8, [sp, PT_status32] ; returning to User/Kernel Mode bbit0 r8, STATUS_U_BIT, resume_kernel_mode ; Before returning to User mode check-for-and-complete any pending work ; such as rescheduling/signal-delivery etc. resume_user_mode_begin: ; Disable IRQs to ensures that chk for pending work itself is atomic ; (and we don't end up missing a NEED_RESCHED/SIGPENDING due to an ; interim IRQ). 
IRQ_DISABLE r10 ; Fast Path return to user mode if no pending work GET_CURR_THR_INFO_FLAGS r9 and.f 0, r9, _TIF_WORK_MASK bz .Lrestore_regs ; --- (Slow Path #1) task preemption --- bbit0 r9, TIF_NEED_RESCHED, .Lchk_pend_signals mov blink, resume_user_mode_begin ; tail-call to U mode ret chks j @schedule ; BTST+Bnz causes relo error in link .Lchk_pend_signals: IRQ_ENABLE r10 ; --- (Slow Path #2) pending signal --- mov r0, sp ; pt_regs for arg to do_signal()/do_notify_resume() GET_CURR_THR_INFO_FLAGS r9 and.f 0, r9, _TIF_SIGPENDING|_TIF_NOTIFY_SIGNAL bz .Lchk_notify_resume ; Normal Trap/IRQ entry only saves Scratch (caller-saved) regs ; in pt_reg since the "C" ABI (kernel code) will automatically ; save/restore callee-saved regs. ; ; However, here we need to explicitly save callee regs because ; (i) If this signal causes coredump - full regfile needed ; (ii) If signal is SIGTRAP/SIGSTOP, task is being traced thus ; tracer might call PEEKUSR(CALLEE reg) ; ; NOTE: SP will grow up by size of CALLEE Reg-File SAVE_CALLEE_SAVED_USER ; clobbers r12 ; save location of saved Callee Regs @ thread_struct->callee GET_CURR_TASK_FIELD_PTR TASK_THREAD, r10 st sp, [r10, THREAD_CALLEE_REG] bl @do_signal ; Ideally we want to discard the Callee reg above, however if this was ; a tracing signal, tracer could have done a POKEUSR(CALLEE reg) RESTORE_CALLEE_SAVED_USER b resume_user_mode_begin ; loop back to start of U mode ret ; --- (Slow Path #3) notify_resume --- .Lchk_notify_resume: btst r9, TIF_NOTIFY_RESUME blnz @do_notify_resume b resume_user_mode_begin ; unconditionally back to U mode ret chks ; for single exit point from this block resume_kernel_mode: ; Disable Interrupts from this point on ; CONFIG_PREEMPTION: This is a must for preempt_schedule_irq() ; !CONFIG_PREEMPTION: To ensure restore_regs is intr safe IRQ_DISABLE r9 #ifdef CONFIG_PREEMPTION ; Can't preempt if preemption disabled GET_CURR_THR_INFO_FROM_SP r10 ld r8, [r10, THREAD_INFO_PREEMPT_COUNT] brne r8, 0, .Lrestore_regs 
; check if this task's NEED_RESCHED flag set ld r9, [r10, THREAD_INFO_FLAGS] bbit0 r9, TIF_NEED_RESCHED, .Lrestore_regs ; Invoke PREEMPTION jl preempt_schedule_irq ; preempt_schedule_irq() always returns with IRQ disabled #endif b .Lrestore_regs ##### DONT ADD CODE HERE - .Lrestore_regs actually follows in entry-<isa>.S
aixcc-public/challenge-001-exemplar-source
1,569
arch/arc/kernel/ctx_sw_asm.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) * * Vineetg: Aug 2009 * -Moved core context switch macro out of entry.S into this file. * -This is the more "natural" hand written assembler */ #include <linux/linkage.h> #include <asm/entry.h> /* For the SAVE_* macros */ #include <asm/asm-offsets.h> #define KSP_WORD_OFF ((TASK_THREAD + THREAD_KSP) / 4) ;################### Low Level Context Switch ########################## .section .sched.text,"ax",@progbits .align 4 .global __switch_to .type __switch_to, @function __switch_to: CFI_STARTPROC /* Save regs on kernel mode stack of task */ st.a blink, [sp, -4] st.a fp, [sp, -4] SAVE_CALLEE_SAVED_KERNEL /* Save the now KSP in task->thread.ksp */ #if KSP_WORD_OFF <= 255 st.as sp, [r0, KSP_WORD_OFF] #else /* Workaround for NR_CPUS=4k as ST.as can only take s9 offset */ add2 r24, r0, KSP_WORD_OFF st sp, [r24] #endif /* * Return last task in r0 (return reg) * On ARC, Return reg = First Arg reg = r0. * Since we already have last task in r0, * don't need to do anything special to return it */ /* * switch to new task, contained in r1 * Temp reg r3 is required to get the ptr to store val */ SET_CURR_TASK_ON_CPU r1, r3 /* reload SP with kernel mode stack pointer in task->thread.ksp */ ld.as sp, [r1, (TASK_THREAD + THREAD_KSP)/4] /* restore the registers */ RESTORE_CALLEE_SAVED_KERNEL ld.ab fp, [sp, 4] ld.ab blink, [sp, 4] j [blink] END_CFI(__switch_to)
aixcc-public/challenge-001-exemplar-source
3,027
arch/arc/kernel/vmlinux.lds.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) */ #include <asm-generic/vmlinux.lds.h> #include <asm/cache.h> #include <asm/page.h> #include <asm/thread_info.h> OUTPUT_ARCH(arc) ENTRY(res_service) #ifdef CONFIG_CPU_BIG_ENDIAN jiffies = jiffies_64 + 4; #else jiffies = jiffies_64; #endif SECTIONS { /* * ICCM starts at 0x8000_0000. So if kernel is relocated to some other * address, make sure peripheral at 0x8z doesn't clash with ICCM * Essentially vector is also in ICCM. */ . = CONFIG_LINUX_LINK_BASE; _int_vec_base_lds = .; .vector : { *(.vector) . = ALIGN(PAGE_SIZE); } #ifdef CONFIG_ARC_HAS_ICCM .text.arcfp : { *(.text.arcfp) . = ALIGN(CONFIG_ARC_ICCM_SZ * 1024); } #endif /* * The reason for having a seperate subsection .init.ramfs is to * prevent objump from including it in kernel dumps * * Reason for having .init.ramfs above .init is to make sure that the * binary blob is tucked away to one side, reducing the displacement * between .init.text and .text, avoiding any possible relocation * errors because of calls from .init.text to .text * Yes such calls do exist. e.g. * decompress_inflate.c:gunzip( ) -> zlib_inflate_workspace( ) */ __init_begin = .; .init.ramfs : { INIT_RAM_FS } . = ALIGN(PAGE_SIZE); HEAD_TEXT_SECTION INIT_TEXT_SECTION(L1_CACHE_BYTES) /* INIT_DATA_SECTION open-coded: special INIT_RAM_FS handling */ .init.data : { INIT_DATA INIT_SETUP(L1_CACHE_BYTES) INIT_CALLS CON_INITCALL } .init.arch.info : { __arch_info_begin = .; *(.arch.info.init) __arch_info_end = .; } PERCPU_SECTION(L1_CACHE_BYTES) . = ALIGN(PAGE_SIZE); __init_end = .; .text : { _text = .; _stext = .; TEXT_TEXT SCHED_TEXT CPUIDLE_TEXT LOCK_TEXT KPROBES_TEXT IRQENTRY_TEXT SOFTIRQENTRY_TEXT *(.fixup) *(.gnu.warning) } EXCEPTION_TABLE(L1_CACHE_BYTES) _etext = .; _sdata = .; RO_DATA(PAGE_SIZE) /* * 1. this is .data essentially * 2. 
THREAD_SIZE for init.task, must be kernel-stk sz aligned */ RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) _edata = .; BSS_SECTION(4, 4, 4) #ifdef CONFIG_ARC_DW2_UNWIND . = ALIGN(PAGE_SIZE); .eh_frame : { __start_unwind = .; *(.eh_frame) __end_unwind = .; } #else /DISCARD/ : { *(.eh_frame) } #endif . = ALIGN(PAGE_SIZE); _end = . ; STABS_DEBUG ELF_DETAILS DISCARDS .arcextmap 0 : { *(.gnu.linkonce.arcextmap.*) *(.arcextmap.*) } #ifndef CONFIG_DEBUG_INFO /DISCARD/ : { *(.debug_frame) } /DISCARD/ : { *(.debug_aranges) } /DISCARD/ : { *(.debug_pubnames) } /DISCARD/ : { *(.debug_info) } /DISCARD/ : { *(.debug_abbrev) } /DISCARD/ : { *(.debug_line) } /DISCARD/ : { *(.debug_str) } /DISCARD/ : { *(.debug_loc) } /DISCARD/ : { *(.debug_macinfo) } /DISCARD/ : { *(.debug_ranges) } #endif #ifdef CONFIG_ARC_HAS_DCCM . = CONFIG_ARC_DCCM_BASE; __arc_dccm_base = .; .data.arcfp : { *(.data.arcfp) } . = ALIGN(CONFIG_ARC_DCCM_SZ * 1024); #endif }
aixcc-public/challenge-001-exemplar-source
1,409
arch/arc/lib/strcpy-700.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) */ /* If dst and src are 4 byte aligned, copy 8 bytes at a time. If the src is 4, but not 8 byte aligned, we first read 4 bytes to get it 8 byte aligned. Thus, we can do a little read-ahead, without dereferencing a cache line that we should not touch. Note that short and long instructions have been scheduled to avoid branch stalls. The beq_s to r3z could be made unaligned & long to avoid a stall there, but the it is not likely to be taken often, and it would also be likey to cost an unaligned mispredict at the next call. */ #include <linux/linkage.h> ENTRY_CFI(strcpy) or r2,r0,r1 bmsk_s r2,r2,1 brne.d r2,0,charloop mov_s r10,r0 ld_s r3,[r1,0] mov r8,0x01010101 bbit0.d r1,2,loop_start ror r12,r8 sub r2,r3,r8 bic_s r2,r2,r3 tst_s r2,r12 bne r3z mov_s r4,r3 .balign 4 loop: ld.a r3,[r1,4] st.ab r4,[r10,4] loop_start: ld.a r4,[r1,4] sub r2,r3,r8 bic_s r2,r2,r3 tst_s r2,r12 bne_s r3z st.ab r3,[r10,4] sub r2,r4,r8 bic r2,r2,r4 tst r2,r12 beq loop mov_s r3,r4 #ifdef __LITTLE_ENDIAN__ r3z: bmsk.f r1,r3,7 lsr_s r3,r3,8 #else r3z: lsr.f r1,r3,24 asl_s r3,r3,8 #endif bne.d r3z stb.ab r1,[r10,1] j_s [blink] .balign 4 charloop: ldb.ab r3,[r1,1] brne.d r3,0,charloop stb.ab r3,[r10,1] j [blink] END_CFI(strcpy)
aixcc-public/challenge-001-exemplar-source
2,576
arch/arc/lib/strchr-700.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) */ /* ARC700 has a relatively long pipeline and branch prediction, so we want to avoid branches that are hard to predict. On the other hand, the presence of the norm instruction makes it easier to operate on whole words branch-free. */ #include <linux/linkage.h> ENTRY_CFI(strchr) extb_s r1,r1 asl r5,r1,8 bmsk r2,r0,1 or r5,r5,r1 mov_s r3,0x01010101 breq.d r2,r0,.Laligned asl r4,r5,16 sub_s r0,r0,r2 asl r7,r2,3 ld_s r2,[r0] #ifdef __LITTLE_ENDIAN__ asl r7,r3,r7 #else lsr r7,r3,r7 #endif or r5,r5,r4 ror r4,r3 sub r12,r2,r7 bic_s r12,r12,r2 and r12,r12,r4 brne.d r12,0,.Lfound0_ua xor r6,r2,r5 ld.a r2,[r0,4] sub r12,r6,r7 bic r12,r12,r6 #ifdef __LITTLE_ENDIAN__ and r7,r12,r4 breq r7,0,.Loop ; For speed, we want this branch to be unaligned. b .Lfound_char ; Likewise this one. #else and r12,r12,r4 breq r12,0,.Loop ; For speed, we want this branch to be unaligned. lsr_s r12,r12,7 bic r2,r7,r6 b.d .Lfound_char_b and_s r2,r2,r12 #endif ; /* We require this code address to be unaligned for speed... */ .Laligned: ld_s r2,[r0] or r5,r5,r4 ror r4,r3 ; /* ... so that this code address is aligned, for itself and ... */ .Loop: sub r12,r2,r3 bic_s r12,r12,r2 and r12,r12,r4 brne.d r12,0,.Lfound0 xor r6,r2,r5 ld.a r2,[r0,4] sub r12,r6,r3 bic r12,r12,r6 and r7,r12,r4 breq r7,0,.Loop /* ... so that this branch is unaligned. */ ; Found searched-for character. r0 has already advanced to next word. #ifdef __LITTLE_ENDIAN__ /* We only need the information about the first matching byte (i.e. the least significant matching byte) to be exact, hence there is no problem with carry effects. 
*/ .Lfound_char: sub r3,r7,1 bic r3,r3,r7 norm r2,r3 sub_s r0,r0,1 asr_s r2,r2,3 j.d [blink] sub_s r0,r0,r2 .balign 4 .Lfound0_ua: mov r3,r7 .Lfound0: sub r3,r6,r3 bic r3,r3,r6 and r2,r3,r4 or_s r12,r12,r2 sub_s r3,r12,1 bic_s r3,r3,r12 norm r3,r3 add_s r0,r0,3 asr_s r12,r3,3 asl.f 0,r2,r3 sub_s r0,r0,r12 j_s.d [blink] mov.pl r0,0 #else /* BIG ENDIAN */ .Lfound_char: lsr r7,r7,7 bic r2,r7,r6 .Lfound_char_b: norm r2,r2 sub_s r0,r0,4 asr_s r2,r2,3 j.d [blink] add_s r0,r0,r2 .Lfound0_ua: mov_s r3,r7 .Lfound0: asl_s r2,r2,7 or r7,r6,r4 bic_s r12,r12,r2 sub r2,r7,r3 or r2,r2,r6 bic r12,r2,r12 bic.f r3,r4,r12 norm r3,r3 add.pl r3,r3,1 asr_s r12,r3,3 asl.f 0,r2,r3 add_s r0,r0,r12 j_s.d [blink] mov.mi r0,0 #endif /* ENDIAN */ END_CFI(strchr)
aixcc-public/challenge-001-exemplar-source
1,115
arch/arc/lib/memcpy-700.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) */ #include <linux/linkage.h> ENTRY_CFI(memcpy) or r3,r0,r1 asl_s r3,r3,30 mov_s r5,r0 brls.d r2,r3,.Lcopy_bytewise sub.f r3,r2,1 ld_s r12,[r1,0] asr.f lp_count,r3,3 bbit0.d r3,2,.Lnox4 bmsk_s r2,r2,1 st.ab r12,[r5,4] ld.a r12,[r1,4] .Lnox4: lppnz .Lendloop ld_s r3,[r1,4] st.ab r12,[r5,4] ld.a r12,[r1,8] st.ab r3,[r5,4] .Lendloop: breq r2,0,.Last_store ld r3,[r5,0] #ifdef __LITTLE_ENDIAN__ add3 r2,-1,r2 ; uses long immediate xor_s r12,r12,r3 bmsk r12,r12,r2 xor_s r12,r12,r3 #else /* BIG ENDIAN */ sub3 r2,31,r2 ; uses long immediate xor_s r3,r3,r12 bmsk r3,r3,r2 xor_s r12,r12,r3 #endif /* ENDIAN */ .Last_store: j_s.d [blink] st r12,[r5,0] .balign 4 .Lcopy_bytewise: jcs [blink] ldb_s r12,[r1,0] lsr.f lp_count,r3 bhs_s .Lnox1 stb.ab r12,[r5,1] ldb.a r12,[r1,1] .Lnox1: lppnz .Lendbloop ldb_s r3,[r1,1] stb.ab r12,[r5,1] ldb.a r12,[r1,2] stb.ab r3,[r5,1] .Lendbloop: j_s.d [blink] stb r12,[r5,0] END_CFI(memcpy)
aixcc-public/challenge-001-exemplar-source
4,405
arch/arc/lib/memcpy-archs.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com) */ #include <linux/linkage.h> #ifdef __LITTLE_ENDIAN__ # define SHIFT_1(RX,RY,IMM) asl RX, RY, IMM ; << # define SHIFT_2(RX,RY,IMM) lsr RX, RY, IMM ; >> # define MERGE_1(RX,RY,IMM) asl RX, RY, IMM # define MERGE_2(RX,RY,IMM) # define EXTRACT_1(RX,RY,IMM) and RX, RY, 0xFFFF # define EXTRACT_2(RX,RY,IMM) lsr RX, RY, IMM #else # define SHIFT_1(RX,RY,IMM) lsr RX, RY, IMM ; >> # define SHIFT_2(RX,RY,IMM) asl RX, RY, IMM ; << # define MERGE_1(RX,RY,IMM) asl RX, RY, IMM ; << # define MERGE_2(RX,RY,IMM) asl RX, RY, IMM ; << # define EXTRACT_1(RX,RY,IMM) lsr RX, RY, IMM # define EXTRACT_2(RX,RY,IMM) lsr RX, RY, 0x08 #endif #ifdef CONFIG_ARC_HAS_LL64 # define LOADX(DST,RX) ldd.ab DST, [RX, 8] # define STOREX(SRC,RX) std.ab SRC, [RX, 8] # define ZOLSHFT 5 # define ZOLAND 0x1F #else # define LOADX(DST,RX) ld.ab DST, [RX, 4] # define STOREX(SRC,RX) st.ab SRC, [RX, 4] # define ZOLSHFT 4 # define ZOLAND 0xF #endif ENTRY_CFI(memcpy) mov.f 0, r2 ;;; if size is zero jz.d [blink] mov r3, r0 ; don;t clobber ret val ;;; if size <= 8 cmp r2, 8 bls.d @.Lsmallchunk mov.f lp_count, r2 and.f r4, r0, 0x03 rsub lp_count, r4, 4 lpnz @.Laligndestination ;; LOOP BEGIN ldb.ab r5, [r1,1] sub r2, r2, 1 stb.ab r5, [r3,1] .Laligndestination: ;;; Check the alignment of the source and.f r4, r1, 0x03 bnz.d @.Lsourceunaligned ;;; CASE 0: Both source and destination are 32bit aligned ;;; Convert len to Dwords, unfold x4 lsr.f lp_count, r2, ZOLSHFT lpnz @.Lcopy32_64bytes ;; LOOP START LOADX (r6, r1) LOADX (r8, r1) LOADX (r10, r1) LOADX (r4, r1) STOREX (r6, r3) STOREX (r8, r3) STOREX (r10, r3) STOREX (r4, r3) .Lcopy32_64bytes: and.f lp_count, r2, ZOLAND ;Last remaining 31 bytes .Lsmallchunk: lpnz @.Lcopyremainingbytes ;; LOOP START ldb.ab r5, [r1,1] stb.ab r5, [r3,1] .Lcopyremainingbytes: j [blink] ;;; END CASE 0 .Lsourceunaligned: cmp r4, 2 beq.d @.LunalignedOffby2 sub r2, r2, 1 bhi.d @.LunalignedOffby3 
ldb.ab r5, [r1, 1] ;;; CASE 1: The source is unaligned, off by 1 ;; Hence I need to read 1 byte for a 16bit alignment ;; and 2bytes to reach 32bit alignment ldh.ab r6, [r1, 2] sub r2, r2, 2 ;; Convert to words, unfold x2 lsr.f lp_count, r2, 3 MERGE_1 (r6, r6, 8) MERGE_2 (r5, r5, 24) or r5, r5, r6 ;; Both src and dst are aligned lpnz @.Lcopy8bytes_1 ;; LOOP START ld.ab r6, [r1, 4] ld.ab r8, [r1,4] SHIFT_1 (r7, r6, 24) or r7, r7, r5 SHIFT_2 (r5, r6, 8) SHIFT_1 (r9, r8, 24) or r9, r9, r5 SHIFT_2 (r5, r8, 8) st.ab r7, [r3, 4] st.ab r9, [r3, 4] .Lcopy8bytes_1: ;; Write back the remaining 16bits EXTRACT_1 (r6, r5, 16) sth.ab r6, [r3, 2] ;; Write back the remaining 8bits EXTRACT_2 (r5, r5, 16) stb.ab r5, [r3, 1] and.f lp_count, r2, 0x07 ;Last 8bytes lpnz @.Lcopybytewise_1 ;; LOOP START ldb.ab r6, [r1,1] stb.ab r6, [r3,1] .Lcopybytewise_1: j [blink] .LunalignedOffby2: ;;; CASE 2: The source is unaligned, off by 2 ldh.ab r5, [r1, 2] sub r2, r2, 1 ;; Both src and dst are aligned ;; Convert to words, unfold x2 lsr.f lp_count, r2, 3 #ifdef __BIG_ENDIAN__ asl.nz r5, r5, 16 #endif lpnz @.Lcopy8bytes_2 ;; LOOP START ld.ab r6, [r1, 4] ld.ab r8, [r1,4] SHIFT_1 (r7, r6, 16) or r7, r7, r5 SHIFT_2 (r5, r6, 16) SHIFT_1 (r9, r8, 16) or r9, r9, r5 SHIFT_2 (r5, r8, 16) st.ab r7, [r3, 4] st.ab r9, [r3, 4] .Lcopy8bytes_2: #ifdef __BIG_ENDIAN__ lsr.nz r5, r5, 16 #endif sth.ab r5, [r3, 2] and.f lp_count, r2, 0x07 ;Last 8bytes lpnz @.Lcopybytewise_2 ;; LOOP START ldb.ab r6, [r1,1] stb.ab r6, [r3,1] .Lcopybytewise_2: j [blink] .LunalignedOffby3: ;;; CASE 3: The source is unaligned, off by 3 ;;; Hence, I need to read 1byte for achieve the 32bit alignment ;; Both src and dst are aligned ;; Convert to words, unfold x2 lsr.f lp_count, r2, 3 #ifdef __BIG_ENDIAN__ asl.ne r5, r5, 24 #endif lpnz @.Lcopy8bytes_3 ;; LOOP START ld.ab r6, [r1, 4] ld.ab r8, [r1,4] SHIFT_1 (r7, r6, 8) or r7, r7, r5 SHIFT_2 (r5, r6, 24) SHIFT_1 (r9, r8, 8) or r9, r9, r5 SHIFT_2 (r5, r8, 24) st.ab r7, [r3, 4] st.ab r9, [r3, 4] 
.Lcopy8bytes_3: #ifdef __BIG_ENDIAN__ lsr.nz r5, r5, 24 #endif stb.ab r5, [r3, 1] and.f lp_count, r2, 0x07 ;Last 8bytes lpnz @.Lcopybytewise_3 ;; LOOP START ldb.ab r6, [r1,1] stb.ab r6, [r3,1] .Lcopybytewise_3: j [blink] END_CFI(memcpy)
aixcc-public/challenge-001-exemplar-source
2,793
arch/arc/lib/memset-archs.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com) */ #include <linux/linkage.h> #include <asm/cache.h> /* * The memset implementation below is optimized to use prefetchw and prealloc * instruction in case of CPU with 64B L1 data cache line (L1_CACHE_SHIFT == 6) * If you want to implement optimized memset for other possible L1 data cache * line lengths (32B and 128B) you should rewrite code carefully checking * we don't call any prefetchw/prealloc instruction for L1 cache lines which * don't belongs to memset area. */ #if L1_CACHE_SHIFT == 6 .macro PREALLOC_INSTR reg, off prealloc [\reg, \off] .endm .macro PREFETCHW_INSTR reg, off prefetchw [\reg, \off] .endm #else .macro PREALLOC_INSTR reg, off .endm .macro PREFETCHW_INSTR reg, off .endm #endif ENTRY_CFI(memset) PREFETCHW_INSTR r0, 0 ; Prefetch the first write location mov.f 0, r2 ;;; if size is zero jz.d [blink] mov r3, r0 ; don't clobber ret val ;;; if length < 8 brls.d.nt r2, 8, .Lsmallchunk mov.f lp_count,r2 and.f r4, r0, 0x03 rsub lp_count, r4, 4 lpnz @.Laligndestination ;; LOOP BEGIN stb.ab r1, [r3,1] sub r2, r2, 1 .Laligndestination: ;;; Destination is aligned and r1, r1, 0xFF asl r4, r1, 8 or r4, r4, r1 asl r5, r4, 16 or r5, r5, r4 mov r4, r5 sub3 lp_count, r2, 8 cmp r2, 64 bmsk.hi r2, r2, 5 mov.ls lp_count, 0 add3.hi r2, r2, 8 ;;; Convert len to Dwords, unfold x8 lsr.f lp_count, lp_count, 6 lpnz @.Lset64bytes ;; LOOP START PREALLOC_INSTR r3, 64 ; alloc next line w/o fetching #ifdef CONFIG_ARC_HAS_LL64 std.ab r4, [r3, 8] std.ab r4, [r3, 8] std.ab r4, [r3, 8] std.ab r4, [r3, 8] std.ab r4, [r3, 8] std.ab r4, [r3, 8] std.ab r4, [r3, 8] std.ab r4, [r3, 8] #else st.ab r4, [r3, 4] st.ab r4, [r3, 4] st.ab r4, [r3, 4] st.ab r4, [r3, 4] st.ab r4, [r3, 4] st.ab r4, [r3, 4] st.ab r4, [r3, 4] st.ab r4, [r3, 4] st.ab r4, [r3, 4] st.ab r4, [r3, 4] st.ab r4, [r3, 4] st.ab r4, [r3, 4] st.ab r4, [r3, 4] st.ab r4, [r3, 4] st.ab r4, [r3, 4] st.ab r4, [r3, 4] #endif 
.Lset64bytes: lsr.f lp_count, r2, 5 ;Last remaining max 124 bytes lpnz .Lset32bytes ;; LOOP START #ifdef CONFIG_ARC_HAS_LL64 std.ab r4, [r3, 8] std.ab r4, [r3, 8] std.ab r4, [r3, 8] std.ab r4, [r3, 8] #else st.ab r4, [r3, 4] st.ab r4, [r3, 4] st.ab r4, [r3, 4] st.ab r4, [r3, 4] st.ab r4, [r3, 4] st.ab r4, [r3, 4] st.ab r4, [r3, 4] st.ab r4, [r3, 4] #endif .Lset32bytes: and.f lp_count, r2, 0x1F ;Last remaining 31 bytes .Lsmallchunk: lpnz .Lcopy3bytes ;; LOOP START stb.ab r1, [r3, 1] .Lcopy3bytes: j [blink] END_CFI(memset) ENTRY_CFI(memzero) ; adjust bzero args to memset args mov r2, r1 b.d memset ;tail call so need to tinker with blink mov r1, 0 END_CFI(memzero)
aixcc-public/challenge-001-exemplar-source
1,084
arch/arc/lib/memset.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) */ #include <linux/linkage.h> #define SMALL 7 /* Must be at least 6 to deal with alignment/loop issues. */ ENTRY_CFI(memset) mov_s r4,r0 or r12,r0,r2 bmsk.f r12,r12,1 extb_s r1,r1 asl r3,r1,8 beq.d .Laligned or_s r1,r1,r3 brls r2,SMALL,.Ltiny add r3,r2,r0 stb r1,[r3,-1] bclr_s r3,r3,0 stw r1,[r3,-2] bmsk.f r12,r0,1 add_s r2,r2,r12 sub.ne r2,r2,4 stb.ab r1,[r4,1] and r4,r4,-2 stw.ab r1,[r4,2] and r4,r4,-4 .Laligned: ; This code address should be aligned for speed. asl r3,r1,16 lsr.f lp_count,r2,2 or_s r1,r1,r3 lpne .Loop_end st.ab r1,[r4,4] .Loop_end: j_s [blink] .balign 4 .Ltiny: mov.f lp_count,r2 lpne .Ltiny_end stb.ab r1,[r4,1] .Ltiny_end: j_s [blink] END_CFI(memset) ; memzero: @r0 = mem, @r1 = size_t ; memset: @r0 = mem, @r1 = char, @r2 = size_t ENTRY_CFI(memzero) ; adjust bzero args to memset args mov r2, r1 mov r1, 0 b memset ;tail call so need to tinker with blink END_CFI(memzero)
aixcc-public/challenge-001-exemplar-source
1,204
arch/arc/lib/strcmp-archs.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com) */ #include <linux/linkage.h> ENTRY_CFI(strcmp) or r2, r0, r1 bmsk_s r2, r2, 1 brne r2, 0, @.Lcharloop ;;; s1 and s2 are word aligned ld.ab r2, [r0, 4] mov_s r12, 0x01010101 ror r11, r12 .align 4 .LwordLoop: ld.ab r3, [r1, 4] ;; Detect NULL char in str1 sub r4, r2, r12 ld.ab r5, [r0, 4] bic r4, r4, r2 and r4, r4, r11 brne.d.nt r4, 0, .LfoundNULL ;; Check if the read locations are the same cmp r2, r3 beq.d .LwordLoop mov.eq r2, r5 ;; A match is found, spot it out #ifdef __LITTLE_ENDIAN__ swape r3, r3 mov_s r0, 1 swape r2, r2 #else mov_s r0, 1 #endif cmp_s r2, r3 j_s.d [blink] bset.lo r0, r0, 31 .align 4 .LfoundNULL: #ifdef __BIG_ENDIAN__ swape r4, r4 swape r2, r2 swape r3, r3 #endif ;; Find null byte ffs r0, r4 bmsk r2, r2, r0 bmsk r3, r3, r0 swape r2, r2 swape r3, r3 ;; make the return value sub.f r0, r2, r3 mov.hi r0, 1 j_s.d [blink] bset.lo r0, r0, 31 .align 4 .Lcharloop: ldb.ab r2, [r0, 1] ldb.ab r3, [r1, 1] nop breq r2, 0, .Lcmpend breq r2, r3, .Lcharloop .align 4 .Lcmpend: j_s.d [blink] sub r0, r2, r3 END_CFI(strcmp)
aixcc-public/challenge-001-exemplar-source
2,507
arch/arc/lib/memcmp.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) */ #include <linux/linkage.h> #ifdef __LITTLE_ENDIAN__ #define WORD2 r2 #define SHIFT r3 #else /* BIG ENDIAN */ #define WORD2 r3 #define SHIFT r2 #endif ENTRY_CFI(memcmp) or r12,r0,r1 asl_s r12,r12,30 sub r3,r2,1 brls r2,r12,.Lbytewise ld r4,[r0,0] ld r5,[r1,0] lsr.f lp_count,r3,3 #ifdef CONFIG_ISA_ARCV2 /* In ARCv2 a branch can't be the last instruction in a zero overhead * loop. * So we move the branch to the start of the loop, duplicate it * after the end, and set up r12 so that the branch isn't taken * initially. */ mov_s r12,WORD2 lpne .Loop_end brne WORD2,r12,.Lodd ld WORD2,[r0,4] #else lpne .Loop_end ld_s WORD2,[r0,4] #endif ld_s r12,[r1,4] brne r4,r5,.Leven ld.a r4,[r0,8] ld.a r5,[r1,8] #ifdef CONFIG_ISA_ARCV2 .Loop_end: brne WORD2,r12,.Lodd #else brne WORD2,r12,.Lodd .Loop_end: #endif asl_s SHIFT,SHIFT,3 bhs_s .Last_cmp brne r4,r5,.Leven ld r4,[r0,4] ld r5,[r1,4] #ifdef __LITTLE_ENDIAN__ nop_s ; one more load latency cycle .Last_cmp: xor r0,r4,r5 bset r0,r0,SHIFT sub_s r1,r0,1 bic_s r1,r1,r0 norm r1,r1 b.d .Leven_cmp and r1,r1,24 .Leven: xor r0,r4,r5 sub_s r1,r0,1 bic_s r1,r1,r0 norm r1,r1 ; slow track insn and r1,r1,24 .Leven_cmp: asl r2,r4,r1 asl r12,r5,r1 lsr_s r2,r2,1 lsr_s r12,r12,1 j_s.d [blink] sub r0,r2,r12 .balign 4 .Lodd: xor r0,WORD2,r12 sub_s r1,r0,1 bic_s r1,r1,r0 norm r1,r1 ; slow track insn and r1,r1,24 asl_s r2,r2,r1 asl_s r12,r12,r1 lsr_s r2,r2,1 lsr_s r12,r12,1 j_s.d [blink] sub r0,r2,r12 #else /* BIG ENDIAN */ .Last_cmp: neg_s SHIFT,SHIFT lsr r4,r4,SHIFT lsr r5,r5,SHIFT ; slow track insn .Leven: sub.f r0,r4,r5 mov.ne r0,1 j_s.d [blink] bset.cs r0,r0,31 .Lodd: cmp_s WORD2,r12 mov_s r0,1 j_s.d [blink] bset.cs r0,r0,31 #endif /* ENDIAN */ .balign 4 .Lbytewise: breq r2,0,.Lnil ldb r4,[r0,0] ldb r5,[r1,0] lsr.f lp_count,r3 #ifdef CONFIG_ISA_ARCV2 mov r12,r3 lpne .Lbyte_end brne r3,r12,.Lbyte_odd #else lpne .Lbyte_end 
#endif ldb_s r3,[r0,1] ldb r12,[r1,1] brne r4,r5,.Lbyte_even ldb.a r4,[r0,2] ldb.a r5,[r1,2] #ifdef CONFIG_ISA_ARCV2 .Lbyte_end: brne r3,r12,.Lbyte_odd #else brne r3,r12,.Lbyte_odd .Lbyte_end: #endif bcc .Lbyte_even brne r4,r5,.Lbyte_even ldb_s r3,[r0,1] ldb_s r12,[r1,1] .Lbyte_odd: j_s.d [blink] sub r0,r3,r12 .Lbyte_even: j_s.d [blink] sub r0,r4,r5 .Lnil: j_s.d [blink] mov r0,0 END_CFI(memcmp)
aixcc-public/challenge-001-exemplar-source
1,325
arch/arc/lib/strlen.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) */ #include <linux/linkage.h> ENTRY_CFI(strlen) or r3,r0,7 ld r2,[r3,-7] ld.a r6,[r3,-3] mov r4,0x01010101 ; uses long immediate #ifdef __LITTLE_ENDIAN__ asl_s r1,r0,3 btst_s r0,2 asl r7,r4,r1 ror r5,r4 sub r1,r2,r7 bic_s r1,r1,r2 mov.eq r7,r4 sub r12,r6,r7 bic r12,r12,r6 or.eq r12,r12,r1 and r12,r12,r5 brne r12,0,.Learly_end #else /* BIG ENDIAN */ ror r5,r4 btst_s r0,2 mov_s r1,31 sub3 r7,r1,r0 sub r1,r2,r4 bic_s r1,r1,r2 bmsk r1,r1,r7 sub r12,r6,r4 bic r12,r12,r6 bmsk.ne r12,r12,r7 or.eq r12,r12,r1 and r12,r12,r5 brne r12,0,.Learly_end #endif /* ENDIAN */ .Loop: ld_s r2,[r3,4] ld.a r6,[r3,8] ; stall for load result sub r1,r2,r4 bic_s r1,r1,r2 sub r12,r6,r4 bic r12,r12,r6 or r12,r12,r1 and r12,r12,r5 breq r12,0,.Loop .Lend: and.f r1,r1,r5 sub.ne r3,r3,4 mov.eq r1,r12 #ifdef __LITTLE_ENDIAN__ sub_s r2,r1,1 bic_s r2,r2,r1 norm r1,r2 sub_s r0,r0,3 lsr_s r1,r1,3 sub r0,r3,r0 j_s.d [blink] sub r0,r0,r1 #else /* BIG ENDIAN */ lsr_s r1,r1,7 mov.eq r2,r6 bic_s r1,r1,r2 norm r1,r1 sub r0,r3,r0 lsr_s r1,r1,3 j_s.d [blink] add r0,r0,r1 #endif /* ENDIAN */ .Learly_end: b.d .Lend sub_s.ne r1,r1,r1 END_CFI(strlen)
aixcc-public/challenge-001-exemplar-source
2,511
arch/arc/lib/strcmp.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) */ /* This is optimized primarily for the ARC700. It would be possible to speed up the loops by one cycle / word respective one cycle / byte by forcing double source 1 alignment, unrolling by a factor of two, and speculatively loading the second word / byte of source 1; however, that would increase the overhead for loop setup / finish, and strcmp might often terminate early. */ #include <linux/linkage.h> ENTRY_CFI(strcmp) or r2,r0,r1 bmsk_s r2,r2,1 brne r2,0,.Lcharloop mov_s r12,0x01010101 ror r5,r12 .Lwordloop: ld.ab r2,[r0,4] ld.ab r3,[r1,4] nop_s sub r4,r2,r12 bic r4,r4,r2 and r4,r4,r5 brne r4,0,.Lfound0 breq r2,r3,.Lwordloop #ifdef __LITTLE_ENDIAN__ xor r0,r2,r3 ; mask for difference sub_s r1,r0,1 bic_s r0,r0,r1 ; mask for least significant difference bit sub r1,r5,r0 xor r0,r5,r1 ; mask for least significant difference byte and_s r2,r2,r0 and_s r3,r3,r0 #endif /* LITTLE ENDIAN */ cmp_s r2,r3 mov_s r0,1 j_s.d [blink] bset.lo r0,r0,31 .balign 4 #ifdef __LITTLE_ENDIAN__ .Lfound0: xor r0,r2,r3 ; mask for difference or r0,r0,r4 ; or in zero indicator sub_s r1,r0,1 bic_s r0,r0,r1 ; mask for least significant difference bit sub r1,r5,r0 xor r0,r5,r1 ; mask for least significant difference byte and_s r2,r2,r0 and_s r3,r3,r0 sub.f r0,r2,r3 mov.hi r0,1 j_s.d [blink] bset.lo r0,r0,31 #else /* BIG ENDIAN */ /* The zero-detection above can mis-detect 0x01 bytes as zeroes because of carry-propagateion from a lower significant zero byte. We can compensate for this by checking that bit0 is zero. This compensation is not necessary in the step where we get a low estimate for r2, because in any affected bytes we already have 0x00 or 0x01, which will remain unchanged when bit 7 is cleared. */ .balign 4 .Lfound0: lsr r0,r4,8 lsr_s r1,r2 bic_s r2,r2,r0 ; get low estimate for r2 and get ... 
bic_s r0,r0,r1 ; <this is the adjusted mask for zeros> or_s r3,r3,r0 ; ... high estimate r3 so that r2 > r3 will ... cmp_s r3,r2 ; ... be independent of trailing garbage or_s r2,r2,r0 ; likewise for r3 > r2 bic_s r3,r3,r0 rlc r0,0 ; r0 := r2 > r3 ? 1 : 0 cmp_s r2,r3 j_s.d [blink] bset.lo r0,r0,31 #endif /* ENDIAN */ .balign 4 .Lcharloop: ldb.ab r2,[r0,1] ldb.ab r3,[r1,1] nop_s breq r2,0,.Lcmpend breq r2,r3,.Lcharloop .Lcmpend: j_s.d [blink] sub r0,r2,r3 END_CFI(strcmp)
aixcc-public/challenge-001-exemplar-source
11,491
arch/arc/mm/tlbex.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * TLB Exception Handling for ARC * * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) * * Vineetg: April 2011 : * -MMU v1: moved out legacy code into a seperate file * -MMU v3: PD{0,1} bits layout changed: They don't overlap anymore, * helps avoid a shift when preparing PD0 from PTE * * Vineetg: July 2009 * -For MMU V2, we need not do heuristics at the time of commiting a D-TLB * entry, so that it doesn't knock out it's I-TLB entry * -Some more fine tuning: * bmsk instead of add, asl.cc instead of branch, delay slot utilise etc * * Vineetg: July 2009 * -Practically rewrote the I/D TLB Miss handlers * Now 40 and 135 instructions a peice as compared to 131 and 449 resp. * Hence Leaner by 1.5 K * Used Conditional arithmetic to replace excessive branching * Also used short instructions wherever possible * * Vineetg: Aug 13th 2008 * -Passing ECR (Exception Cause REG) to do_page_fault( ) for printing * more information in case of a Fatality * * Vineetg: March 25th Bug #92690 * -Added Debug Code to check if sw-ASID == hw-ASID * Rahul Trivedi, Amit Bhor: Codito Technologies 2004 */ #include <linux/linkage.h> #include <linux/pgtable.h> #include <asm/entry.h> #include <asm/mmu.h> #include <asm/arcregs.h> #include <asm/cache.h> #include <asm/processor.h> #ifdef CONFIG_ISA_ARCOMPACT ;----------------------------------------------------------------- ; ARC700 Exception Handling doesn't auto-switch stack and it only provides ; ONE scratch AUX reg "ARC_REG_SCRATCH_DATA0" ; ; For Non-SMP, the scratch AUX reg is repurposed to cache task PGD, so a ; "global" is used to free-up FIRST core reg to be able to code the rest of ; exception prologue (IRQ auto-disabled on Exceptions, so it's IRQ-safe). ; Since the Fast Path TLB Miss handler is coded with 4 regs, the remaining 3 ; need to be saved as well by extending the "global" to be 4 words. 
Hence ; ".size ex_saved_reg1, 16" ; [All of this dance is to avoid stack switching for each TLB Miss, since we ; only need to save only a handful of regs, as opposed to complete reg file] ; ; For ARC700 SMP, the "global" obviously can't be used for free up the FIRST ; core reg as it will not be SMP safe. ; Thus scratch AUX reg is used (and no longer used to cache task PGD). ; To save the rest of 3 regs - per cpu, the global is made "per-cpu". ; Epilogue thus has to locate the "per-cpu" storage for regs. ; To avoid cache line bouncing the per-cpu global is aligned/sized per ; L1_CACHE_SHIFT, despite fundamentally needing to be 12 bytes only. Hence ; ".size ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)" ; As simple as that.... ;-------------------------------------------------------------------------- ; scratch memory to save [r0-r3] used to code TLB refill Handler ARCFP_DATA ex_saved_reg1 .align 1 << L1_CACHE_SHIFT .type ex_saved_reg1, @object #ifdef CONFIG_SMP .size ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT) ex_saved_reg1: .zero (CONFIG_NR_CPUS << L1_CACHE_SHIFT) #else .size ex_saved_reg1, 16 ex_saved_reg1: .zero 16 #endif .macro TLBMISS_FREEUP_REGS #ifdef CONFIG_SMP sr r0, [ARC_REG_SCRATCH_DATA0] ; freeup r0 to code with GET_CPU_ID r0 ; get to per cpu scratch mem, asl r0, r0, L1_CACHE_SHIFT ; cache line wide per cpu add r0, @ex_saved_reg1, r0 #else st r0, [@ex_saved_reg1] mov_s r0, @ex_saved_reg1 #endif st_s r1, [r0, 4] st_s r2, [r0, 8] st_s r3, [r0, 12] .endm .macro TLBMISS_RESTORE_REGS #ifdef CONFIG_SMP GET_CPU_ID r0 ; get to per cpu scratch mem asl r0, r0, L1_CACHE_SHIFT ; each is cache line wide add r0, @ex_saved_reg1, r0 ld_s r3, [r0,12] ld_s r2, [r0, 8] ld_s r1, [r0, 4] lr r0, [ARC_REG_SCRATCH_DATA0] #else mov_s r0, @ex_saved_reg1 ld_s r3, [r0,12] ld_s r2, [r0, 8] ld_s r1, [r0, 4] ld_s r0, [r0] #endif .endm #else /* ARCv2 */ .macro TLBMISS_FREEUP_REGS #ifdef CONFIG_ARC_HAS_LL64 std r0, [sp, -16] std r2, [sp, -8] #else PUSH r0 PUSH r1 PUSH r2 PUSH 
r3 #endif .endm .macro TLBMISS_RESTORE_REGS #ifdef CONFIG_ARC_HAS_LL64 ldd r0, [sp, -16] ldd r2, [sp, -8] #else POP r3 POP r2 POP r1 POP r0 #endif .endm #endif ;============================================================================ ;TLB Miss handling Code ;============================================================================ #ifndef PMD_SHIFT #define PMD_SHIFT PUD_SHIFT #endif #ifndef PUD_SHIFT #define PUD_SHIFT PGDIR_SHIFT #endif ;----------------------------------------------------------------------------- ; This macro does the page-table lookup for the faulting address. ; OUT: r0 = PTE faulted on, r1 = ptr to PTE, r2 = Faulting V-address .macro LOAD_FAULT_PTE lr r2, [efa] #ifdef CONFIG_ISA_ARCV2 lr r1, [ARC_REG_SCRATCH_DATA0] ; current pgd #else GET_CURR_TASK_ON_CPU r1 ld r1, [r1, TASK_ACT_MM] ld r1, [r1, MM_PGD] #endif lsr r0, r2, PGDIR_SHIFT ; Bits for indexing into PGD ld.as r3, [r1, r0] ; PGD entry corresp to faulting addr tst r3, r3 bz do_slow_path_pf ; if no Page Table, do page fault #if CONFIG_PGTABLE_LEVELS > 3 lsr r0, r2, PUD_SHIFT ; Bits for indexing into PUD and r0, r0, (PTRS_PER_PUD - 1) ld.as r1, [r3, r0] ; PMD entry tst r1, r1 bz do_slow_path_pf mov r3, r1 #endif #if CONFIG_PGTABLE_LEVELS > 2 lsr r0, r2, PMD_SHIFT ; Bits for indexing into PMD and r0, r0, (PTRS_PER_PMD - 1) ld.as r1, [r3, r0] ; PMD entry tst r1, r1 bz do_slow_path_pf mov r3, r1 #endif #ifdef CONFIG_TRANSPARENT_HUGEPAGE and.f 0, r3, _PAGE_HW_SZ ; Is this Huge PMD (thp) add2.nz r1, r1, r0 bnz.d 2f ; YES: PGD == PMD has THP PTE: stop pgd walk mov.nz r0, r3 #endif and r1, r3, PAGE_MASK ; Get the PTE entry: The idea is ; (1) x = addr >> PAGE_SHIFT -> masks page-off bits from @fault-addr ; (2) y = x & (PTRS_PER_PTE - 1) -> to get index ; (3) z = (pgtbl + y * 4) #ifdef CONFIG_ARC_HAS_PAE40 #define PTE_SIZE_LOG 3 /* 8 == 2 ^ 3 */ #else #define PTE_SIZE_LOG 2 /* 4 == 2 ^ 2 */ #endif ; multiply in step (3) above avoided by shifting lesser in step (1) lsr r0, r2, ( PAGE_SHIFT - 
PTE_SIZE_LOG ) and r0, r0, ( (PTRS_PER_PTE - 1) << PTE_SIZE_LOG ) ld.aw r0, [r1, r0] ; r0: PTE (lower word only for PAE40) ; r1: PTE ptr 2: .endm ;----------------------------------------------------------------- ; Convert Linux PTE entry into TLB entry ; A one-word PTE entry is programmed as two-word TLB Entry [PD0:PD1] in mmu ; (for PAE40, two-words PTE, while three-word TLB Entry [PD0:PD1:PD1HI]) ; IN: r0 = PTE, r1 = ptr to PTE .macro CONV_PTE_TO_TLB and r3, r0, PTE_BITS_RWX ; r w x asl r2, r3, 3 ; Kr Kw Kx 0 0 0 (GLOBAL, kernel only) and.f 0, r0, _PAGE_GLOBAL or.z r2, r2, r3 ; Kr Kw Kx Ur Uw Ux (!GLOBAL, user page) and r3, r0, PTE_BITS_NON_RWX_IN_PD1 ; Extract PFN+cache bits from PTE or r3, r3, r2 sr r3, [ARC_REG_TLBPD1] ; paddr[31..13] | Kr Kw Kx Ur Uw Ux | C #ifdef CONFIG_ARC_HAS_PAE40 ld r3, [r1, 4] ; paddr[39..32] sr r3, [ARC_REG_TLBPD1HI] #endif and r2, r0, PTE_BITS_IN_PD0 ; Extract other PTE flags: (V)alid, (G)lb lr r3,[ARC_REG_TLBPD0] ; MMU prepares PD0 with vaddr and asid or r3, r3, r2 ; S | vaddr | {sasid|asid} sr r3,[ARC_REG_TLBPD0] ; rewrite PD0 .endm ;----------------------------------------------------------------- ; Commit the TLB entry into MMU .macro COMMIT_ENTRY_TO_MMU #ifdef CONFIG_ARC_MMU_V3 /* Get free TLB slot: Set = computed from vaddr, way = random */ sr TLBGetIndex, [ARC_REG_TLBCOMMAND] /* Commit the Write */ sr TLBWriteNI, [ARC_REG_TLBCOMMAND] #else sr TLBInsertEntry, [ARC_REG_TLBCOMMAND] #endif 88: .endm ARCFP_CODE ;Fast Path Code, candidate for ICCM ;----------------------------------------------------------------------------- ; I-TLB Miss Exception Handler ;----------------------------------------------------------------------------- ENTRY(EV_TLBMissI) TLBMISS_FREEUP_REGS ;---------------------------------------------------------------- ; Get the PTE corresponding to V-addr accessed, r2 is setup with EFA LOAD_FAULT_PTE ;---------------------------------------------------------------- ; VERIFY_PTE: Check if PTE permissions approp for 
executing code cmp_s r2, VMALLOC_START mov_s r2, (_PAGE_PRESENT | _PAGE_EXECUTE) or.hs r2, r2, _PAGE_GLOBAL and r3, r0, r2 ; Mask out NON Flag bits from PTE xor.f r3, r3, r2 ; check ( ( pte & flags_test ) == flags_test ) bnz do_slow_path_pf ; Let Linux VM know that the page was accessed or r0, r0, _PAGE_ACCESSED ; set Accessed Bit st_s r0, [r1] ; Write back PTE CONV_PTE_TO_TLB COMMIT_ENTRY_TO_MMU TLBMISS_RESTORE_REGS EV_TLBMissI_fast_ret: ; additional label for VDK OS-kit instrumentation rtie END(EV_TLBMissI) ;----------------------------------------------------------------------------- ; D-TLB Miss Exception Handler ;----------------------------------------------------------------------------- ENTRY(EV_TLBMissD) TLBMISS_FREEUP_REGS ;---------------------------------------------------------------- ; Get the PTE corresponding to V-addr accessed ; If PTE exists, it will setup, r0 = PTE, r1 = Ptr to PTE, r2 = EFA LOAD_FAULT_PTE ;---------------------------------------------------------------- ; VERIFY_PTE: Chk if PTE permissions approp for data access (R/W/R+W) cmp_s r2, VMALLOC_START mov_s r2, _PAGE_PRESENT ; common bit for K/U PTE or.hs r2, r2, _PAGE_GLOBAL ; kernel PTE only ; Linux PTE [RWX] bits are semantically overloaded: ; -If PAGE_GLOBAL set, they refer to kernel-only flags (vmalloc) ; -Otherwise they are user-mode permissions, and those are exactly ; same for kernel mode as well (e.g. 
copy_(to|from)_user) lr r3, [ecr] btst_s r3, ECR_C_BIT_DTLB_LD_MISS ; Read Access or.nz r2, r2, _PAGE_READ ; chk for Read flag in PTE btst_s r3, ECR_C_BIT_DTLB_ST_MISS ; Write Access or.nz r2, r2, _PAGE_WRITE ; chk for Write flag in PTE ; Above laddering takes care of XCHG access (both R and W) ; By now, r2 setup with all the Flags we need to check in PTE and r3, r0, r2 ; Mask out NON Flag bits from PTE brne.d r3, r2, do_slow_path_pf ; is ((pte & flags_test) == flags_test) ;---------------------------------------------------------------- ; UPDATE_PTE: Let Linux VM know that page was accessed/dirty or r0, r0, _PAGE_ACCESSED ; Accessed bit always or.nz r0, r0, _PAGE_DIRTY ; if Write, set Dirty bit as well st_s r0, [r1] ; Write back PTE CONV_PTE_TO_TLB COMMIT_ENTRY_TO_MMU TLBMISS_RESTORE_REGS EV_TLBMissD_fast_ret: ; additional label for VDK OS-kit instrumentation rtie ;-------- Common routine to call Linux Page Fault Handler ----------- do_slow_path_pf: #ifdef CONFIG_ISA_ARCV2 ; Set Z flag if exception in U mode. Hardware micro-ops do this on any ; taken interrupt/exception, and thus is already the case at the entry ; above, but ensuing code would have already clobbered. ; EXCEPTION_PROLOGUE called in slow path, relies on correct Z flag set lr r2, [erstatus] and r2, r2, STATUS_U_MASK bxor.f 0, r2, STATUS_U_BIT #endif ; Restore the 4-scratch regs saved by fast path miss handler TLBMISS_RESTORE_REGS ; Slow path TLB Miss handled as a regular ARC Exception ; (stack switching / save the complete reg-file). b call_do_page_fault END(EV_TLBMissD)
aixcc-public/challenge-001-exemplar-source
40,590
arch/openrisc/kernel/head.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * OpenRISC head.S * * Linux architectural port borrowing liberally from similar works of * others. All original copyrights apply as per the original source * declaration. * * Modifications for the OpenRISC architecture: * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> */ #include <linux/linkage.h> #include <linux/threads.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/serial_reg.h> #include <linux/pgtable.h> #include <asm/processor.h> #include <asm/page.h> #include <asm/mmu.h> #include <asm/thread_info.h> #include <asm/cache.h> #include <asm/spr_defs.h> #include <asm/asm-offsets.h> #include <linux/of_fdt.h> #define tophys(rd,rs) \ l.movhi rd,hi(-KERNELBASE) ;\ l.add rd,rd,rs #define CLEAR_GPR(gpr) \ l.movhi gpr,0x0 #define LOAD_SYMBOL_2_GPR(gpr,symbol) \ l.movhi gpr,hi(symbol) ;\ l.ori gpr,gpr,lo(symbol) #define UART_BASE_ADD 0x90000000 #define EXCEPTION_SR (SPR_SR_DME | SPR_SR_IME | SPR_SR_DCE | SPR_SR_ICE | SPR_SR_SM) #define SYSCALL_SR (SPR_SR_DME | SPR_SR_IME | SPR_SR_DCE | SPR_SR_ICE | SPR_SR_IEE | SPR_SR_TEE | SPR_SR_SM) /* ============================================[ tmp store locations ]=== */ #define SPR_SHADOW_GPR(x) ((x) + SPR_GPR_BASE + 32) /* * emergency_print temporary stores */ #ifdef CONFIG_OPENRISC_HAVE_SHADOW_GPRS #define EMERGENCY_PRINT_STORE_GPR4 l.mtspr r0,r4,SPR_SHADOW_GPR(14) #define EMERGENCY_PRINT_LOAD_GPR4 l.mfspr r4,r0,SPR_SHADOW_GPR(14) #define EMERGENCY_PRINT_STORE_GPR5 l.mtspr r0,r5,SPR_SHADOW_GPR(15) #define EMERGENCY_PRINT_LOAD_GPR5 l.mfspr r5,r0,SPR_SHADOW_GPR(15) #define EMERGENCY_PRINT_STORE_GPR6 l.mtspr r0,r6,SPR_SHADOW_GPR(16) #define EMERGENCY_PRINT_LOAD_GPR6 l.mfspr r6,r0,SPR_SHADOW_GPR(16) #define EMERGENCY_PRINT_STORE_GPR7 l.mtspr r0,r7,SPR_SHADOW_GPR(7) #define EMERGENCY_PRINT_LOAD_GPR7 l.mfspr r7,r0,SPR_SHADOW_GPR(7) #define EMERGENCY_PRINT_STORE_GPR8 l.mtspr r0,r8,SPR_SHADOW_GPR(8) 
#define EMERGENCY_PRINT_LOAD_GPR8 l.mfspr r8,r0,SPR_SHADOW_GPR(8) #define EMERGENCY_PRINT_STORE_GPR9 l.mtspr r0,r9,SPR_SHADOW_GPR(9) #define EMERGENCY_PRINT_LOAD_GPR9 l.mfspr r9,r0,SPR_SHADOW_GPR(9) #else /* !CONFIG_OPENRISC_HAVE_SHADOW_GPRS */ #define EMERGENCY_PRINT_STORE_GPR4 l.sw 0x20(r0),r4 #define EMERGENCY_PRINT_LOAD_GPR4 l.lwz r4,0x20(r0) #define EMERGENCY_PRINT_STORE_GPR5 l.sw 0x24(r0),r5 #define EMERGENCY_PRINT_LOAD_GPR5 l.lwz r5,0x24(r0) #define EMERGENCY_PRINT_STORE_GPR6 l.sw 0x28(r0),r6 #define EMERGENCY_PRINT_LOAD_GPR6 l.lwz r6,0x28(r0) #define EMERGENCY_PRINT_STORE_GPR7 l.sw 0x2c(r0),r7 #define EMERGENCY_PRINT_LOAD_GPR7 l.lwz r7,0x2c(r0) #define EMERGENCY_PRINT_STORE_GPR8 l.sw 0x30(r0),r8 #define EMERGENCY_PRINT_LOAD_GPR8 l.lwz r8,0x30(r0) #define EMERGENCY_PRINT_STORE_GPR9 l.sw 0x34(r0),r9 #define EMERGENCY_PRINT_LOAD_GPR9 l.lwz r9,0x34(r0) #endif /* * TLB miss handlers temorary stores */ #ifdef CONFIG_OPENRISC_HAVE_SHADOW_GPRS #define EXCEPTION_STORE_GPR2 l.mtspr r0,r2,SPR_SHADOW_GPR(2) #define EXCEPTION_LOAD_GPR2 l.mfspr r2,r0,SPR_SHADOW_GPR(2) #define EXCEPTION_STORE_GPR3 l.mtspr r0,r3,SPR_SHADOW_GPR(3) #define EXCEPTION_LOAD_GPR3 l.mfspr r3,r0,SPR_SHADOW_GPR(3) #define EXCEPTION_STORE_GPR4 l.mtspr r0,r4,SPR_SHADOW_GPR(4) #define EXCEPTION_LOAD_GPR4 l.mfspr r4,r0,SPR_SHADOW_GPR(4) #define EXCEPTION_STORE_GPR5 l.mtspr r0,r5,SPR_SHADOW_GPR(5) #define EXCEPTION_LOAD_GPR5 l.mfspr r5,r0,SPR_SHADOW_GPR(5) #define EXCEPTION_STORE_GPR6 l.mtspr r0,r6,SPR_SHADOW_GPR(6) #define EXCEPTION_LOAD_GPR6 l.mfspr r6,r0,SPR_SHADOW_GPR(6) #else /* !CONFIG_OPENRISC_HAVE_SHADOW_GPRS */ #define EXCEPTION_STORE_GPR2 l.sw 0x64(r0),r2 #define EXCEPTION_LOAD_GPR2 l.lwz r2,0x64(r0) #define EXCEPTION_STORE_GPR3 l.sw 0x68(r0),r3 #define EXCEPTION_LOAD_GPR3 l.lwz r3,0x68(r0) #define EXCEPTION_STORE_GPR4 l.sw 0x6c(r0),r4 #define EXCEPTION_LOAD_GPR4 l.lwz r4,0x6c(r0) #define EXCEPTION_STORE_GPR5 l.sw 0x70(r0),r5 #define EXCEPTION_LOAD_GPR5 l.lwz r5,0x70(r0) #define 
EXCEPTION_STORE_GPR6 l.sw 0x74(r0),r6 #define EXCEPTION_LOAD_GPR6 l.lwz r6,0x74(r0) #endif /* * EXCEPTION_HANDLE temporary stores */ #ifdef CONFIG_OPENRISC_HAVE_SHADOW_GPRS #define EXCEPTION_T_STORE_GPR30 l.mtspr r0,r30,SPR_SHADOW_GPR(30) #define EXCEPTION_T_LOAD_GPR30(reg) l.mfspr reg,r0,SPR_SHADOW_GPR(30) #define EXCEPTION_T_STORE_GPR10 l.mtspr r0,r10,SPR_SHADOW_GPR(10) #define EXCEPTION_T_LOAD_GPR10(reg) l.mfspr reg,r0,SPR_SHADOW_GPR(10) #define EXCEPTION_T_STORE_SP l.mtspr r0,r1,SPR_SHADOW_GPR(1) #define EXCEPTION_T_LOAD_SP(reg) l.mfspr reg,r0,SPR_SHADOW_GPR(1) #else /* !CONFIG_OPENRISC_HAVE_SHADOW_GPRS */ #define EXCEPTION_T_STORE_GPR30 l.sw 0x78(r0),r30 #define EXCEPTION_T_LOAD_GPR30(reg) l.lwz reg,0x78(r0) #define EXCEPTION_T_STORE_GPR10 l.sw 0x7c(r0),r10 #define EXCEPTION_T_LOAD_GPR10(reg) l.lwz reg,0x7c(r0) #define EXCEPTION_T_STORE_SP l.sw 0x80(r0),r1 #define EXCEPTION_T_LOAD_SP(reg) l.lwz reg,0x80(r0) #endif /* =========================================================[ macros ]=== */ #ifdef CONFIG_SMP #define GET_CURRENT_PGD(reg,t1) \ LOAD_SYMBOL_2_GPR(reg,current_pgd) ;\ l.mfspr t1,r0,SPR_COREID ;\ l.slli t1,t1,2 ;\ l.add reg,reg,t1 ;\ tophys (t1,reg) ;\ l.lwz reg,0(t1) #else #define GET_CURRENT_PGD(reg,t1) \ LOAD_SYMBOL_2_GPR(reg,current_pgd) ;\ tophys (t1,reg) ;\ l.lwz reg,0(t1) #endif /* Load r10 from current_thread_info_set - clobbers r1 and r30 */ #ifdef CONFIG_SMP #define GET_CURRENT_THREAD_INFO \ LOAD_SYMBOL_2_GPR(r1,current_thread_info_set) ;\ tophys (r30,r1) ;\ l.mfspr r10,r0,SPR_COREID ;\ l.slli r10,r10,2 ;\ l.add r30,r30,r10 ;\ /* r10: current_thread_info */ ;\ l.lwz r10,0(r30) #else #define GET_CURRENT_THREAD_INFO \ LOAD_SYMBOL_2_GPR(r1,current_thread_info_set) ;\ tophys (r30,r1) ;\ /* r10: current_thread_info */ ;\ l.lwz r10,0(r30) #endif /* * DSCR: this is a common hook for handling exceptions. 
it will save * the needed registers, set up stack and pointer to current * then jump to the handler while enabling MMU * * PRMS: handler - a function to jump to. it has to save the * remaining registers to kernel stack, call * appropriate arch-independant exception handler * and finaly jump to ret_from_except * * PREQ: unchanged state from the time exception happened * * POST: SAVED the following registers original value * to the new created exception frame pointed to by r1 * * r1 - ksp pointing to the new (exception) frame * r4 - EEAR exception EA * r10 - current pointing to current_thread_info struct * r12 - syscall 0, since we didn't come from syscall * r30 - handler address of the handler we'll jump to * * handler has to save remaining registers to the exception * ksp frame *before* tainting them! * * NOTE: this function is not reentrant per se. reentrancy is guaranteed * by processor disabling all exceptions/interrupts when exception * accours. * * OPTM: no need to make it so wasteful to extract ksp when in user mode */ #define EXCEPTION_HANDLE(handler) \ EXCEPTION_T_STORE_GPR30 ;\ l.mfspr r30,r0,SPR_ESR_BASE ;\ l.andi r30,r30,SPR_SR_SM ;\ l.sfeqi r30,0 ;\ EXCEPTION_T_STORE_GPR10 ;\ l.bnf 2f /* kernel_mode */ ;\ EXCEPTION_T_STORE_SP /* delay slot */ ;\ 1: /* user_mode: */ ;\ GET_CURRENT_THREAD_INFO ;\ tophys (r30,r10) ;\ l.lwz r1,(TI_KSP)(r30) ;\ /* fall through */ ;\ 2: /* kernel_mode: */ ;\ /* create new stack frame, save only needed gprs */ ;\ /* r1: KSP, r10: current, r4: EEAR, r31: __pa(KSP) */ ;\ /* r12: temp, syscall indicator */ ;\ l.addi r1,r1,-(INT_FRAME_SIZE) ;\ /* r1 is KSP, r30 is __pa(KSP) */ ;\ tophys (r30,r1) ;\ l.sw PT_GPR12(r30),r12 ;\ /* r4 use for tmp before EA */ ;\ l.mfspr r12,r0,SPR_EPCR_BASE ;\ l.sw PT_PC(r30),r12 ;\ l.mfspr r12,r0,SPR_ESR_BASE ;\ l.sw PT_SR(r30),r12 ;\ /* save r30 */ ;\ EXCEPTION_T_LOAD_GPR30(r12) ;\ l.sw PT_GPR30(r30),r12 ;\ /* save r10 as was prior to exception */ ;\ EXCEPTION_T_LOAD_GPR10(r12) ;\ l.sw 
PT_GPR10(r30),r12 ;\ /* save PT_SP as was prior to exception */ ;\ EXCEPTION_T_LOAD_SP(r12) ;\ l.sw PT_SP(r30),r12 ;\ /* save exception r4, set r4 = EA */ ;\ l.sw PT_GPR4(r30),r4 ;\ l.mfspr r4,r0,SPR_EEAR_BASE ;\ /* r12 == 1 if we come from syscall */ ;\ CLEAR_GPR(r12) ;\ /* ----- turn on MMU ----- */ ;\ /* Carry DSX into exception SR */ ;\ l.mfspr r30,r0,SPR_SR ;\ l.andi r30,r30,SPR_SR_DSX ;\ l.ori r30,r30,(EXCEPTION_SR) ;\ l.mtspr r0,r30,SPR_ESR_BASE ;\ /* r30: EA address of handler */ ;\ LOAD_SYMBOL_2_GPR(r30,handler) ;\ l.mtspr r0,r30,SPR_EPCR_BASE ;\ l.rfe /* * this doesn't work * * * #ifdef CONFIG_JUMP_UPON_UNHANDLED_EXCEPTION * #define UNHANDLED_EXCEPTION(handler) \ * l.ori r3,r0,0x1 ;\ * l.mtspr r0,r3,SPR_SR ;\ * l.movhi r3,hi(0xf0000100) ;\ * l.ori r3,r3,lo(0xf0000100) ;\ * l.jr r3 ;\ * l.nop 1 * * #endif */ /* DSCR: this is the same as EXCEPTION_HANDLE(), we are just * a bit more carefull (if we have a PT_SP or current pointer * corruption) and set them up from 'current_set' * */ #define UNHANDLED_EXCEPTION(handler) \ EXCEPTION_T_STORE_GPR30 ;\ EXCEPTION_T_STORE_GPR10 ;\ EXCEPTION_T_STORE_SP ;\ /* temporary store r3, r9 into r1, r10 */ ;\ l.addi r1,r3,0x0 ;\ l.addi r10,r9,0x0 ;\ LOAD_SYMBOL_2_GPR(r9,_string_unhandled_exception) ;\ tophys (r3,r9) ;\ l.jal _emergency_print ;\ l.nop ;\ l.mfspr r3,r0,SPR_NPC ;\ l.jal _emergency_print_nr ;\ l.andi r3,r3,0x1f00 ;\ LOAD_SYMBOL_2_GPR(r9,_string_epc_prefix) ;\ tophys (r3,r9) ;\ l.jal _emergency_print ;\ l.nop ;\ l.jal _emergency_print_nr ;\ l.mfspr r3,r0,SPR_EPCR_BASE ;\ LOAD_SYMBOL_2_GPR(r9,_string_nl) ;\ tophys (r3,r9) ;\ l.jal _emergency_print ;\ l.nop ;\ /* end of printing */ ;\ l.addi r3,r1,0x0 ;\ l.addi r9,r10,0x0 ;\ /* extract current, ksp from current_set */ ;\ LOAD_SYMBOL_2_GPR(r1,_unhandled_stack_top) ;\ LOAD_SYMBOL_2_GPR(r10,init_thread_union) ;\ /* create new stack frame, save only needed gprs */ ;\ /* r1: KSP, r10: current, r31: __pa(KSP) */ ;\ /* r12: temp, syscall indicator, r13 temp */ ;\ l.addi 
r1,r1,-(INT_FRAME_SIZE) ;\ /* r1 is KSP, r30 is __pa(KSP) */ ;\ tophys (r30,r1) ;\ l.sw PT_GPR12(r30),r12 ;\ l.mfspr r12,r0,SPR_EPCR_BASE ;\ l.sw PT_PC(r30),r12 ;\ l.mfspr r12,r0,SPR_ESR_BASE ;\ l.sw PT_SR(r30),r12 ;\ /* save r31 */ ;\ EXCEPTION_T_LOAD_GPR30(r12) ;\ l.sw PT_GPR30(r30),r12 ;\ /* save r10 as was prior to exception */ ;\ EXCEPTION_T_LOAD_GPR10(r12) ;\ l.sw PT_GPR10(r30),r12 ;\ /* save PT_SP as was prior to exception */ ;\ EXCEPTION_T_LOAD_SP(r12) ;\ l.sw PT_SP(r30),r12 ;\ l.sw PT_GPR13(r30),r13 ;\ /* --> */ ;\ /* save exception r4, set r4 = EA */ ;\ l.sw PT_GPR4(r30),r4 ;\ l.mfspr r4,r0,SPR_EEAR_BASE ;\ /* r12 == 1 if we come from syscall */ ;\ CLEAR_GPR(r12) ;\ /* ----- play a MMU trick ----- */ ;\ l.ori r30,r0,(EXCEPTION_SR) ;\ l.mtspr r0,r30,SPR_ESR_BASE ;\ /* r31: EA address of handler */ ;\ LOAD_SYMBOL_2_GPR(r30,handler) ;\ l.mtspr r0,r30,SPR_EPCR_BASE ;\ l.rfe /* =====================================================[ exceptions] === */ /* ---[ 0x100: RESET exception ]----------------------------------------- */ .org 0x100 /* Jump to .init code at _start which lives in the .head section * and will be discarded after boot. 
*/ LOAD_SYMBOL_2_GPR(r15, _start) tophys (r13,r15) /* MMU disabled */ l.jr r13 l.nop /* ---[ 0x200: BUS exception ]------------------------------------------- */ .org 0x200 _dispatch_bus_fault: EXCEPTION_HANDLE(_bus_fault_handler) /* ---[ 0x300: Data Page Fault exception ]------------------------------- */ .org 0x300 _dispatch_do_dpage_fault: // totaly disable timer interrupt // l.mtspr r0,r0,SPR_TTMR // DEBUG_TLB_PROBE(0x300) // EXCEPTION_DEBUG_VALUE_ER_ENABLED(0x300) EXCEPTION_HANDLE(_data_page_fault_handler) /* ---[ 0x400: Insn Page Fault exception ]------------------------------- */ .org 0x400 _dispatch_do_ipage_fault: // totaly disable timer interrupt // l.mtspr r0,r0,SPR_TTMR // DEBUG_TLB_PROBE(0x400) // EXCEPTION_DEBUG_VALUE_ER_ENABLED(0x400) EXCEPTION_HANDLE(_insn_page_fault_handler) /* ---[ 0x500: Timer exception ]----------------------------------------- */ .org 0x500 EXCEPTION_HANDLE(_timer_handler) /* ---[ 0x600: Alignment exception ]-------------------------------------- */ .org 0x600 EXCEPTION_HANDLE(_alignment_handler) /* ---[ 0x700: Illegal insn exception ]---------------------------------- */ .org 0x700 EXCEPTION_HANDLE(_illegal_instruction_handler) /* ---[ 0x800: External interrupt exception ]---------------------------- */ .org 0x800 EXCEPTION_HANDLE(_external_irq_handler) /* ---[ 0x900: DTLB miss exception ]------------------------------------- */ .org 0x900 l.j boot_dtlb_miss_handler l.nop /* ---[ 0xa00: ITLB miss exception ]------------------------------------- */ .org 0xa00 l.j boot_itlb_miss_handler l.nop /* ---[ 0xb00: Range exception ]----------------------------------------- */ .org 0xb00 UNHANDLED_EXCEPTION(_vector_0xb00) /* ---[ 0xc00: Syscall exception ]--------------------------------------- */ .org 0xc00 EXCEPTION_HANDLE(_sys_call_handler) /* ---[ 0xd00: Trap exception ]------------------------------------------ */ .org 0xd00 UNHANDLED_EXCEPTION(_vector_0xd00) /* ---[ 0xe00: Trap exception ]------------------------------------------ 
*/ .org 0xe00 // UNHANDLED_EXCEPTION(_vector_0xe00) EXCEPTION_HANDLE(_trap_handler) /* ---[ 0xf00: Reserved exception ]-------------------------------------- */ .org 0xf00 UNHANDLED_EXCEPTION(_vector_0xf00) /* ---[ 0x1000: Reserved exception ]------------------------------------- */ .org 0x1000 UNHANDLED_EXCEPTION(_vector_0x1000) /* ---[ 0x1100: Reserved exception ]------------------------------------- */ .org 0x1100 UNHANDLED_EXCEPTION(_vector_0x1100) /* ---[ 0x1200: Reserved exception ]------------------------------------- */ .org 0x1200 UNHANDLED_EXCEPTION(_vector_0x1200) /* ---[ 0x1300: Reserved exception ]------------------------------------- */ .org 0x1300 UNHANDLED_EXCEPTION(_vector_0x1300) /* ---[ 0x1400: Reserved exception ]------------------------------------- */ .org 0x1400 UNHANDLED_EXCEPTION(_vector_0x1400) /* ---[ 0x1500: Reserved exception ]------------------------------------- */ .org 0x1500 UNHANDLED_EXCEPTION(_vector_0x1500) /* ---[ 0x1600: Reserved exception ]------------------------------------- */ .org 0x1600 UNHANDLED_EXCEPTION(_vector_0x1600) /* ---[ 0x1700: Reserved exception ]------------------------------------- */ .org 0x1700 UNHANDLED_EXCEPTION(_vector_0x1700) /* ---[ 0x1800: Reserved exception ]------------------------------------- */ .org 0x1800 UNHANDLED_EXCEPTION(_vector_0x1800) /* ---[ 0x1900: Reserved exception ]------------------------------------- */ .org 0x1900 UNHANDLED_EXCEPTION(_vector_0x1900) /* ---[ 0x1a00: Reserved exception ]------------------------------------- */ .org 0x1a00 UNHANDLED_EXCEPTION(_vector_0x1a00) /* ---[ 0x1b00: Reserved exception ]------------------------------------- */ .org 0x1b00 UNHANDLED_EXCEPTION(_vector_0x1b00) /* ---[ 0x1c00: Reserved exception ]------------------------------------- */ .org 0x1c00 UNHANDLED_EXCEPTION(_vector_0x1c00) /* ---[ 0x1d00: Reserved exception ]------------------------------------- */ .org 0x1d00 UNHANDLED_EXCEPTION(_vector_0x1d00) /* ---[ 0x1e00: Reserved exception 
]------------------------------------- */ .org 0x1e00 UNHANDLED_EXCEPTION(_vector_0x1e00) /* ---[ 0x1f00: Reserved exception ]------------------------------------- */ .org 0x1f00 UNHANDLED_EXCEPTION(_vector_0x1f00) .org 0x2000 /* ===================================================[ kernel start ]=== */ /* .text*/ /* This early stuff belongs in HEAD, but some of the functions below definitely * don't... */ __HEAD .global _start _start: /* Init r0 to zero as per spec */ CLEAR_GPR(r0) /* save kernel parameters */ l.or r25,r0,r3 /* pointer to fdt */ /* * ensure a deterministic start */ l.ori r3,r0,0x1 l.mtspr r0,r3,SPR_SR /* * Start the TTCR as early as possible, so that the RNG can make use of * measurements of boot time from the earliest opportunity. Especially * important is that the TTCR does not return zero by the time we reach * random_init(). */ l.movhi r3,hi(SPR_TTMR_CR) l.mtspr r0,r3,SPR_TTMR CLEAR_GPR(r1) CLEAR_GPR(r2) CLEAR_GPR(r3) CLEAR_GPR(r4) CLEAR_GPR(r5) CLEAR_GPR(r6) CLEAR_GPR(r7) CLEAR_GPR(r8) CLEAR_GPR(r9) CLEAR_GPR(r10) CLEAR_GPR(r11) CLEAR_GPR(r12) CLEAR_GPR(r13) CLEAR_GPR(r14) CLEAR_GPR(r15) CLEAR_GPR(r16) CLEAR_GPR(r17) CLEAR_GPR(r18) CLEAR_GPR(r19) CLEAR_GPR(r20) CLEAR_GPR(r21) CLEAR_GPR(r22) CLEAR_GPR(r23) CLEAR_GPR(r24) CLEAR_GPR(r26) CLEAR_GPR(r27) CLEAR_GPR(r28) CLEAR_GPR(r29) CLEAR_GPR(r30) CLEAR_GPR(r31) #ifdef CONFIG_SMP l.mfspr r26,r0,SPR_COREID l.sfeq r26,r0 l.bnf secondary_wait l.nop #endif /* * set up initial ksp and current */ /* setup kernel stack */ LOAD_SYMBOL_2_GPR(r1,init_thread_union + THREAD_SIZE) LOAD_SYMBOL_2_GPR(r10,init_thread_union) // setup current tophys (r31,r10) l.sw TI_KSP(r31), r1 l.ori r4,r0,0x0 /* * .data contains initialized data, * .bss contains uninitialized data - clear it up */ clear_bss: LOAD_SYMBOL_2_GPR(r24, __bss_start) LOAD_SYMBOL_2_GPR(r26, _end) tophys(r28,r24) tophys(r30,r26) CLEAR_GPR(r24) CLEAR_GPR(r26) 1: l.sw (0)(r28),r0 l.sfltu r28,r30 l.bf 1b l.addi r28,r28,4 enable_ic: l.jal _ic_enable l.nop 
enable_dc: l.jal _dc_enable l.nop flush_tlb: l.jal _flush_tlb l.nop /* The MMU needs to be enabled before or1k_early_setup is called */ enable_mmu: /* * enable dmmu & immu * SR[5] = 0, SR[6] = 0, 6th and 7th bit of SR set to 0 */ l.mfspr r30,r0,SPR_SR l.movhi r28,hi(SPR_SR_DME | SPR_SR_IME) l.ori r28,r28,lo(SPR_SR_DME | SPR_SR_IME) l.or r30,r30,r28 l.mtspr r0,r30,SPR_SR l.nop l.nop l.nop l.nop l.nop l.nop l.nop l.nop l.nop l.nop l.nop l.nop l.nop l.nop l.nop l.nop // reset the simulation counters l.nop 5 /* check fdt header magic word */ l.lwz r3,0(r25) /* load magic from fdt into r3 */ l.movhi r4,hi(OF_DT_HEADER) l.ori r4,r4,lo(OF_DT_HEADER) l.sfeq r3,r4 l.bf _fdt_found l.nop /* magic number mismatch, set fdt pointer to null */ l.or r25,r0,r0 _fdt_found: /* pass fdt pointer to or1k_early_setup in r3 */ l.or r3,r0,r25 LOAD_SYMBOL_2_GPR(r24, or1k_early_setup) l.jalr r24 l.nop clear_regs: /* * clear all GPRS to increase determinism */ CLEAR_GPR(r2) CLEAR_GPR(r3) CLEAR_GPR(r4) CLEAR_GPR(r5) CLEAR_GPR(r6) CLEAR_GPR(r7) CLEAR_GPR(r8) CLEAR_GPR(r9) CLEAR_GPR(r11) CLEAR_GPR(r12) CLEAR_GPR(r13) CLEAR_GPR(r14) CLEAR_GPR(r15) CLEAR_GPR(r16) CLEAR_GPR(r17) CLEAR_GPR(r18) CLEAR_GPR(r19) CLEAR_GPR(r20) CLEAR_GPR(r21) CLEAR_GPR(r22) CLEAR_GPR(r23) CLEAR_GPR(r24) CLEAR_GPR(r25) CLEAR_GPR(r26) CLEAR_GPR(r27) CLEAR_GPR(r28) CLEAR_GPR(r29) CLEAR_GPR(r30) CLEAR_GPR(r31) jump_start_kernel: /* * jump to kernel entry (start_kernel) */ LOAD_SYMBOL_2_GPR(r30, start_kernel) l.jr r30 l.nop _flush_tlb: /* * I N V A L I D A T E T L B e n t r i e s */ LOAD_SYMBOL_2_GPR(r5,SPR_DTLBMR_BASE(0)) LOAD_SYMBOL_2_GPR(r6,SPR_ITLBMR_BASE(0)) l.addi r7,r0,128 /* Maximum number of sets */ 1: l.mtspr r5,r0,0x0 l.mtspr r6,r0,0x0 l.addi r5,r5,1 l.addi r6,r6,1 l.sfeq r7,r0 l.bnf 1b l.addi r7,r7,-1 l.jr r9 l.nop #ifdef CONFIG_SMP secondary_wait: /* Doze the cpu until we are asked to run */ /* If we dont have power management skip doze */ l.mfspr r25,r0,SPR_UPR l.andi r25,r25,SPR_UPR_PMP l.sfeq r25,r0 l.bf 
secondary_check_release l.nop /* Setup special secondary exception handler */ LOAD_SYMBOL_2_GPR(r3, _secondary_evbar) tophys(r25,r3) l.mtspr r0,r25,SPR_EVBAR /* Enable Interrupts */ l.mfspr r25,r0,SPR_SR l.ori r25,r25,SPR_SR_IEE l.mtspr r0,r25,SPR_SR /* Unmask interrupts interrupts */ l.mfspr r25,r0,SPR_PICMR l.ori r25,r25,0xffff l.mtspr r0,r25,SPR_PICMR /* Doze */ l.mfspr r25,r0,SPR_PMR LOAD_SYMBOL_2_GPR(r3, SPR_PMR_DME) l.or r25,r25,r3 l.mtspr r0,r25,SPR_PMR /* Wakeup - Restore exception handler */ l.mtspr r0,r0,SPR_EVBAR secondary_check_release: /* * Check if we actually got the release signal, if not go-back to * sleep. */ l.mfspr r25,r0,SPR_COREID LOAD_SYMBOL_2_GPR(r3, secondary_release) tophys(r4, r3) l.lwz r3,0(r4) l.sfeq r25,r3 l.bnf secondary_wait l.nop /* fall through to secondary_init */ secondary_init: /* * set up initial ksp and current */ LOAD_SYMBOL_2_GPR(r10, secondary_thread_info) tophys (r30,r10) l.lwz r10,0(r30) l.addi r1,r10,THREAD_SIZE tophys (r30,r10) l.sw TI_KSP(r30),r1 l.jal _ic_enable l.nop l.jal _dc_enable l.nop l.jal _flush_tlb l.nop /* * enable dmmu & immu */ l.mfspr r30,r0,SPR_SR l.movhi r28,hi(SPR_SR_DME | SPR_SR_IME) l.ori r28,r28,lo(SPR_SR_DME | SPR_SR_IME) l.or r30,r30,r28 /* * This is a bit tricky, we need to switch over from physical addresses * to virtual addresses on the fly. * To do that, we first set up ESR with the IME and DME bits set. * Then EPCR is set to secondary_start and then a l.rfe is issued to * "jump" to that. 
*/ l.mtspr r0,r30,SPR_ESR_BASE LOAD_SYMBOL_2_GPR(r30, secondary_start) l.mtspr r0,r30,SPR_EPCR_BASE l.rfe secondary_start: LOAD_SYMBOL_2_GPR(r30, secondary_start_kernel) l.jr r30 l.nop #endif /* ========================================[ cache ]=== */ /* alignment here so we don't change memory offsets with * memory controller defined */ .align 0x2000 _ic_enable: /* Check if IC present and skip enabling otherwise */ l.mfspr r24,r0,SPR_UPR l.andi r26,r24,SPR_UPR_ICP l.sfeq r26,r0 l.bf 9f l.nop /* Disable IC */ l.mfspr r6,r0,SPR_SR l.addi r5,r0,-1 l.xori r5,r5,SPR_SR_ICE l.and r5,r6,r5 l.mtspr r0,r5,SPR_SR /* Establish cache block size If BS=0, 16; If BS=1, 32; r14 contain block size */ l.mfspr r24,r0,SPR_ICCFGR l.andi r26,r24,SPR_ICCFGR_CBS l.srli r28,r26,7 l.ori r30,r0,16 l.sll r14,r30,r28 /* Establish number of cache sets r16 contains number of cache sets r28 contains log(# of cache sets) */ l.andi r26,r24,SPR_ICCFGR_NCS l.srli r28,r26,3 l.ori r30,r0,1 l.sll r16,r30,r28 /* Invalidate IC */ l.addi r6,r0,0 l.sll r5,r14,r28 // l.mul r5,r14,r16 // l.trap 1 // l.addi r5,r0,IC_SIZE 1: l.mtspr r0,r6,SPR_ICBIR l.sfne r6,r5 l.bf 1b l.add r6,r6,r14 // l.addi r6,r6,IC_LINE /* Enable IC */ l.mfspr r6,r0,SPR_SR l.ori r6,r6,SPR_SR_ICE l.mtspr r0,r6,SPR_SR l.nop l.nop l.nop l.nop l.nop l.nop l.nop l.nop l.nop l.nop 9: l.jr r9 l.nop _dc_enable: /* Check if DC present and skip enabling otherwise */ l.mfspr r24,r0,SPR_UPR l.andi r26,r24,SPR_UPR_DCP l.sfeq r26,r0 l.bf 9f l.nop /* Disable DC */ l.mfspr r6,r0,SPR_SR l.addi r5,r0,-1 l.xori r5,r5,SPR_SR_DCE l.and r5,r6,r5 l.mtspr r0,r5,SPR_SR /* Establish cache block size If BS=0, 16; If BS=1, 32; r14 contain block size */ l.mfspr r24,r0,SPR_DCCFGR l.andi r26,r24,SPR_DCCFGR_CBS l.srli r28,r26,7 l.ori r30,r0,16 l.sll r14,r30,r28 /* Establish number of cache sets r16 contains number of cache sets r28 contains log(# of cache sets) */ l.andi r26,r24,SPR_DCCFGR_NCS l.srli r28,r26,3 l.ori r30,r0,1 l.sll r16,r30,r28 /* Invalidate DC */ l.addi 
r6,r0,0 l.sll r5,r14,r28 1: l.mtspr r0,r6,SPR_DCBIR l.sfne r6,r5 l.bf 1b l.add r6,r6,r14 /* Enable DC */ l.mfspr r6,r0,SPR_SR l.ori r6,r6,SPR_SR_DCE l.mtspr r0,r6,SPR_SR 9: l.jr r9 l.nop /* ===============================================[ page table masks ]=== */ #define DTLB_UP_CONVERT_MASK 0x3fa #define ITLB_UP_CONVERT_MASK 0x3a /* for SMP we'd have (this is a bit subtle, CC must be always set * for SMP, but since we have _PAGE_PRESENT bit always defined * we can just modify the mask) */ #define DTLB_SMP_CONVERT_MASK 0x3fb #define ITLB_SMP_CONVERT_MASK 0x3b /* ---[ boot dtlb miss handler ]----------------------------------------- */ boot_dtlb_miss_handler: /* mask for DTLB_MR register: - (0) sets V (valid) bit, * - (31-12) sets bits belonging to VPN (31-12) */ #define DTLB_MR_MASK 0xfffff001 /* mask for DTLB_TR register: - (2) sets CI (cache inhibit) bit, * - (4) sets A (access) bit, * - (5) sets D (dirty) bit, * - (8) sets SRE (superuser read) bit * - (9) sets SWE (superuser write) bit * - (31-12) sets bits belonging to VPN (31-12) */ #define DTLB_TR_MASK 0xfffff332 /* These are for masking out the VPN/PPN value from the MR/TR registers... * it's not the same as the PFN */ #define VPN_MASK 0xfffff000 #define PPN_MASK 0xfffff000 EXCEPTION_STORE_GPR6 #if 0 l.mfspr r6,r0,SPR_ESR_BASE // l.andi r6,r6,SPR_SR_SM // are we in kernel mode ? 
l.sfeqi r6,0 // r6 == 0x1 --> SM l.bf exit_with_no_dtranslation // l.nop #endif /* this could be optimized by moving storing of * non r6 registers here, and jumping r6 restore * if not in supervisor mode */ EXCEPTION_STORE_GPR2 EXCEPTION_STORE_GPR3 EXCEPTION_STORE_GPR4 EXCEPTION_STORE_GPR5 l.mfspr r4,r0,SPR_EEAR_BASE // get the offending EA immediate_translation: CLEAR_GPR(r6) l.srli r3,r4,0xd // r3 <- r4 / 8192 (sets are relative to page size (8Kb) NOT VPN size (4Kb) l.mfspr r6, r0, SPR_DMMUCFGR l.andi r6, r6, SPR_DMMUCFGR_NTS l.srli r6, r6, SPR_DMMUCFGR_NTS_OFF l.ori r5, r0, 0x1 l.sll r5, r5, r6 // r5 = number DMMU sets l.addi r6, r5, -1 // r6 = nsets mask l.and r2, r3, r6 // r2 <- r3 % NSETS_MASK l.or r6,r6,r4 // r6 <- r4 l.ori r6,r6,~(VPN_MASK) // r6 <- VPN :VPN .xfff - clear up lo(r6) to 0x**** *fff l.movhi r5,hi(DTLB_MR_MASK) // r5 <- ffff:0000.x000 l.ori r5,r5,lo(DTLB_MR_MASK) // r5 <- ffff:1111.x001 - apply DTLB_MR_MASK l.and r5,r5,r6 // r5 <- VPN :VPN .x001 - we have DTLBMR entry l.mtspr r2,r5,SPR_DTLBMR_BASE(0) // set DTLBMR /* set up DTLB with no translation for EA <= 0xbfffffff */ LOAD_SYMBOL_2_GPR(r6,0xbfffffff) l.sfgeu r6,r4 // flag if r6 >= r4 (if 0xbfffffff >= EA) l.bf 1f // goto out l.and r3,r4,r4 // delay slot :: 24 <- r4 (if flag==1) tophys(r3,r4) // r3 <- PA 1: l.ori r3,r3,~(PPN_MASK) // r3 <- PPN :PPN .xfff - clear up lo(r6) to 0x**** *fff l.movhi r5,hi(DTLB_TR_MASK) // r5 <- ffff:0000.x000 l.ori r5,r5,lo(DTLB_TR_MASK) // r5 <- ffff:1111.x330 - apply DTLB_MR_MASK l.and r5,r5,r3 // r5 <- PPN :PPN .x330 - we have DTLBTR entry l.mtspr r2,r5,SPR_DTLBTR_BASE(0) // set DTLBTR EXCEPTION_LOAD_GPR6 EXCEPTION_LOAD_GPR5 EXCEPTION_LOAD_GPR4 EXCEPTION_LOAD_GPR3 EXCEPTION_LOAD_GPR2 l.rfe // SR <- ESR, PC <- EPC exit_with_no_dtranslation: /* EA out of memory or not in supervisor mode */ EXCEPTION_LOAD_GPR6 EXCEPTION_LOAD_GPR4 l.j _dispatch_bus_fault /* ---[ boot itlb miss handler ]----------------------------------------- */ boot_itlb_miss_handler: /* mask 
for ITLB_MR register: - sets V (valid) bit, * - sets bits belonging to VPN (15-12) */ #define ITLB_MR_MASK 0xfffff001 /* mask for ITLB_TR register: - sets A (access) bit, * - sets SXE (superuser execute) bit * - sets bits belonging to VPN (15-12) */ #define ITLB_TR_MASK 0xfffff050 /* #define VPN_MASK 0xffffe000 #define PPN_MASK 0xffffe000 */ EXCEPTION_STORE_GPR2 EXCEPTION_STORE_GPR3 EXCEPTION_STORE_GPR4 EXCEPTION_STORE_GPR5 EXCEPTION_STORE_GPR6 #if 0 l.mfspr r6,r0,SPR_ESR_BASE // l.andi r6,r6,SPR_SR_SM // are we in kernel mode ? l.sfeqi r6,0 // r6 == 0x1 --> SM l.bf exit_with_no_itranslation l.nop #endif l.mfspr r4,r0,SPR_EEAR_BASE // get the offending EA earlyearly: CLEAR_GPR(r6) l.srli r3,r4,0xd // r3 <- r4 / 8192 (sets are relative to page size (8Kb) NOT VPN size (4Kb) l.mfspr r6, r0, SPR_IMMUCFGR l.andi r6, r6, SPR_IMMUCFGR_NTS l.srli r6, r6, SPR_IMMUCFGR_NTS_OFF l.ori r5, r0, 0x1 l.sll r5, r5, r6 // r5 = number IMMU sets from IMMUCFGR l.addi r6, r5, -1 // r6 = nsets mask l.and r2, r3, r6 // r2 <- r3 % NSETS_MASK l.or r6,r6,r4 // r6 <- r4 l.ori r6,r6,~(VPN_MASK) // r6 <- VPN :VPN .xfff - clear up lo(r6) to 0x**** *fff l.movhi r5,hi(ITLB_MR_MASK) // r5 <- ffff:0000.x000 l.ori r5,r5,lo(ITLB_MR_MASK) // r5 <- ffff:1111.x001 - apply ITLB_MR_MASK l.and r5,r5,r6 // r5 <- VPN :VPN .x001 - we have ITLBMR entry l.mtspr r2,r5,SPR_ITLBMR_BASE(0) // set ITLBMR /* * set up ITLB with no translation for EA <= 0x0fffffff * * we need this for head.S mapping (EA = PA). if we move all functions * which run with mmu enabled into entry.S, we might be able to eliminate this. 
* */ LOAD_SYMBOL_2_GPR(r6,0x0fffffff) l.sfgeu r6,r4 // flag if r6 >= r4 (if 0xb0ffffff >= EA) l.bf 1f // goto out l.and r3,r4,r4 // delay slot :: 24 <- r4 (if flag==1) tophys(r3,r4) // r3 <- PA 1: l.ori r3,r3,~(PPN_MASK) // r3 <- PPN :PPN .xfff - clear up lo(r6) to 0x**** *fff l.movhi r5,hi(ITLB_TR_MASK) // r5 <- ffff:0000.x000 l.ori r5,r5,lo(ITLB_TR_MASK) // r5 <- ffff:1111.x050 - apply ITLB_MR_MASK l.and r5,r5,r3 // r5 <- PPN :PPN .x050 - we have ITLBTR entry l.mtspr r2,r5,SPR_ITLBTR_BASE(0) // set ITLBTR EXCEPTION_LOAD_GPR6 EXCEPTION_LOAD_GPR5 EXCEPTION_LOAD_GPR4 EXCEPTION_LOAD_GPR3 EXCEPTION_LOAD_GPR2 l.rfe // SR <- ESR, PC <- EPC exit_with_no_itranslation: EXCEPTION_LOAD_GPR4 EXCEPTION_LOAD_GPR6 l.j _dispatch_bus_fault l.nop /* ====================================================================== */ /* * Stuff below here shouldn't go into .head section... maybe this stuff * can be moved to entry.S ??? */ /* ==============================================[ DTLB miss handler ]=== */ /* * Comments: * Exception handlers are entered with MMU off so the following handler * needs to use physical addressing * */ .text ENTRY(dtlb_miss_handler) EXCEPTION_STORE_GPR2 EXCEPTION_STORE_GPR3 EXCEPTION_STORE_GPR4 /* * get EA of the miss */ l.mfspr r2,r0,SPR_EEAR_BASE /* * pmd = (pmd_t *)(current_pgd + pgd_index(daddr)); */ GET_CURRENT_PGD(r3,r4) // r3 is current_pgd, r4 is temp l.srli r4,r2,0x18 // >> PAGE_SHIFT + (PAGE_SHIFT - 2) l.slli r4,r4,0x2 // to get address << 2 l.add r3,r4,r3 // r4 is pgd_index(daddr) /* * if (pmd_none(*pmd)) * goto pmd_none: */ tophys (r4,r3) l.lwz r3,0x0(r4) // get *pmd value l.sfne r3,r0 l.bnf d_pmd_none l.addi r3,r0,0xffffe000 // PAGE_MASK d_pmd_good: /* * pte = *pte_offset(pmd, daddr); */ l.lwz r4,0x0(r4) // get **pmd value l.and r4,r4,r3 // & PAGE_MASK l.srli r2,r2,0xd // >> PAGE_SHIFT, r2 == EEAR l.andi r3,r2,0x7ff // (1UL << PAGE_SHIFT - 2) - 1 l.slli r3,r3,0x2 // to get address << 2 l.add r3,r3,r4 l.lwz r3,0x0(r3) // this is pte at last /* * 
if (!pte_present(pte)) */ l.andi r4,r3,0x1 l.sfne r4,r0 // is pte present l.bnf d_pte_not_present l.addi r4,r0,0xffffe3fa // PAGE_MASK | DTLB_UP_CONVERT_MASK /* * fill DTLB TR register */ l.and r4,r3,r4 // apply the mask // Determine number of DMMU sets l.mfspr r2, r0, SPR_DMMUCFGR l.andi r2, r2, SPR_DMMUCFGR_NTS l.srli r2, r2, SPR_DMMUCFGR_NTS_OFF l.ori r3, r0, 0x1 l.sll r3, r3, r2 // r3 = number DMMU sets DMMUCFGR l.addi r2, r3, -1 // r2 = nsets mask l.mfspr r3, r0, SPR_EEAR_BASE l.srli r3, r3, 0xd // >> PAGE_SHIFT l.and r2, r3, r2 // calc offset: & (NUM_TLB_ENTRIES-1) //NUM_TLB_ENTRIES l.mtspr r2,r4,SPR_DTLBTR_BASE(0) /* * fill DTLB MR register */ l.slli r3, r3, 0xd /* << PAGE_SHIFT => EA & PAGE_MASK */ l.ori r4,r3,0x1 // set hardware valid bit: DTBL_MR entry l.mtspr r2,r4,SPR_DTLBMR_BASE(0) EXCEPTION_LOAD_GPR2 EXCEPTION_LOAD_GPR3 EXCEPTION_LOAD_GPR4 l.rfe d_pmd_none: d_pte_not_present: EXCEPTION_LOAD_GPR2 EXCEPTION_LOAD_GPR3 EXCEPTION_LOAD_GPR4 EXCEPTION_HANDLE(_dtlb_miss_page_fault_handler) /* ==============================================[ ITLB miss handler ]=== */ ENTRY(itlb_miss_handler) EXCEPTION_STORE_GPR2 EXCEPTION_STORE_GPR3 EXCEPTION_STORE_GPR4 /* * get EA of the miss */ l.mfspr r2,r0,SPR_EEAR_BASE /* * pmd = (pmd_t *)(current_pgd + pgd_index(daddr)); * */ GET_CURRENT_PGD(r3,r4) // r3 is current_pgd, r5 is temp l.srli r4,r2,0x18 // >> PAGE_SHIFT + (PAGE_SHIFT - 2) l.slli r4,r4,0x2 // to get address << 2 l.add r3,r4,r3 // r4 is pgd_index(daddr) /* * if (pmd_none(*pmd)) * goto pmd_none: */ tophys (r4,r3) l.lwz r3,0x0(r4) // get *pmd value l.sfne r3,r0 l.bnf i_pmd_none l.addi r3,r0,0xffffe000 // PAGE_MASK i_pmd_good: /* * pte = *pte_offset(pmd, iaddr); * */ l.lwz r4,0x0(r4) // get **pmd value l.and r4,r4,r3 // & PAGE_MASK l.srli r2,r2,0xd // >> PAGE_SHIFT, r2 == EEAR l.andi r3,r2,0x7ff // (1UL << PAGE_SHIFT - 2) - 1 l.slli r3,r3,0x2 // to get address << 2 l.add r3,r3,r4 l.lwz r3,0x0(r3) // this is pte at last /* * if (!pte_present(pte)) * */ l.andi 
r4,r3,0x1 l.sfne r4,r0 // is pte present l.bnf i_pte_not_present l.addi r4,r0,0xffffe03a // PAGE_MASK | ITLB_UP_CONVERT_MASK /* * fill ITLB TR register */ l.and r4,r3,r4 // apply the mask l.andi r3,r3,0x7c0 // _PAGE_EXEC | _PAGE_SRE | _PAGE_SWE | _PAGE_URE | _PAGE_UWE l.sfeq r3,r0 l.bf itlb_tr_fill //_workaround // Determine number of IMMU sets l.mfspr r2, r0, SPR_IMMUCFGR l.andi r2, r2, SPR_IMMUCFGR_NTS l.srli r2, r2, SPR_IMMUCFGR_NTS_OFF l.ori r3, r0, 0x1 l.sll r3, r3, r2 // r3 = number IMMU sets IMMUCFGR l.addi r2, r3, -1 // r2 = nsets mask l.mfspr r3, r0, SPR_EEAR_BASE l.srli r3, r3, 0xd // >> PAGE_SHIFT l.and r2, r3, r2 // calc offset: & (NUM_TLB_ENTRIES-1) /* * __PHX__ :: fixme * we should not just blindly set executable flags, * but it does help with ping. the clean way would be to find out * (and fix it) why stack doesn't have execution permissions */ itlb_tr_fill_workaround: l.ori r4,r4,0xc0 // | (SPR_ITLBTR_UXE | ITLBTR_SXE) itlb_tr_fill: l.mtspr r2,r4,SPR_ITLBTR_BASE(0) /* * fill DTLB MR register */ l.slli r3, r3, 0xd /* << PAGE_SHIFT => EA & PAGE_MASK */ l.ori r4,r3,0x1 // set hardware valid bit: ITBL_MR entry l.mtspr r2,r4,SPR_ITLBMR_BASE(0) EXCEPTION_LOAD_GPR2 EXCEPTION_LOAD_GPR3 EXCEPTION_LOAD_GPR4 l.rfe i_pmd_none: i_pte_not_present: EXCEPTION_LOAD_GPR2 EXCEPTION_LOAD_GPR3 EXCEPTION_LOAD_GPR4 EXCEPTION_HANDLE(_itlb_miss_page_fault_handler) /* ==============================================[ boot tlb handlers ]=== */ /* =================================================[ debugging aids ]=== */ /* * DESC: Prints ASCII character stored in r7 * * PRMS: r7 - a 32-bit value with an ASCII character in the first byte * position. 
* * PREQ: The UART at UART_BASE_ADD has to be initialized * * POST: internally used but restores: * r4 - to store UART_BASE_ADD * r5 - for loading OFF_TXFULL / THRE,TEMT * r6 - for storing bitmask (SERIAL_8250) */ ENTRY(_emergency_putc) EMERGENCY_PRINT_STORE_GPR4 EMERGENCY_PRINT_STORE_GPR5 EMERGENCY_PRINT_STORE_GPR6 l.movhi r4,hi(UART_BASE_ADD) l.ori r4,r4,lo(UART_BASE_ADD) #if defined(CONFIG_SERIAL_LITEUART) /* Check OFF_TXFULL status */ 1: l.lwz r5,4(r4) l.andi r5,r5,0xff l.sfnei r5,0 l.bf 1b l.nop /* Write character */ l.andi r7,r7,0xff l.sw 0(r4),r7 #elif defined(CONFIG_SERIAL_8250) /* Check UART LSR THRE (hold) bit */ l.addi r6,r0,0x20 1: l.lbz r5,5(r4) l.andi r5,r5,0x20 l.sfeq r5,r6 l.bnf 1b l.nop /* Write character */ l.sb 0(r4),r7 /* Check UART LSR THRE|TEMT (hold, empty) bits */ l.addi r6,r0,0x60 1: l.lbz r5,5(r4) l.andi r5,r5,0x60 l.sfeq r5,r6 l.bnf 1b l.nop #endif EMERGENCY_PRINT_LOAD_GPR6 EMERGENCY_PRINT_LOAD_GPR5 EMERGENCY_PRINT_LOAD_GPR4 l.jr r9 l.nop /* * DSCR: prints a string referenced by r3. * * PRMS: r3 - address of the first character of null * terminated string to be printed * * PREQ: UART at UART_BASE_ADD has to be initialized * * POST: caller should be aware that r3, r9 are changed */ ENTRY(_emergency_print) EMERGENCY_PRINT_STORE_GPR7 EMERGENCY_PRINT_STORE_GPR9 /* Load character to r7, check for null terminator */ 2: l.lbz r7,0(r3) l.sfeqi r7,0x0 l.bf 9f l.nop l.jal _emergency_putc l.nop /* next character */ l.j 2b l.addi r3,r3,0x1 9: EMERGENCY_PRINT_LOAD_GPR9 EMERGENCY_PRINT_LOAD_GPR7 l.jr r9 l.nop /* * DSCR: prints a number in r3 in hex. 
* * PRMS: r3 - a 32-bit unsigned integer * * PREQ: UART at UART_BASE_ADD has to be initialized * * POST: caller should be aware that r3, r9 are changed */ ENTRY(_emergency_print_nr) EMERGENCY_PRINT_STORE_GPR7 EMERGENCY_PRINT_STORE_GPR8 EMERGENCY_PRINT_STORE_GPR9 l.addi r8,r0,32 // shift register 1: /* remove leading zeros */ l.addi r8,r8,-0x4 l.srl r7,r3,r8 l.andi r7,r7,0xf /* don't skip the last zero if number == 0x0 */ l.sfeqi r8,0x4 l.bf 2f l.nop l.sfeq r7,r0 l.bf 1b l.nop 2: l.srl r7,r3,r8 l.andi r7,r7,0xf l.sflts r8,r0 l.bf 9f /* Numbers greater than 9 translate to a-f */ l.sfgtui r7,0x9 l.bnf 8f l.nop l.addi r7,r7,0x27 /* Convert to ascii and output character */ 8: l.jal _emergency_putc l.addi r7,r7,0x30 /* next character */ l.j 2b l.addi r8,r8,-0x4 9: EMERGENCY_PRINT_LOAD_GPR9 EMERGENCY_PRINT_LOAD_GPR8 EMERGENCY_PRINT_LOAD_GPR7 l.jr r9 l.nop /* * This should be used for debugging only. * It messes up the Linux early serial output * somehow, so use it sparingly and essentially * only if you need to debug something that goes wrong * before Linux gets the early serial going. * * Furthermore, you'll have to make sure you set the * UART_DEVISOR correctly according to the system * clock rate. 
* * */ #define SYS_CLK 20000000 //#define SYS_CLK 1843200 #define OR32_CONSOLE_BAUD 115200 #define UART_DIVISOR SYS_CLK/(16*OR32_CONSOLE_BAUD) ENTRY(_early_uart_init) l.movhi r3,hi(UART_BASE_ADD) l.ori r3,r3,lo(UART_BASE_ADD) #if defined(CONFIG_SERIAL_8250) l.addi r4,r0,0x7 l.sb 0x2(r3),r4 l.addi r4,r0,0x0 l.sb 0x1(r3),r4 l.addi r4,r0,0x3 l.sb 0x3(r3),r4 l.lbz r5,3(r3) l.ori r4,r5,0x80 l.sb 0x3(r3),r4 l.addi r4,r0,((UART_DIVISOR>>8) & 0x000000ff) l.sb UART_DLM(r3),r4 l.addi r4,r0,((UART_DIVISOR) & 0x000000ff) l.sb UART_DLL(r3),r4 l.sb 0x3(r3),r5 #endif l.jr r9 l.nop .align 0x1000 .global _secondary_evbar _secondary_evbar: .space 0x800 /* Just disable interrupts and Return */ l.ori r3,r0,SPR_SR_SM l.mtspr r0,r3,SPR_ESR_BASE l.rfe .section .rodata _string_unhandled_exception: .string "\r\nRunarunaround: Unhandled exception 0x\0" _string_epc_prefix: .string ": EPC=0x\0" _string_nl: .string "\r\n\0" /* ========================================[ page aligned structures ]=== */ /* * .data section should be page aligned * (look into arch/openrisc/kernel/vmlinux.lds.S) */ .section .data,"aw" .align 8192 .global empty_zero_page empty_zero_page: .space 8192 .global swapper_pg_dir swapper_pg_dir: .space 8192 .global _unhandled_stack _unhandled_stack: .space 8192 _unhandled_stack_top: /* ============================================================[ EOF ]=== */
aixcc-public/challenge-001-exemplar-source
33,626
arch/openrisc/kernel/entry.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * OpenRISC entry.S * * Linux architectural port borrowing liberally from similar works of * others. All original copyrights apply as per the original source * declaration. * * Modifications for the OpenRISC architecture: * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> * Copyright (C) 2005 Gyorgy Jeney <nog@bsemi.com> * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> */ #include <linux/linkage.h> #include <linux/pgtable.h> #include <asm/processor.h> #include <asm/unistd.h> #include <asm/thread_info.h> #include <asm/errno.h> #include <asm/spr_defs.h> #include <asm/page.h> #include <asm/mmu.h> #include <asm/asm-offsets.h> #define DISABLE_INTERRUPTS(t1,t2) \ l.mfspr t2,r0,SPR_SR ;\ l.movhi t1,hi(~(SPR_SR_IEE|SPR_SR_TEE)) ;\ l.ori t1,t1,lo(~(SPR_SR_IEE|SPR_SR_TEE)) ;\ l.and t2,t2,t1 ;\ l.mtspr r0,t2,SPR_SR #define ENABLE_INTERRUPTS(t1) \ l.mfspr t1,r0,SPR_SR ;\ l.ori t1,t1,lo(SPR_SR_IEE|SPR_SR_TEE) ;\ l.mtspr r0,t1,SPR_SR /* =========================================================[ macros ]=== */ #ifdef CONFIG_TRACE_IRQFLAGS /* * Trace irq on/off creating a stack frame. */ #define TRACE_IRQS_OP(trace_op) \ l.sw -8(r1),r2 /* store frame pointer */ ;\ l.sw -4(r1),r9 /* store return address */ ;\ l.addi r2,r1,0 /* move sp to fp */ ;\ l.jal trace_op ;\ l.addi r1,r1,-8 ;\ l.ori r1,r2,0 /* restore sp */ ;\ l.lwz r9,-4(r1) /* restore return address */ ;\ l.lwz r2,-8(r1) /* restore fp */ ;\ /* * Trace irq on/off and save registers we need that would otherwise be * clobbered. 
*/ #define TRACE_IRQS_SAVE(t1,trace_op) \ l.sw -12(r1),t1 /* save extra reg */ ;\ l.sw -8(r1),r2 /* store frame pointer */ ;\ l.sw -4(r1),r9 /* store return address */ ;\ l.addi r2,r1,0 /* move sp to fp */ ;\ l.jal trace_op ;\ l.addi r1,r1,-12 ;\ l.ori r1,r2,0 /* restore sp */ ;\ l.lwz r9,-4(r1) /* restore return address */ ;\ l.lwz r2,-8(r1) /* restore fp */ ;\ l.lwz t1,-12(r1) /* restore extra reg */ #define TRACE_IRQS_OFF TRACE_IRQS_OP(trace_hardirqs_off) #define TRACE_IRQS_ON TRACE_IRQS_OP(trace_hardirqs_on) #define TRACE_IRQS_ON_SYSCALL \ TRACE_IRQS_SAVE(r10,trace_hardirqs_on) ;\ l.lwz r3,PT_GPR3(r1) ;\ l.lwz r4,PT_GPR4(r1) ;\ l.lwz r5,PT_GPR5(r1) ;\ l.lwz r6,PT_GPR6(r1) ;\ l.lwz r7,PT_GPR7(r1) ;\ l.lwz r8,PT_GPR8(r1) ;\ l.lwz r11,PT_GPR11(r1) #define TRACE_IRQS_OFF_ENTRY \ l.lwz r5,PT_SR(r1) ;\ l.andi r3,r5,(SPR_SR_IEE|SPR_SR_TEE) ;\ l.sfeq r5,r0 /* skip trace if irqs were already off */;\ l.bf 1f ;\ l.nop ;\ TRACE_IRQS_SAVE(r4,trace_hardirqs_off) ;\ 1: #else #define TRACE_IRQS_OFF #define TRACE_IRQS_ON #define TRACE_IRQS_OFF_ENTRY #define TRACE_IRQS_ON_SYSCALL #endif /* * We need to disable interrupts at beginning of RESTORE_ALL * since interrupt might come in after we've loaded EPC return address * and overwrite EPC with address somewhere in RESTORE_ALL * which is of course wrong! 
*/ #define RESTORE_ALL \ DISABLE_INTERRUPTS(r3,r4) ;\ l.lwz r3,PT_PC(r1) ;\ l.mtspr r0,r3,SPR_EPCR_BASE ;\ l.lwz r3,PT_SR(r1) ;\ l.mtspr r0,r3,SPR_ESR_BASE ;\ l.lwz r2,PT_GPR2(r1) ;\ l.lwz r3,PT_GPR3(r1) ;\ l.lwz r4,PT_GPR4(r1) ;\ l.lwz r5,PT_GPR5(r1) ;\ l.lwz r6,PT_GPR6(r1) ;\ l.lwz r7,PT_GPR7(r1) ;\ l.lwz r8,PT_GPR8(r1) ;\ l.lwz r9,PT_GPR9(r1) ;\ l.lwz r10,PT_GPR10(r1) ;\ l.lwz r11,PT_GPR11(r1) ;\ l.lwz r12,PT_GPR12(r1) ;\ l.lwz r13,PT_GPR13(r1) ;\ l.lwz r14,PT_GPR14(r1) ;\ l.lwz r15,PT_GPR15(r1) ;\ l.lwz r16,PT_GPR16(r1) ;\ l.lwz r17,PT_GPR17(r1) ;\ l.lwz r18,PT_GPR18(r1) ;\ l.lwz r19,PT_GPR19(r1) ;\ l.lwz r20,PT_GPR20(r1) ;\ l.lwz r21,PT_GPR21(r1) ;\ l.lwz r22,PT_GPR22(r1) ;\ l.lwz r23,PT_GPR23(r1) ;\ l.lwz r24,PT_GPR24(r1) ;\ l.lwz r25,PT_GPR25(r1) ;\ l.lwz r26,PT_GPR26(r1) ;\ l.lwz r27,PT_GPR27(r1) ;\ l.lwz r28,PT_GPR28(r1) ;\ l.lwz r29,PT_GPR29(r1) ;\ l.lwz r30,PT_GPR30(r1) ;\ l.lwz r31,PT_GPR31(r1) ;\ l.lwz r1,PT_SP(r1) ;\ l.rfe #define EXCEPTION_ENTRY(handler) \ .global handler ;\ handler: ;\ /* r1, EPCR, ESR a already saved */ ;\ l.sw PT_GPR2(r1),r2 ;\ l.sw PT_GPR3(r1),r3 ;\ /* r4 already save */ ;\ l.sw PT_GPR5(r1),r5 ;\ l.sw PT_GPR6(r1),r6 ;\ l.sw PT_GPR7(r1),r7 ;\ l.sw PT_GPR8(r1),r8 ;\ l.sw PT_GPR9(r1),r9 ;\ /* r10 already saved */ ;\ l.sw PT_GPR11(r1),r11 ;\ /* r12 already saved */ ;\ l.sw PT_GPR13(r1),r13 ;\ l.sw PT_GPR14(r1),r14 ;\ l.sw PT_GPR15(r1),r15 ;\ l.sw PT_GPR16(r1),r16 ;\ l.sw PT_GPR17(r1),r17 ;\ l.sw PT_GPR18(r1),r18 ;\ l.sw PT_GPR19(r1),r19 ;\ l.sw PT_GPR20(r1),r20 ;\ l.sw PT_GPR21(r1),r21 ;\ l.sw PT_GPR22(r1),r22 ;\ l.sw PT_GPR23(r1),r23 ;\ l.sw PT_GPR24(r1),r24 ;\ l.sw PT_GPR25(r1),r25 ;\ l.sw PT_GPR26(r1),r26 ;\ l.sw PT_GPR27(r1),r27 ;\ l.sw PT_GPR28(r1),r28 ;\ l.sw PT_GPR29(r1),r29 ;\ /* r30 already save */ ;\ l.sw PT_GPR31(r1),r31 ;\ TRACE_IRQS_OFF_ENTRY ;\ /* Store -1 in orig_gpr11 for non-syscall exceptions */ ;\ l.addi r30,r0,-1 ;\ l.sw PT_ORIG_GPR11(r1),r30 #define UNHANDLED_EXCEPTION(handler,vector) \ .global handler ;\ 
handler: ;\ /* r1, EPCR, ESR already saved */ ;\ l.sw PT_GPR2(r1),r2 ;\ l.sw PT_GPR3(r1),r3 ;\ l.sw PT_GPR5(r1),r5 ;\ l.sw PT_GPR6(r1),r6 ;\ l.sw PT_GPR7(r1),r7 ;\ l.sw PT_GPR8(r1),r8 ;\ l.sw PT_GPR9(r1),r9 ;\ /* r10 already saved */ ;\ l.sw PT_GPR11(r1),r11 ;\ /* r12 already saved */ ;\ l.sw PT_GPR13(r1),r13 ;\ l.sw PT_GPR14(r1),r14 ;\ l.sw PT_GPR15(r1),r15 ;\ l.sw PT_GPR16(r1),r16 ;\ l.sw PT_GPR17(r1),r17 ;\ l.sw PT_GPR18(r1),r18 ;\ l.sw PT_GPR19(r1),r19 ;\ l.sw PT_GPR20(r1),r20 ;\ l.sw PT_GPR21(r1),r21 ;\ l.sw PT_GPR22(r1),r22 ;\ l.sw PT_GPR23(r1),r23 ;\ l.sw PT_GPR24(r1),r24 ;\ l.sw PT_GPR25(r1),r25 ;\ l.sw PT_GPR26(r1),r26 ;\ l.sw PT_GPR27(r1),r27 ;\ l.sw PT_GPR28(r1),r28 ;\ l.sw PT_GPR29(r1),r29 ;\ /* r30 already saved */ ;\ l.sw PT_GPR31(r1),r31 ;\ /* Store -1 in orig_gpr11 for non-syscall exceptions */ ;\ l.addi r30,r0,-1 ;\ l.sw PT_ORIG_GPR11(r1),r30 ;\ l.addi r3,r1,0 ;\ /* r4 is exception EA */ ;\ l.addi r5,r0,vector ;\ l.jal unhandled_exception ;\ l.nop ;\ l.j _ret_from_exception ;\ l.nop /* clobbers 'reg' */ #define CLEAR_LWA_FLAG(reg) \ l.movhi reg,hi(lwa_flag) ;\ l.ori reg,reg,lo(lwa_flag) ;\ l.sw 0(reg),r0 /* * NOTE: one should never assume that SPR_EPC, SPR_ESR, SPR_EEAR * contain the same values as when exception we're handling * occured. in fact they never do. if you need them use * values saved on stack (for SPR_EPC, SPR_ESR) or content * of r4 (for SPR_EEAR). 
for details look at EXCEPTION_HANDLE() * in 'arch/openrisc/kernel/head.S' */ /* =====================================================[ exceptions] === */ /* ---[ 0x100: RESET exception ]----------------------------------------- */ EXCEPTION_ENTRY(_tng_kernel_start) l.jal _start l.andi r0,r0,0 /* ---[ 0x200: BUS exception ]------------------------------------------- */ EXCEPTION_ENTRY(_bus_fault_handler) CLEAR_LWA_FLAG(r3) /* r4: EA of fault (set by EXCEPTION_HANDLE) */ l.jal do_bus_fault l.addi r3,r1,0 /* pt_regs */ l.j _ret_from_exception l.nop /* ---[ 0x300: Data Page Fault exception ]------------------------------- */ EXCEPTION_ENTRY(_dtlb_miss_page_fault_handler) CLEAR_LWA_FLAG(r3) l.and r5,r5,r0 l.j 1f l.nop EXCEPTION_ENTRY(_data_page_fault_handler) CLEAR_LWA_FLAG(r3) /* set up parameters for do_page_fault */ l.ori r5,r0,0x300 // exception vector 1: l.addi r3,r1,0 // pt_regs /* r4 set be EXCEPTION_HANDLE */ // effective address of fault #ifdef CONFIG_OPENRISC_NO_SPR_SR_DSX l.lwz r6,PT_PC(r3) // address of an offending insn l.lwz r6,0(r6) // instruction that caused pf l.srli r6,r6,26 // check opcode for jump insn l.sfeqi r6,0 // l.j l.bf 8f l.sfeqi r6,1 // l.jal l.bf 8f l.sfeqi r6,3 // l.bnf l.bf 8f l.sfeqi r6,4 // l.bf l.bf 8f l.sfeqi r6,0x11 // l.jr l.bf 8f l.sfeqi r6,0x12 // l.jalr l.bf 8f l.nop l.j 9f l.nop 8: // offending insn is in delay slot l.lwz r6,PT_PC(r3) // address of an offending insn l.addi r6,r6,4 l.lwz r6,0(r6) // instruction that caused pf l.srli r6,r6,26 // get opcode 9: // offending instruction opcode loaded in r6 #else l.mfspr r6,r0,SPR_SR // SR l.andi r6,r6,SPR_SR_DSX // check for delay slot exception l.sfne r6,r0 // exception happened in delay slot l.bnf 7f l.lwz r6,PT_PC(r3) // address of an offending insn l.addi r6,r6,4 // offending insn is in delay slot 7: l.lwz r6,0(r6) // instruction that caused pf l.srli r6,r6,26 // check opcode for write access #endif l.sfgeui r6,0x33 // check opcode for write access l.bnf 1f l.sfleui r6,0x37 l.bnf 
1f l.ori r6,r0,0x1 // write access l.j 2f l.nop 1: l.ori r6,r0,0x0 // !write access 2: /* call fault.c handler in openrisc/mm/fault.c */ l.jal do_page_fault l.nop l.j _ret_from_exception l.nop /* ---[ 0x400: Insn Page Fault exception ]------------------------------- */ EXCEPTION_ENTRY(_itlb_miss_page_fault_handler) CLEAR_LWA_FLAG(r3) l.and r5,r5,r0 l.j 1f l.nop EXCEPTION_ENTRY(_insn_page_fault_handler) CLEAR_LWA_FLAG(r3) /* set up parameters for do_page_fault */ l.ori r5,r0,0x400 // exception vector 1: l.addi r3,r1,0 // pt_regs /* r4 set be EXCEPTION_HANDLE */ // effective address of fault l.ori r6,r0,0x0 // !write access /* call fault.c handler in openrisc/mm/fault.c */ l.jal do_page_fault l.nop l.j _ret_from_exception l.nop /* ---[ 0x500: Timer exception ]----------------------------------------- */ EXCEPTION_ENTRY(_timer_handler) CLEAR_LWA_FLAG(r3) l.jal timer_interrupt l.addi r3,r1,0 /* pt_regs */ l.j _ret_from_intr l.nop /* ---[ 0x600: Alignment exception ]-------------------------------------- */ EXCEPTION_ENTRY(_alignment_handler) CLEAR_LWA_FLAG(r3) /* r4: EA of fault (set by EXCEPTION_HANDLE) */ l.jal do_unaligned_access l.addi r3,r1,0 /* pt_regs */ l.j _ret_from_exception l.nop #if 0 EXCEPTION_ENTRY(_alignment_handler) // l.mfspr r2,r0,SPR_EEAR_BASE /* Load the effective address */ l.addi r2,r4,0 // l.mfspr r5,r0,SPR_EPCR_BASE /* Load the insn address */ l.lwz r5,PT_PC(r1) l.lwz r3,0(r5) /* Load insn */ l.srli r4,r3,26 /* Shift left to get the insn opcode */ l.sfeqi r4,0x00 /* Check if the load/store insn is in delay slot */ l.bf jmp l.sfeqi r4,0x01 l.bf jmp l.sfeqi r4,0x03 l.bf jmp l.sfeqi r4,0x04 l.bf jmp l.sfeqi r4,0x11 l.bf jr l.sfeqi r4,0x12 l.bf jr l.nop l.j 1f l.addi r5,r5,4 /* Increment PC to get return insn address */ jmp: l.slli r4,r3,6 /* Get the signed extended jump length */ l.srai r4,r4,4 l.lwz r3,4(r5) /* Load the real load/store insn */ l.add r5,r5,r4 /* Calculate jump target address */ l.j 1f l.srli r4,r3,26 /* Shift left to get the insn 
opcode */ jr: l.slli r4,r3,9 /* Shift to get the reg nb */ l.andi r4,r4,0x7c l.lwz r3,4(r5) /* Load the real load/store insn */ l.add r4,r4,r1 /* Load the jump register value from the stack */ l.lwz r5,0(r4) l.srli r4,r3,26 /* Shift left to get the insn opcode */ 1: // l.mtspr r0,r5,SPR_EPCR_BASE l.sw PT_PC(r1),r5 l.sfeqi r4,0x26 l.bf lhs l.sfeqi r4,0x25 l.bf lhz l.sfeqi r4,0x22 l.bf lws l.sfeqi r4,0x21 l.bf lwz l.sfeqi r4,0x37 l.bf sh l.sfeqi r4,0x35 l.bf sw l.nop 1: l.j 1b /* I don't know what to do */ l.nop lhs: l.lbs r5,0(r2) l.slli r5,r5,8 l.lbz r6,1(r2) l.or r5,r5,r6 l.srli r4,r3,19 l.andi r4,r4,0x7c l.add r4,r4,r1 l.j align_end l.sw 0(r4),r5 lhz: l.lbz r5,0(r2) l.slli r5,r5,8 l.lbz r6,1(r2) l.or r5,r5,r6 l.srli r4,r3,19 l.andi r4,r4,0x7c l.add r4,r4,r1 l.j align_end l.sw 0(r4),r5 lws: l.lbs r5,0(r2) l.slli r5,r5,24 l.lbz r6,1(r2) l.slli r6,r6,16 l.or r5,r5,r6 l.lbz r6,2(r2) l.slli r6,r6,8 l.or r5,r5,r6 l.lbz r6,3(r2) l.or r5,r5,r6 l.srli r4,r3,19 l.andi r4,r4,0x7c l.add r4,r4,r1 l.j align_end l.sw 0(r4),r5 lwz: l.lbz r5,0(r2) l.slli r5,r5,24 l.lbz r6,1(r2) l.slli r6,r6,16 l.or r5,r5,r6 l.lbz r6,2(r2) l.slli r6,r6,8 l.or r5,r5,r6 l.lbz r6,3(r2) l.or r5,r5,r6 l.srli r4,r3,19 l.andi r4,r4,0x7c l.add r4,r4,r1 l.j align_end l.sw 0(r4),r5 sh: l.srli r4,r3,9 l.andi r4,r4,0x7c l.add r4,r4,r1 l.lwz r5,0(r4) l.sb 1(r2),r5 l.srli r5,r5,8 l.j align_end l.sb 0(r2),r5 sw: l.srli r4,r3,9 l.andi r4,r4,0x7c l.add r4,r4,r1 l.lwz r5,0(r4) l.sb 3(r2),r5 l.srli r5,r5,8 l.sb 2(r2),r5 l.srli r5,r5,8 l.sb 1(r2),r5 l.srli r5,r5,8 l.j align_end l.sb 0(r2),r5 align_end: l.j _ret_from_intr l.nop #endif /* ---[ 0x700: Illegal insn exception ]---------------------------------- */ EXCEPTION_ENTRY(_illegal_instruction_handler) /* r4: EA of fault (set by EXCEPTION_HANDLE) */ l.jal do_illegal_instruction l.addi r3,r1,0 /* pt_regs */ l.j _ret_from_exception l.nop /* ---[ 0x800: External interrupt exception ]---------------------------- */ EXCEPTION_ENTRY(_external_irq_handler) #ifdef 
CONFIG_OPENRISC_ESR_EXCEPTION_BUG_CHECK l.lwz r4,PT_SR(r1) // were interrupts enabled ? l.andi r4,r4,SPR_SR_IEE l.sfeqi r4,0 l.bnf 1f // ext irq enabled, all ok. l.nop #ifdef CONFIG_PRINTK l.addi r1,r1,-0x8 l.movhi r3,hi(42f) l.ori r3,r3,lo(42f) l.sw 0x0(r1),r3 l.jal _printk l.sw 0x4(r1),r4 l.addi r1,r1,0x8 .section .rodata, "a" 42: .string "\n\rESR interrupt bug: in _external_irq_handler (ESR %x)\n\r" .align 4 .previous #endif l.ori r4,r4,SPR_SR_IEE // fix the bug // l.sw PT_SR(r1),r4 1: #endif CLEAR_LWA_FLAG(r3) l.addi r3,r1,0 l.movhi r8,hi(generic_handle_arch_irq) l.ori r8,r8,lo(generic_handle_arch_irq) l.jalr r8 l.nop l.j _ret_from_intr l.nop /* ---[ 0x900: DTLB miss exception ]------------------------------------- */ /* ---[ 0xa00: ITLB miss exception ]------------------------------------- */ /* ---[ 0xb00: Range exception ]----------------------------------------- */ UNHANDLED_EXCEPTION(_vector_0xb00,0xb00) /* ---[ 0xc00: Syscall exception ]--------------------------------------- */ /* * Syscalls are a special type of exception in that they are * _explicitly_ invoked by userspace and can therefore be * held to conform to the same ABI as normal functions with * respect to whether registers are preserved across the call * or not. */ /* Upon syscall entry we just save the callee-saved registers * and not the call-clobbered ones. */ _string_syscall_return: .string "syscall r9:0x%08x -> syscall(%ld) return %ld\0" .align 4 ENTRY(_sys_call_handler) /* r1, EPCR, ESR a already saved */ l.sw PT_GPR2(r1),r2 /* r3-r8 must be saved because syscall restart relies * on us being able to restart the syscall args... 
technically * they should be clobbered, otherwise */ l.sw PT_GPR3(r1),r3 /* * r4 already saved * r4 holds the EEAR address of the fault, use it as screatch reg and * then load the original r4 */ CLEAR_LWA_FLAG(r4) l.lwz r4,PT_GPR4(r1) l.sw PT_GPR5(r1),r5 l.sw PT_GPR6(r1),r6 l.sw PT_GPR7(r1),r7 l.sw PT_GPR8(r1),r8 l.sw PT_GPR9(r1),r9 /* r10 already saved */ l.sw PT_GPR11(r1),r11 /* orig_gpr11 must be set for syscalls */ l.sw PT_ORIG_GPR11(r1),r11 /* r12,r13 already saved */ /* r14-r28 (even) aren't touched by the syscall fast path below * so we don't need to save them. However, the functions that return * to userspace via a call to switch() DO need to save these because * switch() effectively clobbers them... saving these registers for * such functions is handled in their syscall wrappers (see fork, vfork, * and clone, below). /* r30 is the only register we clobber in the fast path */ /* r30 already saved */ /* l.sw PT_GPR30(r1),r30 */ _syscall_check_trace_enter: /* syscalls run with interrupts enabled */ TRACE_IRQS_ON_SYSCALL ENABLE_INTERRUPTS(r29) // enable interrupts, r29 is temp /* If TIF_SYSCALL_TRACE is set, then we want to do syscall tracing */ l.lwz r30,TI_FLAGS(r10) l.andi r30,r30,_TIF_SYSCALL_TRACE l.sfne r30,r0 l.bf _syscall_trace_enter l.nop _syscall_check: /* Ensure that the syscall number is reasonable */ l.sfgeui r11,__NR_syscalls l.bf _syscall_badsys l.nop _syscall_call: l.movhi r29,hi(sys_call_table) l.ori r29,r29,lo(sys_call_table) l.slli r11,r11,2 l.add r29,r29,r11 l.lwz r29,0(r29) l.jalr r29 l.nop _syscall_return: /* All syscalls return here... just pay attention to ret_from_fork * which does it in a round-about way. 
*/ l.sw PT_GPR11(r1),r11 // save return value #if 0 _syscall_debug: l.movhi r3,hi(_string_syscall_return) l.ori r3,r3,lo(_string_syscall_return) l.ori r27,r0,2 l.sw -4(r1),r27 l.sw -8(r1),r11 l.lwz r29,PT_ORIG_GPR11(r1) l.sw -12(r1),r29 l.lwz r29,PT_GPR9(r1) l.sw -16(r1),r29 l.movhi r27,hi(_printk) l.ori r27,r27,lo(_printk) l.jalr r27 l.addi r1,r1,-16 l.addi r1,r1,16 #endif #if 0 _syscall_show_regs: l.movhi r27,hi(show_registers) l.ori r27,r27,lo(show_registers) l.jalr r27 l.or r3,r1,r1 #endif _syscall_check_trace_leave: /* r30 is a callee-saved register so this should still hold the * _TIF_SYSCALL_TRACE flag from _syscall_check_trace_enter above... * _syscall_trace_leave expects syscall result to be in pt_regs->r11. */ l.sfne r30,r0 l.bf _syscall_trace_leave l.nop /* This is where the exception-return code begins... interrupts need to be * disabled the rest of the way here because we can't afford to miss any * interrupts that set NEED_RESCHED or SIGNALPENDING... really true? */ _syscall_check_work: /* Here we need to disable interrupts */ DISABLE_INTERRUPTS(r27,r29) TRACE_IRQS_OFF l.lwz r30,TI_FLAGS(r10) l.andi r30,r30,_TIF_WORK_MASK l.sfne r30,r0 l.bnf _syscall_resume_userspace l.nop /* Work pending follows a different return path, so we need to * make sure that all the call-saved registers get into pt_regs * before branching... */ l.sw PT_GPR14(r1),r14 l.sw PT_GPR16(r1),r16 l.sw PT_GPR18(r1),r18 l.sw PT_GPR20(r1),r20 l.sw PT_GPR22(r1),r22 l.sw PT_GPR24(r1),r24 l.sw PT_GPR26(r1),r26 l.sw PT_GPR28(r1),r28 /* _work_pending needs to be called with interrupts disabled */ l.j _work_pending l.nop _syscall_resume_userspace: // ENABLE_INTERRUPTS(r29) /* This is the hot path for returning to userspace from a syscall. If there's * work to be done and the branch to _work_pending was taken above, then the * return to userspace will be done via the normal exception return path... 
* that path restores _all_ registers and will overwrite the "clobbered" * registers with whatever garbage is in pt_regs -- that's OK because those * registers are clobbered anyway and because the extra work is insignificant * in the context of the extra work that _work_pending is doing. /* Once again, syscalls are special and only guarantee to preserve the * same registers as a normal function call */ /* The assumption here is that the registers r14-r28 (even) are untouched and * don't need to be restored... be sure that that's really the case! */ /* This is still too much... we should only be restoring what we actually * clobbered... we should even be using 'scratch' (odd) regs above so that * we don't need to restore anything, hardly... */ l.lwz r2,PT_GPR2(r1) /* Restore args */ /* r3-r8 are technically clobbered, but syscall restart needs these * to be restored... */ l.lwz r3,PT_GPR3(r1) l.lwz r4,PT_GPR4(r1) l.lwz r5,PT_GPR5(r1) l.lwz r6,PT_GPR6(r1) l.lwz r7,PT_GPR7(r1) l.lwz r8,PT_GPR8(r1) l.lwz r9,PT_GPR9(r1) l.lwz r10,PT_GPR10(r1) l.lwz r11,PT_GPR11(r1) /* r30 is the only register we clobber in the fast path */ l.lwz r30,PT_GPR30(r1) /* Here we use r13-r19 (odd) as scratch regs */ l.lwz r13,PT_PC(r1) l.lwz r15,PT_SR(r1) l.lwz r1,PT_SP(r1) /* Interrupts need to be disabled for setting EPCR and ESR * so that another interrupt doesn't come in here and clobber * them before we can use them for our l.rfe */ DISABLE_INTERRUPTS(r17,r19) l.mtspr r0,r13,SPR_EPCR_BASE l.mtspr r0,r15,SPR_ESR_BASE l.rfe /* End of hot path! * Keep the below tracing and error handling out of the hot path... */ _syscall_trace_enter: /* Here we pass pt_regs to do_syscall_trace_enter. Make sure * that function is really getting all the info it needs as * pt_regs isn't a complete set of userspace regs, just the * ones relevant to the syscall... * * Note use of delay slot for setting argument. 
*/ l.jal do_syscall_trace_enter l.addi r3,r1,0 /* Restore arguments (not preserved across do_syscall_trace_enter) * so that we can do the syscall for real and return to the syscall * hot path. */ l.lwz r11,PT_GPR11(r1) l.lwz r3,PT_GPR3(r1) l.lwz r4,PT_GPR4(r1) l.lwz r5,PT_GPR5(r1) l.lwz r6,PT_GPR6(r1) l.lwz r7,PT_GPR7(r1) l.j _syscall_check l.lwz r8,PT_GPR8(r1) _syscall_trace_leave: l.jal do_syscall_trace_leave l.addi r3,r1,0 l.j _syscall_check_work l.nop _syscall_badsys: /* Here we effectively pretend to have executed an imaginary * syscall that returns -ENOSYS and then return to the regular * syscall hot path. * Note that "return value" is set in the delay slot... */ l.j _syscall_return l.addi r11,r0,-ENOSYS /******* END SYSCALL HANDLING *******/ /* ---[ 0xd00: Trap exception ]------------------------------------------ */ UNHANDLED_EXCEPTION(_vector_0xd00,0xd00) /* ---[ 0xe00: Trap exception ]------------------------------------------ */ EXCEPTION_ENTRY(_trap_handler) CLEAR_LWA_FLAG(r3) /* r4: EA of fault (set by EXCEPTION_HANDLE) */ l.jal do_trap l.addi r3,r1,0 /* pt_regs */ l.j _ret_from_exception l.nop /* ---[ 0xf00: Reserved exception ]-------------------------------------- */ UNHANDLED_EXCEPTION(_vector_0xf00,0xf00) /* ---[ 0x1000: Reserved exception ]------------------------------------- */ UNHANDLED_EXCEPTION(_vector_0x1000,0x1000) /* ---[ 0x1100: Reserved exception ]------------------------------------- */ UNHANDLED_EXCEPTION(_vector_0x1100,0x1100) /* ---[ 0x1200: Reserved exception ]------------------------------------- */ UNHANDLED_EXCEPTION(_vector_0x1200,0x1200) /* ---[ 0x1300: Reserved exception ]------------------------------------- */ UNHANDLED_EXCEPTION(_vector_0x1300,0x1300) /* ---[ 0x1400: Reserved exception ]------------------------------------- */ UNHANDLED_EXCEPTION(_vector_0x1400,0x1400) /* ---[ 0x1500: Reserved exception ]------------------------------------- */ UNHANDLED_EXCEPTION(_vector_0x1500,0x1500) /* ---[ 0x1600: Reserved exception 
]------------------------------------- */ UNHANDLED_EXCEPTION(_vector_0x1600,0x1600) /* ---[ 0x1700: Reserved exception ]------------------------------------- */ UNHANDLED_EXCEPTION(_vector_0x1700,0x1700) /* ---[ 0x1800: Reserved exception ]------------------------------------- */ UNHANDLED_EXCEPTION(_vector_0x1800,0x1800) /* ---[ 0x1900: Reserved exception ]------------------------------------- */ UNHANDLED_EXCEPTION(_vector_0x1900,0x1900) /* ---[ 0x1a00: Reserved exception ]------------------------------------- */ UNHANDLED_EXCEPTION(_vector_0x1a00,0x1a00) /* ---[ 0x1b00: Reserved exception ]------------------------------------- */ UNHANDLED_EXCEPTION(_vector_0x1b00,0x1b00) /* ---[ 0x1c00: Reserved exception ]------------------------------------- */ UNHANDLED_EXCEPTION(_vector_0x1c00,0x1c00) /* ---[ 0x1d00: Reserved exception ]------------------------------------- */ UNHANDLED_EXCEPTION(_vector_0x1d00,0x1d00) /* ---[ 0x1e00: Reserved exception ]------------------------------------- */ UNHANDLED_EXCEPTION(_vector_0x1e00,0x1e00) /* ---[ 0x1f00: Reserved exception ]------------------------------------- */ UNHANDLED_EXCEPTION(_vector_0x1f00,0x1f00) /* ========================================================[ return ] === */ _resume_userspace: DISABLE_INTERRUPTS(r3,r4) TRACE_IRQS_OFF l.lwz r4,TI_FLAGS(r10) l.andi r13,r4,_TIF_WORK_MASK l.sfeqi r13,0 l.bf _restore_all l.nop _work_pending: l.lwz r5,PT_ORIG_GPR11(r1) l.sfltsi r5,0 l.bnf 1f l.nop l.andi r5,r5,0 1: l.jal do_work_pending l.ori r3,r1,0 /* pt_regs */ l.sfeqi r11,0 l.bf _restore_all l.nop l.sfltsi r11,0 l.bnf 1f l.nop l.and r11,r11,r0 l.ori r11,r11,__NR_restart_syscall l.j _syscall_check_trace_enter l.nop 1: l.lwz r11,PT_ORIG_GPR11(r1) /* Restore arg registers */ l.lwz r3,PT_GPR3(r1) l.lwz r4,PT_GPR4(r1) l.lwz r5,PT_GPR5(r1) l.lwz r6,PT_GPR6(r1) l.lwz r7,PT_GPR7(r1) l.j _syscall_check_trace_enter l.lwz r8,PT_GPR8(r1) _restore_all: #ifdef CONFIG_TRACE_IRQFLAGS l.lwz r4,PT_SR(r1) l.andi 
r3,r4,(SPR_SR_IEE|SPR_SR_TEE) l.sfeq r3,r0 /* skip trace if irqs were off */ l.bf skip_hardirqs_on l.nop TRACE_IRQS_ON skip_hardirqs_on: #endif RESTORE_ALL /* This returns to userspace code */ ENTRY(_ret_from_intr) ENTRY(_ret_from_exception) l.lwz r4,PT_SR(r1) l.andi r3,r4,SPR_SR_SM l.sfeqi r3,0 l.bnf _restore_all l.nop l.j _resume_userspace l.nop ENTRY(ret_from_fork) l.jal schedule_tail l.nop /* Check if we are a kernel thread */ l.sfeqi r20,0 l.bf 1f l.nop /* ...we are a kernel thread so invoke the requested callback */ l.jalr r20 l.or r3,r22,r0 1: /* _syscall_returns expect r11 to contain return value */ l.lwz r11,PT_GPR11(r1) /* The syscall fast path return expects call-saved registers * r14-r28 to be untouched, so we restore them here as they * will have been effectively clobbered when arriving here * via the call to switch() */ l.lwz r14,PT_GPR14(r1) l.lwz r16,PT_GPR16(r1) l.lwz r18,PT_GPR18(r1) l.lwz r20,PT_GPR20(r1) l.lwz r22,PT_GPR22(r1) l.lwz r24,PT_GPR24(r1) l.lwz r26,PT_GPR26(r1) l.lwz r28,PT_GPR28(r1) l.j _syscall_return l.nop /* ========================================================[ switch ] === */ /* * This routine switches between two different tasks. The process * state of one is saved on its kernel stack. Then the state * of the other is restored from its kernel stack. The memory * management hardware is updated to the second process's state. * Finally, we can return to the second process, via the 'return'. * * Note: there are two ways to get to the "going out" portion * of this code; either by coming in via the entry (_switch) * or via "fork" which must set up an environment equivalent * to the "_switch" path. If you change this (or in particular, the * SAVE_REGS macro), you'll have to change the fork code also. */ /* _switch MUST never lay on page boundry, cause it runs from * effective addresses and beeing interrupted by iTLB miss would kill it. 
* dTLB miss seems to never accour in the bad place since data accesses * are from task structures which are always page aligned. * * The problem happens in RESTORE_ALL where we first set the EPCR * register, then load the previous register values and only at the end call * the l.rfe instruction. If get TLB miss in beetwen the EPCR register gets * garbled and we end up calling l.rfe with the wrong EPCR. (same probably * holds for ESR) * * To avoid this problems it is sufficient to align _switch to * some nice round number smaller than it's size... */ /* ABI rules apply here... we either enter _switch via schedule() or via * an imaginary call to which we shall return at return_from_fork. Either * way, we are a function call and only need to preserve the callee-saved * registers when we return. As such, we don't need to save the registers * on the stack that we won't be returning as they were... */ .align 0x400 ENTRY(_switch) /* We don't store SR as _switch only gets called in a context where * the SR will be the same going in and coming out... */ /* Set up new pt_regs struct for saving task state */ l.addi r1,r1,-(INT_FRAME_SIZE) /* No need to store r1/PT_SP as it goes into KSP below */ l.sw PT_GPR2(r1),r2 l.sw PT_GPR9(r1),r9 /* Save callee-saved registers to the new pt_regs */ l.sw PT_GPR14(r1),r14 l.sw PT_GPR16(r1),r16 l.sw PT_GPR18(r1),r18 l.sw PT_GPR20(r1),r20 l.sw PT_GPR22(r1),r22 l.sw PT_GPR24(r1),r24 l.sw PT_GPR26(r1),r26 l.sw PT_GPR28(r1),r28 l.sw PT_GPR30(r1),r30 l.addi r11,r10,0 /* Save old 'current' to 'last' return value*/ /* We use thread_info->ksp for storing the address of the above * structure so that we can get back to it later... we don't want * to lose the value of thread_info->ksp, though, so store it as * pt_regs->sp so that we can easily restore it when we are made * live again... 
*/ /* Save the old value of thread_info->ksp as pt_regs->sp */ l.lwz r29,TI_KSP(r10) l.sw PT_SP(r1),r29 /* Swap kernel stack pointers */ l.sw TI_KSP(r10),r1 /* Save old stack pointer */ l.or r10,r4,r0 /* Set up new current_thread_info */ l.lwz r1,TI_KSP(r10) /* Load new stack pointer */ /* Restore the old value of thread_info->ksp */ l.lwz r29,PT_SP(r1) l.sw TI_KSP(r10),r29 /* ...and restore the registers, except r11 because the return value * has already been set above. */ l.lwz r2,PT_GPR2(r1) l.lwz r9,PT_GPR9(r1) /* No need to restore r10 */ /* ...and do not restore r11 */ /* Restore callee-saved registers */ l.lwz r14,PT_GPR14(r1) l.lwz r16,PT_GPR16(r1) l.lwz r18,PT_GPR18(r1) l.lwz r20,PT_GPR20(r1) l.lwz r22,PT_GPR22(r1) l.lwz r24,PT_GPR24(r1) l.lwz r26,PT_GPR26(r1) l.lwz r28,PT_GPR28(r1) l.lwz r30,PT_GPR30(r1) /* Unwind stack to pre-switch state */ l.addi r1,r1,(INT_FRAME_SIZE) /* Return via the link-register back to where we 'came from', where * that may be either schedule(), ret_from_fork(), or * ret_from_kernel_thread(). If we are returning to a new thread, * we are expected to have set up the arg to schedule_tail already, * hence we do so here unconditionally: */ l.lwz r3,TI_TASK(r3) /* Load 'prev' as schedule_tail arg */ l.jr r9 l.nop /* ==================================================================== */ /* These all use the delay slot for setting the argument register, so the * jump is always happening after the l.addi instruction. * * These are all just wrappers that don't touch the link-register r9, so the * return from the "real" syscall function will return back to the syscall * code that did the l.jal that brought us here. */ /* fork requires that we save all the callee-saved registers because they * are all effectively clobbered by the call to _switch. Here we store * all the registers that aren't touched by the syscall fast path and thus * weren't saved there. 
*/ _fork_save_extra_regs_and_call: l.sw PT_GPR14(r1),r14 l.sw PT_GPR16(r1),r16 l.sw PT_GPR18(r1),r18 l.sw PT_GPR20(r1),r20 l.sw PT_GPR22(r1),r22 l.sw PT_GPR24(r1),r24 l.sw PT_GPR26(r1),r26 l.jr r29 l.sw PT_GPR28(r1),r28 ENTRY(__sys_clone) l.movhi r29,hi(sys_clone) l.j _fork_save_extra_regs_and_call l.ori r29,r29,lo(sys_clone) ENTRY(__sys_clone3) l.movhi r29,hi(sys_clone3) l.j _fork_save_extra_regs_and_call l.ori r29,r29,lo(sys_clone3) ENTRY(__sys_fork) l.movhi r29,hi(sys_fork) l.j _fork_save_extra_regs_and_call l.ori r29,r29,lo(sys_fork) ENTRY(sys_rt_sigreturn) l.jal _sys_rt_sigreturn l.addi r3,r1,0 l.sfne r30,r0 l.bnf _no_syscall_trace l.nop l.jal do_syscall_trace_leave l.addi r3,r1,0 _no_syscall_trace: l.j _resume_userspace l.nop /* This is a catch-all syscall for atomic instructions for the OpenRISC 1000. * The functions takes a variable number of parameters depending on which * particular flavour of atomic you want... parameter 1 is a flag identifying * the atomic in question. Currently, this function implements the * following variants: * * XCHG: * @flag: 1 * @ptr1: * @ptr2: * Atomically exchange the values in pointers 1 and 2. * */ ENTRY(sys_or1k_atomic) /* FIXME: This ignores r3 and always does an XCHG */ DISABLE_INTERRUPTS(r17,r19) l.lwz r29,0(r4) l.lwz r27,0(r5) l.sw 0(r4),r27 l.sw 0(r5),r29 ENABLE_INTERRUPTS(r17) l.jr r9 l.or r11,r0,r0 /* ============================================================[ EOF ]=== */
aixcc-public/challenge-001-exemplar-source
2,280
arch/openrisc/kernel/vmlinux.lds.S
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * OpenRISC vmlinux.lds.S
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 *
 * ld script for OpenRISC architecture
 */

/* TODO
 *	- clean up __offset & stuff
 *	- change all 8192 alignment to PAGE !!!
 *	- recheck if all alignments are really needed
 */

/* Kernel runs at its link address: load base and virt/phys offset are
 * both PAGE_OFFSET. */
#  define LOAD_OFFSET  PAGE_OFFSET
#  define LOAD_BASE    PAGE_OFFSET

#include <asm/page.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm-generic/vmlinux.lds.h>

#ifdef __OR1K__
#define __OUTPUT_FORMAT "elf32-or1k"
#else
#define __OUTPUT_FORMAT "elf32-or32"
#endif

OUTPUT_FORMAT(__OUTPUT_FORMAT, __OUTPUT_FORMAT, __OUTPUT_FORMAT)

/* 'jiffies' aliases the least-significant 32 bits of jiffies_64; on this
 * big-endian architecture those live at byte offset 4. */
jiffies = jiffies_64 + 4;

SECTIONS
{
	/* Read-only sections, merged into text segment: */
	. = LOAD_BASE ;

	_text = .;

	/* _s_kernel_ro must be page aligned */
	. = ALIGN(PAGE_SIZE);
	_s_kernel_ro = .;

	.text : AT(ADDR(.text) - LOAD_OFFSET)
	{
		_stext = .;
		TEXT_TEXT
		SCHED_TEXT
		CPUIDLE_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		IRQENTRY_TEXT
		SOFTIRQENTRY_TEXT
		*(.fixup)
		*(.text.__*)
		_etext = .;
	}
	/* TODO: Check if fixup and text.__* are really necessary
	 * fixup is definitely necessary
	 */

	_sdata = .;

	/* Page alignment required for RO_DATA */
	RO_DATA(PAGE_SIZE)
	_e_kernel_ro = .;

	/* Whatever comes after _e_kernel_ro had better be page-aligned, too */

	/* 32 here is cacheline size... recheck this */
	RW_DATA(32, PAGE_SIZE, PAGE_SIZE)

	_edata = .;

	EXCEPTION_TABLE(4)

	/* Init code and data -- freed after boot, so page-aligned so the
	 * whole region can be returned to the page allocator. */
	. = ALIGN(PAGE_SIZE);
	__init_begin = .;

	HEAD_TEXT_SECTION

	/* Page aligned */
	INIT_TEXT_SECTION(PAGE_SIZE)

	/* Align __setup_start on 16 byte boundary */
	INIT_DATA_SECTION(16)

	PERCPU_SECTION(L1_CACHE_BYTES)

	__init_end = .;

	BSS_SECTION(0, 0, 0x20)

	_end = .;

	/* Throw in the debugging sections */
	STABS_DEBUG
	DWARF_DEBUG
	ELF_DETAILS

	/* Sections to be discarded -- must be last */
	DISCARDS
}
aixcc-public/challenge-001-exemplar-source
1,838
arch/openrisc/lib/string.S
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * OpenRISC string.S
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 */

#include <linux/linkage.h>
#include <asm/errno.h>

	/*
	 * this can be optimized by doing gcc inline assembly with
	 * proper constraints (no need to save args registers...)
	 *
	 * OpenRISC calling convention reminder: args in r3..r8, return
	 * value in r11, link register r9; every taken branch/jump has one
	 * delay slot, so the instruction after each l.bf/l.j/l.jr also
	 * executes.
	 */

/*
 *
 * int __copy_tofrom_user(void *to, const void *from, unsigned long size);
 *
 * NOTE: it returns number of bytes NOT copied !!!
 *
 * Copies byte-by-byte; the load (8:) and store (9:) may fault on a bad
 * user pointer, in which case the __ex_table entries redirect to the
 * fixup at 99:, which takes the normal exit with the remaining count.
 */
	.global	__copy_tofrom_user
__copy_tofrom_user:
	/* Spill the caller-visible regs we clobber: r6 (byte temp),
	 * r4 (from), r3 (to). */
	l.addi	r1,r1,-12
	l.sw	0(r1),r6
	l.sw	4(r1),r4
	l.sw	8(r1),r3
	l.addi	r11,r5,0		/* r11 = bytes remaining */
2:	l.sfeq	r11,r0			/* all bytes copied? */
	l.bf	1f
	 l.addi	r11,r11,-1		/* delay slot: pre-decrement,
					 * compensated by the +1 at 1: */
8:	l.lbz	r6,0(r4)		/* may fault (read) */
9:	l.sb	0(r3),r6		/* may fault (write) */
	l.addi	r3,r3,1
	l.j	2b
	 l.addi	r4,r4,1			/* delay slot: advance source */
1:	l.addi	r11,r11,1		// r11 holds the return value
	l.lwz	r6,0(r1)
	l.lwz	r4,4(r1)
	l.lwz	r3,8(r1)
	l.jr	r9
	 l.addi	r1,r1,12		/* delay slot: pop spill frame */

	/* Fault fixup: resume at the exit path; r11 already counts the
	 * bytes NOT copied (the faulting byte included via the +1). */
	.section .fixup, "ax"
99:
		l.j	1b
		l.nop
	.previous

	.section __ex_table, "a"
		.long 8b, 99b		// read fault
		.long 9b, 99b		// write fault
	.previous

/*
 * unsigned long clear_user(void *addr, unsigned long size) ;
 *
 * NOTE: it returns number of bytes NOT cleared !!!
 *
 * Same structure as __copy_tofrom_user, store-only: the byte store at
 * 9: may fault, and the fixup exits with the remaining count in r11.
 */
	.global	__clear_user
__clear_user:
	/* Spill r4 (size counter) and r3 (addr). */
	l.addi	r1,r1,-8
	l.sw	0(r1),r4
	l.sw	4(r1),r3
2:	l.sfeq	r4,r0			/* done? */
	l.bf	1f
	 l.addi	r4,r4,-1		/* delay slot: pre-decrement,
					 * compensated by the +1 at 1: */
9:	l.sb	0(r3),r0		/* may fault (write zero byte) */
	l.j	2b
	 l.addi	r3,r3,1			/* delay slot: advance */
1:	l.addi	r11,r4,1		/* r11 = bytes NOT cleared */
	l.lwz	r4,0(r1)
	l.lwz	r3,4(r1)
	l.jr	r9
	 l.addi	r1,r1,8			/* delay slot: pop spill frame */

	.section .fixup, "ax"
99:
		l.j	1b
		l.nop
	.previous

	.section __ex_table, "a"
		.long 9b, 99b		// write fault
	.previous
aixcc-public/challenge-001-exemplar-source
2,118
arch/openrisc/lib/memset.S
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * OpenRISC memset.S
 *
 * Hand-optimized assembler version of memset for OpenRISC.
 * Algorithm inspired by several other arch-specific memset routines
 * in the kernel tree
 *
 * Copyright (C) 2015 Olof Kindgren <olof.kindgren@gmail.com>
 */

	/*
	 * void *memset(void *s, int c, size_t n)
	 *
	 * OpenRISC calling convention: r3 = s, r4 = c, r5 = n; the return
	 * value (the original s) goes in r11, r9 is the link register.
	 * Branches have one delay slot: the instruction following each
	 * l.bf/l.jr executes whether or not the branch is taken.
	 *
	 * Strategy: for n >= 8 and nonzero c, replicate c into a full word
	 * and store word-at-a-time (after aligning the pointer), then mop
	 * up the tail byte-by-byte; otherwise fall back to a byte loop.
	 */
	.global memset
	.type	memset, @function
memset:
	/* arguments:
	 * r3 = *s
	 * r4 = c
	 * r5 = n
	 * r13, r15, r17, r19 used as temp regs
	 */

	/* Exit if n == 0 */
	l.sfeqi	r5, 0
	l.bf	4f

	/* Truncate c to char */
	l.andi	r13, r4, 0xff

	/* Skip word extension if c is 0 (a zero byte is already a zero
	 * word, so the replication below is unnecessary) */
	l.sfeqi	r13, 0
	l.bf	1f
	/* Check for at least two whole words (8 bytes); delay slot sets
	 * the flag consumed by the l.bf at 1: below */
	l.sfleui	r5, 7

	/* Extend char c to 32-bit word cccc in r13 */
	l.slli	r15, r13, 16	// r13 = 000c, r15 = 0c00
	l.or	r13, r13, r15	// r13 = 0c0c, r15 = 0c00
	l.slli	r15, r13, 8	// r13 = 0c0c, r15 = c0c0
	l.or	r13, r13, r15	// r13 = cccc, r15 = c0c0

1:	l.addi	r19, r3, 0	// Set r19 = src
	/* Jump to byte copy loop if less than two words */
	l.bf	3f
	 l.or	r17, r5, r0	// Set r17 = n (in delay slot)

	/* Mask out two LSBs to check alignment */
	l.andi	r15, r3, 0x3

	/* lsb == 00, jump to word copy loop */
	l.sfeqi	r15, 0
	l.bf	2f
	 l.addi	r19, r3, 0	// Set r19 = src

	/* lsb == 01,10 or 11: store up to 3 leading bytes until the
	 * pointer is word aligned, decrementing n for each */
	l.sb	0(r3), r13	// *src = c
	l.addi	r17, r17, -1	// Decrease n

	l.sfeqi	r15, 3
	l.bf	2f
	 l.addi	r19, r3, 1	// src += 1

	/* lsb == 01 or 10 */
	l.sb	1(r3), r13	// *(src+1) = c
	l.addi	r17, r17, -1	// Decrease n

	l.sfeqi	r15, 2
	l.bf	2f
	 l.addi	r19, r3, 2	// src += 2

	/* lsb == 01 */
	l.sb	2(r3), r13	// *(src+2) = c
	l.addi	r17, r17, -1	// Decrease n
	l.addi	r19, r3, 3	// src += 3

	/* Word copy loop */
2:	l.sw	0(r19), r13	// *src = cccc
	l.addi	r17, r17, -4	// Decrease n
	l.sfgeui	r17, 4
	l.bf	2b
	 l.addi	r19, r19, 4	// Increase src (in delay slot)

	/* When n > 0, copy the remaining bytes, otherwise jump to exit */
	l.sfeqi	r17, 0
	l.bf	4f

	/* Byte copy loop */
3:	l.addi	r17, r17, -1	// Decrease n
	l.sb	0(r19), r13	// *src = cccc
	l.sfnei	r17, 0
	l.bf	3b
	 l.addi	r19, r19, 1	// Increase src (in delay slot)

4:	l.jr	r9
	 l.ori	r11, r3, 0	// delay slot: return original s in r11
aixcc-public/challenge-001-exemplar-source
8,988
arch/riscv/kernel/head.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 *
 * RISC-V kernel entry: boot image header, early boot (_start_kernel),
 * MMU enablement, and secondary-hart bring-up.
 */

#include <asm/asm-offsets.h>
#include <asm/asm.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/csr.h>
#include <asm/cpu_ops_sbi.h>
#include <asm/hwcap.h>
#include <asm/image.h>
#include <asm/xip_fixup.h>
#include "efi-header.S"

__HEAD
ENTRY(_start)
	/*
	 * Image header expected by Linux boot-loaders. The image header data
	 * structure is described in asm/image.h.
	 * Do not modify it without modifying the structure and all bootloaders
	 * that expect this header format!!
	 */
#ifdef CONFIG_EFI
	/*
	 * This instruction decodes to "MZ" ASCII required by UEFI.
	 */
	c.li s4,-13
	j _start_kernel
#else
	/* jump to start kernel */
	j _start_kernel
	/* reserved */
	.word 0
#endif
	.balign 8
#ifdef CONFIG_RISCV_M_MODE
	/* Image load offset (0MB) from start of RAM for M-mode */
	.dword 0
#else
#if __riscv_xlen == 64
	/* Image load offset(2MB) from start of RAM */
	.dword 0x200000
#else
	/* Image load offset(4MB) from start of RAM */
	.dword 0x400000
#endif
#endif
	/* Effective size of kernel image */
	.dword _end - _start
	.dword __HEAD_FLAGS
	.word RISCV_HEADER_VERSION
	.word 0
	.dword 0
	.ascii RISCV_IMAGE_MAGIC
	.balign 4
	.ascii RISCV_IMAGE_MAGIC2
#ifdef CONFIG_EFI
	.word pe_head_start - _start
pe_head_start:

	__EFI_PE_HEADER
#else
	.word 0
#endif

.align 2
#ifdef CONFIG_MMU
	/*
	 * Turn on paging and relocate the (physical) return address in ra
	 * to its virtual alias.  In: a0 = physical address of the page
	 * directory to install in satp.  Clobbers a1, a2 and stvec.
	 */
	.global relocate_enable_mmu
relocate_enable_mmu:
	/* Relocate return address */
	la a1, kernel_map
	XIP_FIXUP_OFFSET a1
	REG_L a1, KERNEL_MAP_VIRT_ADDR(a1)
	la a2, _start
	sub a1, a1, a2			/* a1 = virt - phys offset */
	add ra, ra, a1

	/* Point stvec to virtual address of instruction after satp write */
	la a2, 1f
	add a2, a2, a1
	csrw CSR_TVEC, a2

	/* Compute satp for kernel page tables, but don't load it yet */
	srl a2, a0, PAGE_SHIFT
	la a1, satp_mode
	REG_L a1, 0(a1)
	or a2, a2, a1

	/*
	 * Load trampoline page directory, which will cause us to trap to
	 * stvec if VA != PA, or simply fall through if VA == PA.  We need a
	 * full fence here because setup_vm() just wrote these PTEs and we need
	 * to ensure the new translations are in use.
	 */
	la a0, trampoline_pg_dir
	XIP_FIXUP_OFFSET a0
	srl a0, a0, PAGE_SHIFT
	or a0, a0, a1
	sfence.vma
	csrw CSR_SATP, a0
.align 2
1:
	/* Running at the virtual alias now (trapped here via stvec). */
	/* Set trap vector to spin forever to help debug */
	la a0, .Lsecondary_park
	csrw CSR_TVEC, a0

	/* Reload the global pointer */
.option push
.option norelax
	la gp, __global_pointer$
.option pop

	/*
	 * Switch to kernel page tables.  A full fence is necessary in order to
	 * avoid using the trampoline translations, which are only correct for
	 * the first superpage.  Fetching the fence is guaranteed to work
	 * because that first superpage is translated the same way.
	 */
	csrw CSR_SATP, a2
	sfence.vma

	ret
#endif /* CONFIG_MMU */

#ifdef CONFIG_SMP
	/*
	 * Entry point for secondary harts started via the SBI HSM extension.
	 * a0 = hartid, a1 = pointer to per-hart boot data (task + stack).
	 */
	.global secondary_start_sbi
secondary_start_sbi:
	/* Mask all interrupts */
	csrw CSR_IE, zero
	csrw CSR_IP, zero

	/* Load the global pointer */
	.option push
	.option norelax
		la gp, __global_pointer$
	.option pop

	/*
	 * Disable FPU to detect illegal usage of
	 * floating point in kernel space
	 */
	li t0, SR_FS
	csrc CSR_STATUS, t0

	/* Set trap vector to spin forever to help debug */
	la a3, .Lsecondary_park
	csrw CSR_TVEC, a3

	/* a0 contains the hartid & a1 contains boot data */
	li a2, SBI_HART_BOOT_TASK_PTR_OFFSET
	XIP_FIXUP_OFFSET a2
	add a2, a2, a1
	REG_L tp, (a2)
	li a3, SBI_HART_BOOT_STACK_PTR_OFFSET
	XIP_FIXUP_OFFSET a3
	add a3, a3, a1
	REG_L sp, (a3)

.Lsecondary_start_common:

#ifdef CONFIG_MMU
	/* Enable virtual memory and relocate to virtual address */
	la a0, swapper_pg_dir
	XIP_FIXUP_OFFSET a0
	call relocate_enable_mmu
#endif
	call setup_trap_vector
	tail smp_callin
#endif /* CONFIG_SMP */

.align 2
setup_trap_vector:
	/* Set trap vector to exception handler */
	la a0, handle_exception
	csrw CSR_TVEC, a0

	/*
	 * Set sup0 scratch register to 0, indicating to exception vector that
	 * we are presently executing in kernel.
	 */
	csrw CSR_SCRATCH, zero
	ret

.align 2
.Lsecondary_park:
	/* We lack SMP support or have too many harts, so park this hart */
	wfi
	j .Lsecondary_park

END(_start)

ENTRY(_start_kernel)
	/* Mask all interrupts */
	csrw CSR_IE, zero
	csrw CSR_IP, zero

#ifdef CONFIG_RISCV_M_MODE
	/* flush the instruction cache */
	fence.i

	/* Reset all registers except ra, a0, a1 */
	call reset_regs

	/*
	 * Setup a PMP to permit access to all of memory.  Some machines may
	 * not implement PMPs, so we set up a quick trap handler to just skip
	 * touching the PMPs on any trap.
	 */
	la a0, pmp_done
	csrw CSR_TVEC, a0

	li a0, -1
	csrw CSR_PMPADDR0, a0
	li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X)
	csrw CSR_PMPCFG0, a0
.align 2
pmp_done:

	/*
	 * The hartid in a0 is expected later on, and we have no firmware
	 * to hand it to us.
	 */
	csrr a0, CSR_MHARTID
#endif /* CONFIG_RISCV_M_MODE */

	/* Load the global pointer */
.option push
.option norelax
	la gp, __global_pointer$
.option pop

	/*
	 * Disable FPU to detect illegal usage of
	 * floating point in kernel space
	 */
	li t0, SR_FS
	csrc CSR_STATUS, t0

#ifdef CONFIG_RISCV_BOOT_SPINWAIT
	/* Park harts whose id does not fit in the configured CPU range */
	li t0, CONFIG_NR_CPUS
	blt a0, t0, .Lgood_cores
	tail .Lsecondary_park
.Lgood_cores:

	/* The lottery system is only required for spinwait booting method */
#ifndef CONFIG_XIP_KERNEL
	/* Pick one hart to run the main boot sequence */
	la a3, hart_lottery
	li a2, 1
	amoadd.w a3, a2, (a3)
	bnez a3, .Lsecondary_start

#else
	/* hart_lottery in flash contains a magic number */
	la a3, hart_lottery
	mv a2, a3
	XIP_FIXUP_OFFSET a2
	XIP_FIXUP_FLASH_OFFSET a3
	lw t1, (a3)
	amoswap.w t0, t1, (a2)
	/* first time here if hart_lottery in RAM is not set */
	beq t0, t1, .Lsecondary_start

#endif /* CONFIG_XIP */
#endif /* CONFIG_RISCV_BOOT_SPINWAIT */

#ifdef CONFIG_XIP_KERNEL
	/* Copy writable data from flash to RAM (a0 preserved via s0) */
	la sp, _end + THREAD_SIZE
	XIP_FIXUP_OFFSET sp
	mv s0, a0
	call __copy_data

	/* Restore a0 copy */
	mv a0, s0
#endif

#ifndef CONFIG_XIP_KERNEL
	/* Clear BSS for flat non-ELF images */
	la a3, __bss_start
	la a4, __bss_stop
	ble a4, a3, clear_bss_done
clear_bss:
	REG_S zero, (a3)
	add a3, a3, RISCV_SZPTR
	blt a3, a4, clear_bss
clear_bss_done:
#endif
	/* Save hart ID and DTB physical address */
	mv s0, a0
	mv s1, a1

	la a2, boot_cpu_hartid
	XIP_FIXUP_OFFSET a2
	REG_S a0, (a2)

	/* Initialize page tables and relocate to virtual addresses */
	la tp, init_task
	la sp, init_thread_union + THREAD_SIZE
	XIP_FIXUP_OFFSET sp
#ifdef CONFIG_BUILTIN_DTB
	la a0, __dtb_start
	XIP_FIXUP_OFFSET a0
#else
	mv a0, s1
#endif /* CONFIG_BUILTIN_DTB */
	call setup_vm
#ifdef CONFIG_MMU
	la a0, early_pg_dir
	XIP_FIXUP_OFFSET a0
	call relocate_enable_mmu
#endif /* CONFIG_MMU */

	call setup_trap_vector
	/* Restore C environment (tp/sp now via their virtual addresses) */
	la tp, init_task
	la sp, init_thread_union + THREAD_SIZE

#ifdef CONFIG_KASAN
	call kasan_early_init
#endif
	/* Start the kernel */
	call soc_early_init
	tail start_kernel

#ifdef CONFIG_RISCV_BOOT_SPINWAIT
.Lsecondary_start:
	/* Set trap vector to spin forever to help debug */
	la a3, .Lsecondary_park
	csrw CSR_TVEC, a3

	/* Index the spinwait stack/task pointer arrays by hartid */
	slli a3, a0, LGREG
	la a1, __cpu_spinwait_stack_pointer
	XIP_FIXUP_OFFSET a1
	la a2, __cpu_spinwait_task_pointer
	XIP_FIXUP_OFFSET a2
	add a1, a3, a1
	add a2, a3, a2

	/*
	 * This hart didn't win the lottery, so we wait for the winning hart to
	 * get far enough along the boot process that it should continue.
	 */
.Lwait_for_cpu_up:
	/* FIXME: We should WFI to save some energy here. */
	REG_L sp, (a1)
	REG_L tp, (a2)
	beqz sp, .Lwait_for_cpu_up
	beqz tp, .Lwait_for_cpu_up
	fence

	tail .Lsecondary_start_common
#endif /* CONFIG_RISCV_BOOT_SPINWAIT */

END(_start_kernel)

#ifdef CONFIG_RISCV_M_MODE
	/* Zero every integer register except ra, a0, a1 (and, with
	 * CONFIG_FPU, the FP register file) to reach a known state. */
ENTRY(reset_regs)
	li	sp, 0
	li	gp, 0
	li	tp, 0
	li	t0, 0
	li	t1, 0
	li	t2, 0
	li	s0, 0
	li	s1, 0
	li	a2, 0
	li	a3, 0
	li	a4, 0
	li	a5, 0
	li	a6, 0
	li	a7, 0
	li	s2, 0
	li	s3, 0
	li	s4, 0
	li	s5, 0
	li	s6, 0
	li	s7, 0
	li	s8, 0
	li	s9, 0
	li	s10, 0
	li	s11, 0
	li	t3, 0
	li	t4, 0
	li	t5, 0
	li	t6, 0
	csrw	CSR_SCRATCH, 0

#ifdef CONFIG_FPU
	/* Only touch the FP state if the hart actually implements F/D */
	csrr	t0, CSR_MISA
	andi	t0, t0, (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D)
	beqz	t0, .Lreset_regs_done

	li	t1, SR_FS
	csrs	CSR_STATUS, t1
	fmv.s.x	f0, zero
	fmv.s.x	f1, zero
	fmv.s.x	f2, zero
	fmv.s.x	f3, zero
	fmv.s.x	f4, zero
	fmv.s.x	f5, zero
	fmv.s.x	f6, zero
	fmv.s.x	f7, zero
	fmv.s.x	f8, zero
	fmv.s.x	f9, zero
	fmv.s.x	f10, zero
	fmv.s.x	f11, zero
	fmv.s.x	f12, zero
	fmv.s.x	f13, zero
	fmv.s.x	f14, zero
	fmv.s.x	f15, zero
	fmv.s.x	f16, zero
	fmv.s.x	f17, zero
	fmv.s.x	f18, zero
	fmv.s.x	f19, zero
	fmv.s.x	f20, zero
	fmv.s.x	f21, zero
	fmv.s.x	f22, zero
	fmv.s.x	f23, zero
	fmv.s.x	f24, zero
	fmv.s.x	f25, zero
	fmv.s.x	f26, zero
	fmv.s.x	f27, zero
	fmv.s.x	f28, zero
	fmv.s.x	f29, zero
	fmv.s.x	f30, zero
	fmv.s.x	f31, zero
	csrw	fcsr, 0
	/* note that the caller must clear SR_FS */
#endif /* CONFIG_FPU */
.Lreset_regs_done:
	ret
END(reset_regs)
#endif /* CONFIG_RISCV_M_MODE */
aixcc-public/challenge-001-exemplar-source
3,542
arch/riscv/kernel/efi-header.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020 Western Digital Corporation or its affiliates.
 * Adapted from arch/arm64/kernel/efi-header.S
 */

#include <linux/pe.h>
#include <linux/sizes.h>

	/*
	 * Emit the PE/COFF header that makes the kernel image bootable as an
	 * EFI application.  The layout (field widths, ordering, 40-byte
	 * section-table entries) is fixed by the PE/COFF specification; do
	 * not reorder or resize any field.  Expanded immediately after the
	 * "MZ" DOS stub in head.S.
	 */
	.macro	__EFI_PE_HEADER
	.long	PE_MAGIC
coff_header:
#ifdef CONFIG_64BIT
	.short	IMAGE_FILE_MACHINE_RISCV64		// Machine
#else
	.short	IMAGE_FILE_MACHINE_RISCV32		// Machine
#endif
	.short	section_count				// NumberOfSections
	.long	0					// TimeDateStamp
	.long	0					// PointerToSymbolTable
	.long	0					// NumberOfSymbols
	.short	section_table - optional_header		// SizeOfOptionalHeader
	.short	IMAGE_FILE_DEBUG_STRIPPED | \
		IMAGE_FILE_EXECUTABLE_IMAGE | \
		IMAGE_FILE_LINE_NUMS_STRIPPED		// Characteristics

optional_header:
#ifdef CONFIG_64BIT
	.short	PE_OPT_MAGIC_PE32PLUS			// PE32+ format
#else
	.short	PE_OPT_MAGIC_PE32			// PE32 format
#endif
	.byte	0x02					// MajorLinkerVersion
	.byte	0x14					// MinorLinkerVersion
	.long	__pecoff_text_end - efi_header_end	// SizeOfCode
	.long	__pecoff_data_virt_size			// SizeOfInitializedData
	.long	0					// SizeOfUninitializedData
	.long	__efistub_efi_pe_entry - _start		// AddressOfEntryPoint
	.long	efi_header_end - _start			// BaseOfCode
#ifdef CONFIG_32BIT
	// PE32 (unlike PE32+) carries an explicit BaseOfData field
	.long	__pecoff_text_end - _start		// BaseOfData
#endif

extra_header_fields:
	.quad	0					// ImageBase
	.long	PECOFF_SECTION_ALIGNMENT		// SectionAlignment
	.long	PECOFF_FILE_ALIGNMENT			// FileAlignment
	.short	0					// MajorOperatingSystemVersion
	.short	0					// MinorOperatingSystemVersion
	.short	LINUX_EFISTUB_MAJOR_VERSION		// MajorImageVersion
	.short	LINUX_EFISTUB_MINOR_VERSION		// MinorImageVersion
	.short	0					// MajorSubsystemVersion
	.short	0					// MinorSubsystemVersion
	.long	0					// Win32VersionValue
	.long	_end - _start				// SizeOfImage
	// Everything before the kernel image is considered part of the header
	.long	efi_header_end - _start			// SizeOfHeaders
	.long	0					// CheckSum
	.short	IMAGE_SUBSYSTEM_EFI_APPLICATION		// Subsystem
	.short	0					// DllCharacteristics
	.quad	0					// SizeOfStackReserve
	.quad	0					// SizeOfStackCommit
	.quad	0					// SizeOfHeapReserve
	.quad	0					// SizeOfHeapCommit
	.long	0					// LoaderFlags
	// Data directory entries follow; each entry is an RVA/size pair (8 bytes)
	.long	(section_table - .) / 8			// NumberOfRvaAndSizes
	.quad	0					// ExportTable
	.quad	0					// ImportTable
	.quad	0					// ResourceTable
	.quad	0					// ExceptionTable
	.quad	0					// CertificationTable
	.quad	0					// BaseRelocationTable

	// Section table
section_table:
	.ascii	".text\0\0\0"
	.long	__pecoff_text_end - efi_header_end	// VirtualSize
	.long	efi_header_end - _start			// VirtualAddress
	.long	__pecoff_text_end - efi_header_end	// SizeOfRawData
	.long	efi_header_end - _start			// PointerToRawData
	.long	0					// PointerToRelocations
	.long	0					// PointerToLineNumbers
	.short	0					// NumberOfRelocations
	.short	0					// NumberOfLineNumbers
	.long	IMAGE_SCN_CNT_CODE | \
		IMAGE_SCN_MEM_READ | \
		IMAGE_SCN_MEM_EXECUTE			// Characteristics

	.ascii	".data\0\0\0"
	.long	__pecoff_data_virt_size			// VirtualSize
	.long	__pecoff_text_end - _start		// VirtualAddress
	.long	__pecoff_data_raw_size			// SizeOfRawData
	.long	__pecoff_text_end - _start		// PointerToRawData
	.long	0					// PointerToRelocations
	.long	0					// PointerToLineNumbers
	.short	0					// NumberOfRelocations
	.short	0					// NumberOfLineNumbers
	.long	IMAGE_SCN_CNT_INITIALIZED_DATA | \
		IMAGE_SCN_MEM_READ | \
		IMAGE_SCN_MEM_WRITE			// Characteristics

	// Each section table entry above is exactly 40 bytes
	.set	section_count, (. - section_table) / 40

	.balign	0x1000
efi_header_end:
	.endm
aixcc-public/challenge-001-exemplar-source
14,802
arch/riscv/kernel/entry.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 *
 * Low-level trap entry/exit, syscall dispatch and context switch for
 * RISC-V.  Entered via CSR_TVEC with interrupts disabled by hardware.
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/errata_list.h>

#if !IS_ENABLED(CONFIG_PREEMPTION)
/* Without kernel preemption, returning to kernel mode needs no resched check */
.set resume_kernel, restore_all
#endif

ENTRY(handle_exception)
	/*
	 * If coming from userspace, preserve the user thread pointer and load
	 * the kernel thread pointer.  If we came from the kernel, the scratch
	 * register will contain 0, and we should continue on the current TP.
	 */
	csrrw tp, CSR_SCRATCH, tp
	bnez tp, _save_context

_restore_kernel_tpsp:
	csrr tp, CSR_SCRATCH
	REG_S sp, TASK_TI_KERNEL_SP(tp)

#ifdef CONFIG_VMAP_STACK
	/*
	 * Detect kernel stack overflow before using the stack: if SP (after
	 * reserving pt_regs space) has crossed into the guard region, the
	 * THREAD_SHIFT-aligned bit pattern below will be odd.
	 */
	addi sp, sp, -(PT_SIZE_ON_STACK)
	srli sp, sp, THREAD_SHIFT
	andi sp, sp, 0x1
	bnez sp, handle_kernel_stack_overflow
	REG_L sp, TASK_TI_KERNEL_SP(tp)
#endif

_save_context:
	REG_S sp, TASK_TI_USER_SP(tp)
	REG_L sp, TASK_TI_KERNEL_SP(tp)
	addi sp, sp, -(PT_SIZE_ON_STACK)
	/* Save all GPRs except sp/tp (saved via CSRs below) into pt_regs */
	REG_S x1,  PT_RA(sp)
	REG_S x3,  PT_GP(sp)
	REG_S x5,  PT_T0(sp)
	REG_S x6,  PT_T1(sp)
	REG_S x7,  PT_T2(sp)
	REG_S x8,  PT_S0(sp)
	REG_S x9,  PT_S1(sp)
	REG_S x10, PT_A0(sp)
	REG_S x11, PT_A1(sp)
	REG_S x12, PT_A2(sp)
	REG_S x13, PT_A3(sp)
	REG_S x14, PT_A4(sp)
	REG_S x15, PT_A5(sp)
	REG_S x16, PT_A6(sp)
	REG_S x17, PT_A7(sp)
	REG_S x18, PT_S2(sp)
	REG_S x19, PT_S3(sp)
	REG_S x20, PT_S4(sp)
	REG_S x21, PT_S5(sp)
	REG_S x22, PT_S6(sp)
	REG_S x23, PT_S7(sp)
	REG_S x24, PT_S8(sp)
	REG_S x25, PT_S9(sp)
	REG_S x26, PT_S10(sp)
	REG_S x27, PT_S11(sp)
	REG_S x28, PT_T3(sp)
	REG_S x29, PT_T4(sp)
	REG_S x30, PT_T5(sp)
	REG_S x31, PT_T6(sp)

	/*
	 * Disable user-mode memory access as it should only be set in the
	 * actual user copy routines.
	 *
	 * Disable the FPU to detect illegal usage of floating point in kernel
	 * space.
	 */
	li t0, SR_SUM | SR_FS

	REG_L s0, TASK_TI_USER_SP(tp)
	csrrc s1, CSR_STATUS, t0
	csrr s2, CSR_EPC
	csrr s3, CSR_TVAL
	csrr s4, CSR_CAUSE
	csrr s5, CSR_SCRATCH
	REG_S s0, PT_SP(sp)
	REG_S s1, PT_STATUS(sp)
	REG_S s2, PT_EPC(sp)
	REG_S s3, PT_BADADDR(sp)
	REG_S s4, PT_CAUSE(sp)
	REG_S s5, PT_TP(sp)

	/*
	 * Set the scratch register to 0, so that if a recursive exception
	 * occurs, the exception vector knows it came from the kernel
	 */
	csrw CSR_SCRATCH, x0

	/* Load the global pointer */
.option push
.option norelax
	la gp, __global_pointer$
.option pop

#ifdef CONFIG_TRACE_IRQFLAGS
	call __trace_hardirqs_off
#endif

#ifdef CONFIG_CONTEXT_TRACKING_USER
	/* If previous state is in user mode, call user_exit_callable(). */
	li   a0, SR_PP
	and a0, s1, a0
	bnez a0, skip_context_tracking
	call user_exit_callable
skip_context_tracking:
#endif

	/*
	 * MSB of cause differentiates between
	 * interrupts and exceptions
	 */
	bge s4, zero, 1f

	la ra, ret_from_exception

	/* Handle interrupts */
	move a0, sp /* pt_regs */
	la a1, generic_handle_arch_irq
	jr a1
1:
	/*
	 * Exceptions run with interrupts enabled or disabled depending on the
	 * state of SR_PIE in m/sstatus.
	 */
	andi t0, s1, SR_PIE
	beqz t0, 1f
	/* kprobes, entered via ebreak, must have interrupts disabled. */
	li t0, EXC_BREAKPOINT
	beq s4, t0, 1f
#ifdef CONFIG_TRACE_IRQFLAGS
	call __trace_hardirqs_on
#endif
	csrs CSR_STATUS, SR_IE

1:
	la ra, ret_from_exception
	/* Handle syscalls */
	li t0, EXC_SYSCALL
	beq s4, t0, handle_syscall

	/* Handle other exceptions via the function-pointer table */
	slli t0, s4, RISCV_LGPTR
	la t1, excp_vect_table
	la t2, excp_vect_table_end
	move a0, sp /* pt_regs */
	add t0, t1, t0
	/* Check if exception code lies within bounds */
	bgeu t0, t2, 1f
	REG_L t0, 0(t0)
	jr t0
1:
	tail do_trap_unknown

handle_syscall:
#ifdef CONFIG_RISCV_M_MODE
	/*
	 * When running in M-Mode (no MMU config), MPIE does not get set.
	 * As a result, we need to force enable interrupts here because
	 * handle_exception did not set SR_IE as it always sees SR_PIE
	 * being cleared.
	 */
	csrs CSR_STATUS, SR_IE
#endif
#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING_USER)
	/* Recover a0 - a7 for system calls */
	REG_L a0, PT_A0(sp)
	REG_L a1, PT_A1(sp)
	REG_L a2, PT_A2(sp)
	REG_L a3, PT_A3(sp)
	REG_L a4, PT_A4(sp)
	REG_L a5, PT_A5(sp)
	REG_L a6, PT_A6(sp)
	REG_L a7, PT_A7(sp)
#endif
	/* save the initial A0 value (needed in signal handlers) */
	REG_S a0, PT_ORIG_A0(sp)
	/*
	 * Advance SEPC to avoid executing the original
	 * scall instruction on sret
	 */
	addi s2, s2, 0x4
	REG_S s2, PT_EPC(sp)
	/* Trace syscalls, but only if requested by the user. */
	REG_L t0, TASK_TI_FLAGS(tp)
	andi t0, t0, _TIF_SYSCALL_WORK
	bnez t0, handle_syscall_trace_enter
check_syscall_nr:
	/* Check to make sure we don't jump to a bogus syscall number. */
	li t0, __NR_syscalls
	la s0, sys_ni_syscall
	/*
	 * Syscall number held in a7.
	 * If syscall number is above allowed value, redirect to ni_syscall.
	 */
	bgeu a7, t0, 3f
#ifdef CONFIG_COMPAT
	/* UXL field of sstatus selects the 32-bit compat syscall table */
	REG_L s0, PT_STATUS(sp)
	srli s0, s0, SR_UXL_SHIFT
	andi s0, s0, (SR_UXL >> SR_UXL_SHIFT)
	li t0, (SR_UXL_32 >> SR_UXL_SHIFT)
	sub t0, s0, t0
	bnez t0, 1f

	/* Call compat_syscall */
	la s0, compat_sys_call_table
	j 2f
1:
#endif
	/* Call syscall */
	la s0, sys_call_table
2:
	slli t0, a7, RISCV_LGPTR
	add s0, s0, t0
	REG_L s0, 0(s0)
3:
	jalr s0

ret_from_syscall:
	/* Set user a0 to kernel a0 */
	REG_S a0, PT_A0(sp)
	/*
	 * We didn't execute the actual syscall.
	 * Seccomp already set return value for the current task pt_regs.
	 * (If it was configured with SECCOMP_RET_ERRNO/TRACE)
	 */
ret_from_syscall_rejected:
#ifdef CONFIG_DEBUG_RSEQ
	move a0, sp
	call rseq_syscall
#endif
	/* Trace syscalls, but only if requested by the user. */
	REG_L t0, TASK_TI_FLAGS(tp)
	andi t0, t0, _TIF_SYSCALL_WORK
	bnez t0, handle_syscall_trace_exit

ret_from_exception:
	REG_L s0, PT_STATUS(sp)
	csrc CSR_STATUS, SR_IE
#ifdef CONFIG_TRACE_IRQFLAGS
	call __trace_hardirqs_off
#endif
#ifdef CONFIG_RISCV_M_MODE
	/* the MPP value is too large to be used as an immediate arg for addi */
	li t0, SR_MPP
	and s0, s0, t0
#else
	andi s0, s0, SR_SPP
#endif
	bnez s0, resume_kernel

	/* Interrupts must be disabled here so flags are checked atomically */
	REG_L s0, TASK_TI_FLAGS(tp) /* current_thread_info->flags */
	andi s1, s0, _TIF_WORK_MASK
	bnez s1, resume_userspace_slow
resume_userspace:
#ifdef CONFIG_CONTEXT_TRACKING_USER
	call user_enter_callable
#endif

	/* Save unwound kernel stack pointer in thread_info */
	addi s0, sp, PT_SIZE_ON_STACK
	REG_S s0, TASK_TI_KERNEL_SP(tp)

	/*
	 * Save TP into the scratch register, so we can find the kernel data
	 * structures again.
	 */
	csrw CSR_SCRATCH, tp

restore_all:
#ifdef CONFIG_TRACE_IRQFLAGS
	REG_L s1, PT_STATUS(sp)
	andi t0, s1, SR_PIE
	beqz t0, 1f
	call __trace_hardirqs_on
	j 2f
1:
	call __trace_hardirqs_off
2:
#endif
	REG_L a0, PT_STATUS(sp)
	/*
	 * The current load reservation is effectively part of the processor's
	 * state, in the sense that load reservations cannot be shared between
	 * different hart contexts.  We can't actually save and restore a load
	 * reservation, so instead here we clear any existing reservation --
	 * it's always legal for implementations to clear load reservations at
	 * any point (as long as the forward progress guarantee is kept, but
	 * we'll ignore that here).
	 *
	 * Dangling load reservations can be the result of taking a trap in the
	 * middle of an LR/SC sequence, but can also be the result of a taken
	 * forward branch around an SC -- which is how we implement CAS.  As a
	 * result we need to clear reservations between the last CAS and the
	 * jump back to the new context.  While it is unlikely the store
	 * completes, implementations are allowed to expand reservations to be
	 * arbitrarily large.
	 */
	REG_L  a2, PT_EPC(sp)
	REG_SC x0, a2, PT_EPC(sp)

	csrw CSR_STATUS, a0
	csrw CSR_EPC, a2

	REG_L x1,  PT_RA(sp)
	REG_L x3,  PT_GP(sp)
	REG_L x4,  PT_TP(sp)
	REG_L x5,  PT_T0(sp)
	REG_L x6,  PT_T1(sp)
	REG_L x7,  PT_T2(sp)
	REG_L x8,  PT_S0(sp)
	REG_L x9,  PT_S1(sp)
	REG_L x10, PT_A0(sp)
	REG_L x11, PT_A1(sp)
	REG_L x12, PT_A2(sp)
	REG_L x13, PT_A3(sp)
	REG_L x14, PT_A4(sp)
	REG_L x15, PT_A5(sp)
	REG_L x16, PT_A6(sp)
	REG_L x17, PT_A7(sp)
	REG_L x18, PT_S2(sp)
	REG_L x19, PT_S3(sp)
	REG_L x20, PT_S4(sp)
	REG_L x21, PT_S5(sp)
	REG_L x22, PT_S6(sp)
	REG_L x23, PT_S7(sp)
	REG_L x24, PT_S8(sp)
	REG_L x25, PT_S9(sp)
	REG_L x26, PT_S10(sp)
	REG_L x27, PT_S11(sp)
	REG_L x28, PT_T3(sp)
	REG_L x29, PT_T4(sp)
	REG_L x30, PT_T5(sp)
	REG_L x31, PT_T6(sp)

	/* sp is restored last since all loads above index off it */
	REG_L x2,  PT_SP(sp)

#ifdef CONFIG_RISCV_M_MODE
	mret
#else
	sret
#endif

#if IS_ENABLED(CONFIG_PREEMPTION)
resume_kernel:
	REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
	bnez s0, restore_all
	REG_L s0, TASK_TI_FLAGS(tp)
	andi s0, s0, _TIF_NEED_RESCHED
	beqz s0, restore_all
	call preempt_schedule_irq
	j restore_all
#endif

resume_userspace_slow:
	/* Enter slow path for supplementary processing */
	move a0, sp /* pt_regs */
	move a1, s0 /* current_thread_info->flags */
	call do_work_pending
	j resume_userspace

/* Slow paths for ptrace. */
handle_syscall_trace_enter:
	move a0, sp
	call do_syscall_trace_enter
	move t0, a0
	REG_L a0, PT_A0(sp)
	REG_L a1, PT_A1(sp)
	REG_L a2, PT_A2(sp)
	REG_L a3, PT_A3(sp)
	REG_L a4, PT_A4(sp)
	REG_L a5, PT_A5(sp)
	REG_L a6, PT_A6(sp)
	REG_L a7, PT_A7(sp)
	bnez t0, ret_from_syscall_rejected
	j check_syscall_nr
handle_syscall_trace_exit:
	move a0, sp
	call do_syscall_trace_exit
	j ret_from_exception

#ifdef CONFIG_VMAP_STACK
handle_kernel_stack_overflow:
	/*
	 * Takes the pseudo-spinlock for the shadow stack, in case multiple
	 * harts are concurrently overflowing their kernel stacks.  We could
	 * store any value here, but since we're overflowing the kernel stack
	 * already we only have SP to use as a scratch register.  So we just
	 * swap in the address of the spinlock, as that's definitely non-zero.
	 *
	 * Pairs with a store_release in handle_bad_stack().
	 */
1:	la sp, spin_shadow_stack
	REG_AMOSWAP_AQ sp, sp, (sp)
	bnez sp, 1b

	la sp, shadow_stack
	addi sp, sp, SHADOW_OVERFLOW_STACK_SIZE

	//save caller register to shadow stack
	addi sp, sp, -(PT_SIZE_ON_STACK)
	REG_S x1,  PT_RA(sp)
	REG_S x5,  PT_T0(sp)
	REG_S x6,  PT_T1(sp)
	REG_S x7,  PT_T2(sp)
	REG_S x10, PT_A0(sp)
	REG_S x11, PT_A1(sp)
	REG_S x12, PT_A2(sp)
	REG_S x13, PT_A3(sp)
	REG_S x14, PT_A4(sp)
	REG_S x15, PT_A5(sp)
	REG_S x16, PT_A6(sp)
	REG_S x17, PT_A7(sp)
	REG_S x28, PT_T3(sp)
	REG_S x29, PT_T4(sp)
	REG_S x30, PT_T5(sp)
	REG_S x31, PT_T6(sp)

	la ra, restore_caller_reg
	tail get_overflow_stack

restore_caller_reg:
	//save per-cpu overflow stack
	REG_S a0, -8(sp)
	//restore caller register from shadow_stack
	REG_L x1,  PT_RA(sp)
	REG_L x5,  PT_T0(sp)
	REG_L x6,  PT_T1(sp)
	REG_L x7,  PT_T2(sp)
	REG_L x10, PT_A0(sp)
	REG_L x11, PT_A1(sp)
	REG_L x12, PT_A2(sp)
	REG_L x13, PT_A3(sp)
	REG_L x14, PT_A4(sp)
	REG_L x15, PT_A5(sp)
	REG_L x16, PT_A6(sp)
	REG_L x17, PT_A7(sp)
	REG_L x28, PT_T3(sp)
	REG_L x29, PT_T4(sp)
	REG_L x30, PT_T5(sp)
	REG_L x31, PT_T6(sp)

	//load per-cpu overflow stack
	REG_L sp, -8(sp)
	addi sp, sp, -(PT_SIZE_ON_STACK)

	//save context to overflow stack
	REG_S x1,  PT_RA(sp)
	REG_S x3,  PT_GP(sp)
	REG_S x5,  PT_T0(sp)
	REG_S x6,  PT_T1(sp)
	REG_S x7,  PT_T2(sp)
	REG_S x8,  PT_S0(sp)
	REG_S x9,  PT_S1(sp)
	REG_S x10, PT_A0(sp)
	REG_S x11, PT_A1(sp)
	REG_S x12, PT_A2(sp)
	REG_S x13, PT_A3(sp)
	REG_S x14, PT_A4(sp)
	REG_S x15, PT_A5(sp)
	REG_S x16, PT_A6(sp)
	REG_S x17, PT_A7(sp)
	REG_S x18, PT_S2(sp)
	REG_S x19, PT_S3(sp)
	REG_S x20, PT_S4(sp)
	REG_S x21, PT_S5(sp)
	REG_S x22, PT_S6(sp)
	REG_S x23, PT_S7(sp)
	REG_S x24, PT_S8(sp)
	REG_S x25, PT_S9(sp)
	REG_S x26, PT_S10(sp)
	REG_S x27, PT_S11(sp)
	REG_S x28, PT_T3(sp)
	REG_S x29, PT_T4(sp)
	REG_S x30, PT_T5(sp)
	REG_S x31, PT_T6(sp)

	REG_L s0, TASK_TI_KERNEL_SP(tp)
	csrr s1, CSR_STATUS
	csrr s2, CSR_EPC
	csrr s3, CSR_TVAL
	csrr s4, CSR_CAUSE
	csrr s5, CSR_SCRATCH
	REG_S s0, PT_SP(sp)
	REG_S s1, PT_STATUS(sp)
	REG_S s2, PT_EPC(sp)
	REG_S s3, PT_BADADDR(sp)
	REG_S s4, PT_CAUSE(sp)
	REG_S s5, PT_TP(sp)
	move a0, sp
	tail handle_bad_stack
#endif

END(handle_exception)

ENTRY(ret_from_fork)
	la ra, ret_from_exception
	tail schedule_tail
ENDPROC(ret_from_fork)

ENTRY(ret_from_kernel_thread)
	call schedule_tail
	/* Call fn(arg) */
	la ra, ret_from_exception
	move a0, s1
	jr s0
ENDPROC(ret_from_kernel_thread)

/*
 * Integer register context switch
 * The callee-saved registers must be saved and restored.
 *
 *  a0: previous task_struct (must be preserved across the switch)
 *  a1: next task_struct
 *
 * The value of a0 and a1 must be preserved by this function, as that's how
 * arguments are passed to schedule_tail.
 */
ENTRY(__switch_to)
	/* Save context into prev->thread */
	li    a4,  TASK_THREAD_RA
	add   a3, a0, a4
	add   a4, a1, a4
	REG_S ra,  TASK_THREAD_RA_RA(a3)
	REG_S sp,  TASK_THREAD_SP_RA(a3)
	REG_S s0,  TASK_THREAD_S0_RA(a3)
	REG_S s1,  TASK_THREAD_S1_RA(a3)
	REG_S s2,  TASK_THREAD_S2_RA(a3)
	REG_S s3,  TASK_THREAD_S3_RA(a3)
	REG_S s4,  TASK_THREAD_S4_RA(a3)
	REG_S s5,  TASK_THREAD_S5_RA(a3)
	REG_S s6,  TASK_THREAD_S6_RA(a3)
	REG_S s7,  TASK_THREAD_S7_RA(a3)
	REG_S s8,  TASK_THREAD_S8_RA(a3)
	REG_S s9,  TASK_THREAD_S9_RA(a3)
	REG_S s10, TASK_THREAD_S10_RA(a3)
	REG_S s11, TASK_THREAD_S11_RA(a3)
	/* Restore context from next->thread */
	REG_L ra,  TASK_THREAD_RA_RA(a4)
	REG_L sp,  TASK_THREAD_SP_RA(a4)
	REG_L s0,  TASK_THREAD_S0_RA(a4)
	REG_L s1,  TASK_THREAD_S1_RA(a4)
	REG_L s2,  TASK_THREAD_S2_RA(a4)
	REG_L s3,  TASK_THREAD_S3_RA(a4)
	REG_L s4,  TASK_THREAD_S4_RA(a4)
	REG_L s5,  TASK_THREAD_S5_RA(a4)
	REG_L s6,  TASK_THREAD_S6_RA(a4)
	REG_L s7,  TASK_THREAD_S7_RA(a4)
	REG_L s8,  TASK_THREAD_S8_RA(a4)
	REG_L s9,  TASK_THREAD_S9_RA(a4)
	REG_L s10, TASK_THREAD_S10_RA(a4)
	REG_L s11, TASK_THREAD_S11_RA(a4)
	/* The offset of thread_info in task_struct is zero. */
	move tp, a1
	ret
ENDPROC(__switch_to)

#ifndef CONFIG_MMU
#define do_page_fault do_trap_unknown
#endif

	.section ".rodata"
	.align LGREG
	/* Exception vector table */
ENTRY(excp_vect_table)
	RISCV_PTR do_trap_insn_misaligned
	ALT_INSN_FAULT(RISCV_PTR do_trap_insn_fault)
	RISCV_PTR do_trap_insn_illegal
	RISCV_PTR do_trap_break
	RISCV_PTR do_trap_load_misaligned
	RISCV_PTR do_trap_load_fault
	RISCV_PTR do_trap_store_misaligned
	RISCV_PTR do_trap_store_fault
	RISCV_PTR do_trap_ecall_u /* system call, gets intercepted */
	RISCV_PTR do_trap_ecall_s
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_trap_ecall_m
	/* instruction page fault */
	ALT_PAGE_FAULT(RISCV_PTR do_page_fault)
	RISCV_PTR do_page_fault   /* load page fault */
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_page_fault   /* store page fault */
excp_vect_table_end:
END(excp_vect_table)

#ifndef CONFIG_MMU
ENTRY(__user_rt_sigreturn)
	li a7, __NR_rt_sigreturn
	scall
END(__user_rt_sigreturn)
#endif
aixcc-public/challenge-001-exemplar-source
3,782
arch/riscv/kernel/suspend_entry.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 * Copyright (c) 2022 Ventana Micro Systems Inc.
 */

#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/csr.h>
#include <asm/xip_fixup.h>

	.text
	.altmacro
	.option norelax

/*
 * __cpu_suspend_enter(struct suspend_context *a0)
 *
 * Save the callee-visible register state and the trap CSRs into the
 * suspend context pointed to by a0, then return non-zero.  The caller
 * resumes through __cpu_resume_enter, which restores this state and
 * returns zero instead (T0-T6 and A0 are treated as scratch and are
 * deliberately not saved).
 */
ENTRY(__cpu_suspend_enter)
	/* Save registers (except A0 and T0-T6) */
	REG_S	ra, (SUSPEND_CONTEXT_REGS + PT_RA)(a0)
	REG_S	sp, (SUSPEND_CONTEXT_REGS + PT_SP)(a0)
	REG_S	gp, (SUSPEND_CONTEXT_REGS + PT_GP)(a0)
	REG_S	tp, (SUSPEND_CONTEXT_REGS + PT_TP)(a0)
	REG_S	s0, (SUSPEND_CONTEXT_REGS + PT_S0)(a0)
	REG_S	s1, (SUSPEND_CONTEXT_REGS + PT_S1)(a0)
	REG_S	a1, (SUSPEND_CONTEXT_REGS + PT_A1)(a0)
	REG_S	a2, (SUSPEND_CONTEXT_REGS + PT_A2)(a0)
	REG_S	a3, (SUSPEND_CONTEXT_REGS + PT_A3)(a0)
	REG_S	a4, (SUSPEND_CONTEXT_REGS + PT_A4)(a0)
	REG_S	a5, (SUSPEND_CONTEXT_REGS + PT_A5)(a0)
	REG_S	a6, (SUSPEND_CONTEXT_REGS + PT_A6)(a0)
	REG_S	a7, (SUSPEND_CONTEXT_REGS + PT_A7)(a0)
	REG_S	s2, (SUSPEND_CONTEXT_REGS + PT_S2)(a0)
	REG_S	s3, (SUSPEND_CONTEXT_REGS + PT_S3)(a0)
	REG_S	s4, (SUSPEND_CONTEXT_REGS + PT_S4)(a0)
	REG_S	s5, (SUSPEND_CONTEXT_REGS + PT_S5)(a0)
	REG_S	s6, (SUSPEND_CONTEXT_REGS + PT_S6)(a0)
	REG_S	s7, (SUSPEND_CONTEXT_REGS + PT_S7)(a0)
	REG_S	s8, (SUSPEND_CONTEXT_REGS + PT_S8)(a0)
	REG_S	s9, (SUSPEND_CONTEXT_REGS + PT_S9)(a0)
	REG_S	s10, (SUSPEND_CONTEXT_REGS + PT_S10)(a0)
	REG_S	s11, (SUSPEND_CONTEXT_REGS + PT_S11)(a0)

	/* Save CSRs */
	csrr	t0, CSR_EPC
	REG_S	t0, (SUSPEND_CONTEXT_REGS + PT_EPC)(a0)
	csrr	t0, CSR_STATUS
	REG_S	t0, (SUSPEND_CONTEXT_REGS + PT_STATUS)(a0)
	csrr	t0, CSR_TVAL
	REG_S	t0, (SUSPEND_CONTEXT_REGS + PT_BADADDR)(a0)
	csrr	t0, CSR_CAUSE
	REG_S	t0, (SUSPEND_CONTEXT_REGS + PT_CAUSE)(a0)

	/* Return non-zero value */
	li	a0, 1

	/* Return to C code */
	ret
END(__cpu_suspend_enter)

/*
 * __cpu_resume_enter(a0 = hartid-like arg, a1 = suspend context)
 *
 * Resume-side counterpart of __cpu_suspend_enter: re-establish gp and the
 * MMU (when configured), restore CSRs and saved registers from the context
 * in a1, and return zero so the C caller can distinguish resume from the
 * original suspend call.
 */
ENTRY(__cpu_resume_enter)
	/* Load the global pointer */
	.option push
	.option norelax
		la gp, __global_pointer$
	.option pop

#ifdef CONFIG_MMU
	/* Save A0 and A1 across relocate_enable_mmu (they are its scratch) */
	add	t0, a0, zero
	add	t1, a1, zero

	/* Enable MMU */
	la	a0, swapper_pg_dir
	XIP_FIXUP_OFFSET a0
	call	relocate_enable_mmu

	/* Restore A0 and A1 */
	add	a0, t0, zero
	add	a1, t1, zero
#endif

	/* Make A0 point to suspend context */
	add	a0, a1, zero

	/* Restore CSRs */
	REG_L	t0, (SUSPEND_CONTEXT_REGS + PT_EPC)(a0)
	csrw	CSR_EPC, t0
	REG_L	t0, (SUSPEND_CONTEXT_REGS + PT_STATUS)(a0)
	csrw	CSR_STATUS, t0
	REG_L	t0, (SUSPEND_CONTEXT_REGS + PT_BADADDR)(a0)
	csrw	CSR_TVAL, t0
	REG_L	t0, (SUSPEND_CONTEXT_REGS + PT_CAUSE)(a0)
	csrw	CSR_CAUSE, t0

	/* Restore registers (except A0 and T0-T6) */
	REG_L	ra, (SUSPEND_CONTEXT_REGS + PT_RA)(a0)
	REG_L	sp, (SUSPEND_CONTEXT_REGS + PT_SP)(a0)
	REG_L	gp, (SUSPEND_CONTEXT_REGS + PT_GP)(a0)
	REG_L	tp, (SUSPEND_CONTEXT_REGS + PT_TP)(a0)
	REG_L	s0, (SUSPEND_CONTEXT_REGS + PT_S0)(a0)
	REG_L	s1, (SUSPEND_CONTEXT_REGS + PT_S1)(a0)
	REG_L	a1, (SUSPEND_CONTEXT_REGS + PT_A1)(a0)
	REG_L	a2, (SUSPEND_CONTEXT_REGS + PT_A2)(a0)
	REG_L	a3, (SUSPEND_CONTEXT_REGS + PT_A3)(a0)
	REG_L	a4, (SUSPEND_CONTEXT_REGS + PT_A4)(a0)
	REG_L	a5, (SUSPEND_CONTEXT_REGS + PT_A5)(a0)
	REG_L	a6, (SUSPEND_CONTEXT_REGS + PT_A6)(a0)
	REG_L	a7, (SUSPEND_CONTEXT_REGS + PT_A7)(a0)
	REG_L	s2, (SUSPEND_CONTEXT_REGS + PT_S2)(a0)
	REG_L	s3, (SUSPEND_CONTEXT_REGS + PT_S3)(a0)
	REG_L	s4, (SUSPEND_CONTEXT_REGS + PT_S4)(a0)
	REG_L	s5, (SUSPEND_CONTEXT_REGS + PT_S5)(a0)
	REG_L	s6, (SUSPEND_CONTEXT_REGS + PT_S6)(a0)
	REG_L	s7, (SUSPEND_CONTEXT_REGS + PT_S7)(a0)
	REG_L	s8, (SUSPEND_CONTEXT_REGS + PT_S8)(a0)
	REG_L	s9, (SUSPEND_CONTEXT_REGS + PT_S9)(a0)
	REG_L	s10, (SUSPEND_CONTEXT_REGS + PT_S10)(a0)
	REG_L	s11, (SUSPEND_CONTEXT_REGS + PT_S11)(a0)

	/* Return zero value */
	add	a0, zero, zero

	/* Return to C code */
	ret
END(__cpu_resume_enter)
aixcc-public/challenge-001-exemplar-source
2,908
arch/riscv/kernel/vmlinux-xip.lds.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 * Copyright (C) 2020 Vitaly Wool, Konsulko AB
 *
 * Linker script for XIP (execute-in-place) kernels: read-only sections
 * stay in flash at KERNEL_LINK_ADDR, writable data is linked at
 * KERNEL_LINK_ADDR + XIP_OFFSET and copied to RAM at boot.
 */

#include <asm/pgtable.h>
#define LOAD_OFFSET KERNEL_LINK_ADDR
/* No __ro_after_init data in the .rodata section - which will always be ro */
#define RO_AFTER_INIT_DATA

#include <asm/vmlinux.lds.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/thread_info.h>

OUTPUT_ARCH(riscv)
ENTRY(_start)

jiffies = jiffies_64;

SECTIONS
{
	/* Beginning of code and text segment */
	. = LOAD_OFFSET;
	_xiprom = .;
	_start = .;
	HEAD_TEXT_SECTION
	INIT_TEXT_SECTION(PAGE_SIZE)
	/* we have to discard exit text and such at runtime, not link time */
	.exit.text :
	{
		EXIT_TEXT
	}

	.text : {
		_text = .;
		_stext = .;
		TEXT_TEXT
		SCHED_TEXT
		CPUIDLE_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		ENTRY_TEXT
		IRQENTRY_TEXT
		SOFTIRQENTRY_TEXT
		_etext = .;
	}
	RO_DATA(L1_CACHE_BYTES)
	.srodata : {
		*(.srodata*)
	}
	.init.rodata : {
		INIT_SETUP(16)
		INIT_CALLS
		CON_INITCALL
		INIT_RAM_FS
	}
	_exiprom = .;			/* End of XIP ROM area */


/*
 * From this point, stuff is considered writable and will be copied to RAM
 */
	__data_loc = ALIGN(PAGE_SIZE);		/* location in file */
	. = KERNEL_LINK_ADDR + XIP_OFFSET;	/* location in memory */

#undef LOAD_OFFSET
#define LOAD_OFFSET (KERNEL_LINK_ADDR + XIP_OFFSET - (__data_loc & XIP_OFFSET_MASK))

	_sdata = .;			/* Start of data section */
	_data = .;
	RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
	_edata = .;
	__start_ro_after_init = .;
	.data.ro_after_init : AT(ADDR(.data.ro_after_init) - LOAD_OFFSET) {
		*(.data..ro_after_init)
	}
	__end_ro_after_init = .;

	. = ALIGN(PAGE_SIZE);
	__init_begin = .;
	.init.data : {
		INIT_DATA
	}
	.exit.data : {
		EXIT_DATA
	}
	. = ALIGN(8);
	__soc_early_init_table : {
		__soc_early_init_table_start = .;
		KEEP(*(__soc_early_init_table))
		__soc_early_init_table_end = .;
	}
	__soc_builtin_dtb_table : {
		__soc_builtin_dtb_table_start = .;
		KEEP(*(__soc_builtin_dtb_table))
		__soc_builtin_dtb_table_end = .;
	}

	. = ALIGN(8);
	.alternative : {
		__alt_start = .;
		*(.alternative)
		__alt_end = .;
	}
	__init_end = .;

	. = ALIGN(16);
	.xip.traps : {
		__xip_traps_start = .;
		*(.xip.traps)
		__xip_traps_end = .;
	}

	. = ALIGN(PAGE_SIZE);
	.sdata : {
		__global_pointer$ = . + 0x800;
		*(.sdata*)
		*(.sbss*)
	}

	BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 0)

	PERCPU_SECTION(L1_CACHE_BYTES)

	.rel.dyn : AT(ADDR(.rel.dyn) - LOAD_OFFSET) {
		*(.rel.dyn*)
	}

	/*
	 * End of copied data. We need a dummy section to get its LMA.
	 * Also located before final ALIGN() as trailing padding is not stored
	 * in the resulting binary file and useless to copy.
	 */
	.data.endmark : AT(ADDR(.data.endmark) - LOAD_OFFSET) { }
	_edata_loc = LOADADDR(.data.endmark);

	. = ALIGN(PAGE_SIZE);
	_end = .;

	STABS_DEBUG
	DWARF_DEBUG

	DISCARDS
}
aixcc-public/challenge-001-exemplar-source
3,167
arch/riscv/kernel/fpu.S
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 */

#include <linux/linkage.h>

#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/asm-offsets.h>

/*
 * __fstate_save(struct task_struct *a0)
 *
 * Save the full D-extension FP register file (f0-f31) and fcsr into
 * a0->thread.fstate.  SR_FS is temporarily set in CSR_STATUS so the FP
 * unit is accessible, and cleared again before returning (the kernel
 * normally runs with the FPU disabled to catch illegal FP use).
 * Clobbers: a2, t0, t1.
 */
ENTRY(__fstate_save)
	li  a2,  TASK_THREAD_F0
	add a0, a0, a2			/* a0 = &task->thread.fstate */
	li t1, SR_FS
	csrs CSR_STATUS, t1		/* enable FP access */
	frcsr t0			/* capture fcsr before any FP stores */
	fsd f0,  TASK_THREAD_F0_F0(a0)
	fsd f1,  TASK_THREAD_F1_F0(a0)
	fsd f2,  TASK_THREAD_F2_F0(a0)
	fsd f3,  TASK_THREAD_F3_F0(a0)
	fsd f4,  TASK_THREAD_F4_F0(a0)
	fsd f5,  TASK_THREAD_F5_F0(a0)
	fsd f6,  TASK_THREAD_F6_F0(a0)
	fsd f7,  TASK_THREAD_F7_F0(a0)
	fsd f8,  TASK_THREAD_F8_F0(a0)
	fsd f9,  TASK_THREAD_F9_F0(a0)
	fsd f10, TASK_THREAD_F10_F0(a0)
	fsd f11, TASK_THREAD_F11_F0(a0)
	fsd f12, TASK_THREAD_F12_F0(a0)
	fsd f13, TASK_THREAD_F13_F0(a0)
	fsd f14, TASK_THREAD_F14_F0(a0)
	fsd f15, TASK_THREAD_F15_F0(a0)
	fsd f16, TASK_THREAD_F16_F0(a0)
	fsd f17, TASK_THREAD_F17_F0(a0)
	fsd f18, TASK_THREAD_F18_F0(a0)
	fsd f19, TASK_THREAD_F19_F0(a0)
	fsd f20, TASK_THREAD_F20_F0(a0)
	fsd f21, TASK_THREAD_F21_F0(a0)
	fsd f22, TASK_THREAD_F22_F0(a0)
	fsd f23, TASK_THREAD_F23_F0(a0)
	fsd f24, TASK_THREAD_F24_F0(a0)
	fsd f25, TASK_THREAD_F25_F0(a0)
	fsd f26, TASK_THREAD_F26_F0(a0)
	fsd f27, TASK_THREAD_F27_F0(a0)
	fsd f28, TASK_THREAD_F28_F0(a0)
	fsd f29, TASK_THREAD_F29_F0(a0)
	fsd f30, TASK_THREAD_F30_F0(a0)
	fsd f31, TASK_THREAD_F31_F0(a0)
	sw t0, TASK_THREAD_FCSR_F0(a0)
	csrc CSR_STATUS, t1		/* disable FP access again */
	ret
ENDPROC(__fstate_save)

/*
 * __fstate_restore(struct task_struct *a0)
 *
 * Inverse of __fstate_save: reload f0-f31 and fcsr from
 * a0->thread.fstate, bracketed by the same temporary SR_FS enable.
 * Clobbers: a2, t0, t1.
 */
ENTRY(__fstate_restore)
	li  a2,  TASK_THREAD_F0
	add a0, a0, a2			/* a0 = &task->thread.fstate */
	li t1, SR_FS
	lw t0, TASK_THREAD_FCSR_F0(a0)
	csrs CSR_STATUS, t1		/* enable FP access */
	fld f0,  TASK_THREAD_F0_F0(a0)
	fld f1,  TASK_THREAD_F1_F0(a0)
	fld f2,  TASK_THREAD_F2_F0(a0)
	fld f3,  TASK_THREAD_F3_F0(a0)
	fld f4,  TASK_THREAD_F4_F0(a0)
	fld f5,  TASK_THREAD_F5_F0(a0)
	fld f6,  TASK_THREAD_F6_F0(a0)
	fld f7,  TASK_THREAD_F7_F0(a0)
	fld f8,  TASK_THREAD_F8_F0(a0)
	fld f9,  TASK_THREAD_F9_F0(a0)
	fld f10, TASK_THREAD_F10_F0(a0)
	fld f11, TASK_THREAD_F11_F0(a0)
	fld f12, TASK_THREAD_F12_F0(a0)
	fld f13, TASK_THREAD_F13_F0(a0)
	fld f14, TASK_THREAD_F14_F0(a0)
	fld f15, TASK_THREAD_F15_F0(a0)
	fld f16, TASK_THREAD_F16_F0(a0)
	fld f17, TASK_THREAD_F17_F0(a0)
	fld f18, TASK_THREAD_F18_F0(a0)
	fld f19, TASK_THREAD_F19_F0(a0)
	fld f20, TASK_THREAD_F20_F0(a0)
	fld f21, TASK_THREAD_F21_F0(a0)
	fld f22, TASK_THREAD_F22_F0(a0)
	fld f23, TASK_THREAD_F23_F0(a0)
	fld f24, TASK_THREAD_F24_F0(a0)
	fld f25, TASK_THREAD_F25_F0(a0)
	fld f26, TASK_THREAD_F26_F0(a0)
	fld f27, TASK_THREAD_F27_F0(a0)
	fld f28, TASK_THREAD_F28_F0(a0)
	fld f29, TASK_THREAD_F29_F0(a0)
	fld f30, TASK_THREAD_F30_F0(a0)
	fld f31, TASK_THREAD_F31_F0(a0)
	fscsr t0			/* restore fcsr after FP loads */
	csrc CSR_STATUS, t1		/* disable FP access again */
	ret
ENDPROC(__fstate_restore)
aixcc-public/challenge-001-exemplar-source
3,780
arch/riscv/kernel/mcount-dyn.S
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2017 Andes Technology Corporation */

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm-generic/export.h>
#include <asm/ftrace.h>

	.text

/*
 * Dynamic-ftrace trampolines.  Patched call sites jump here with t0
 * holding the return address into the traced function; a0 is derived
 * from t0 by subtracting FENTRY_RA_OFFSET to recover the traced
 * function's entry PC.
 */

#define FENTRY_RA_OFFSET	8
#define ABI_SIZE_ON_STACK	80
#define ABI_A0			0
#define ABI_A1			8
#define ABI_A2			16
#define ABI_A3			24
#define ABI_A4			32
#define ABI_A5			40
#define ABI_A6			48
#define ABI_A7			56
#define ABI_T0			64
#define ABI_RA			72

	/* Save the argument registers plus t0/ra (the minimal ABI state). */
	.macro SAVE_ABI
	addi	sp, sp, -ABI_SIZE_ON_STACK

	REG_S	a0, ABI_A0(sp)
	REG_S	a1, ABI_A1(sp)
	REG_S	a2, ABI_A2(sp)
	REG_S	a3, ABI_A3(sp)
	REG_S	a4, ABI_A4(sp)
	REG_S	a5, ABI_A5(sp)
	REG_S	a6, ABI_A6(sp)
	REG_S	a7, ABI_A7(sp)
	REG_S	t0, ABI_T0(sp)
	REG_S	ra, ABI_RA(sp)
	.endm

	.macro RESTORE_ABI
	REG_L	a0, ABI_A0(sp)
	REG_L	a1, ABI_A1(sp)
	REG_L	a2, ABI_A2(sp)
	REG_L	a3, ABI_A3(sp)
	REG_L	a4, ABI_A4(sp)
	REG_L	a5, ABI_A5(sp)
	REG_L	a6, ABI_A6(sp)
	REG_L	a7, ABI_A7(sp)
	REG_L	t0, ABI_T0(sp)
	REG_L	ra, ABI_RA(sp)
	addi	sp, sp, ABI_SIZE_ON_STACK
	.endm

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/* Save a full struct pt_regs; t0 stands in for the trap-time EPC. */
	.macro SAVE_ALL
	addi	sp, sp, -PT_SIZE_ON_STACK

	REG_S t0,  PT_EPC(sp)
	REG_S x1,  PT_RA(sp)
	REG_S x2,  PT_SP(sp)
	REG_S x3,  PT_GP(sp)
	REG_S x4,  PT_TP(sp)
	REG_S x5,  PT_T0(sp)
	REG_S x6,  PT_T1(sp)
	REG_S x7,  PT_T2(sp)
	REG_S x8,  PT_S0(sp)
	REG_S x9,  PT_S1(sp)
	REG_S x10, PT_A0(sp)
	REG_S x11, PT_A1(sp)
	REG_S x12, PT_A2(sp)
	REG_S x13, PT_A3(sp)
	REG_S x14, PT_A4(sp)
	REG_S x15, PT_A5(sp)
	REG_S x16, PT_A6(sp)
	REG_S x17, PT_A7(sp)
	REG_S x18, PT_S2(sp)
	REG_S x19, PT_S3(sp)
	REG_S x20, PT_S4(sp)
	REG_S x21, PT_S5(sp)
	REG_S x22, PT_S6(sp)
	REG_S x23, PT_S7(sp)
	REG_S x24, PT_S8(sp)
	REG_S x25, PT_S9(sp)
	REG_S x26, PT_S10(sp)
	REG_S x27, PT_S11(sp)
	REG_S x28, PT_T3(sp)
	REG_S x29, PT_T4(sp)
	REG_S x30, PT_T5(sp)
	REG_S x31, PT_T6(sp)
	.endm

	/*
	 * Note: x5 (t0) is intentionally NOT reloaded from PT_T0 -- t0 is
	 * loaded from PT_EPC instead, because ftrace_regs_caller returns
	 * to the traced code via "jr t0" after this macro.
	 */
	.macro RESTORE_ALL
	REG_L t0,  PT_EPC(sp)
	REG_L x1,  PT_RA(sp)
	REG_L x2,  PT_SP(sp)
	REG_L x3,  PT_GP(sp)
	REG_L x4,  PT_TP(sp)
	REG_L x6,  PT_T1(sp)
	REG_L x7,  PT_T2(sp)
	REG_L x8,  PT_S0(sp)
	REG_L x9,  PT_S1(sp)
	REG_L x10, PT_A0(sp)
	REG_L x11, PT_A1(sp)
	REG_L x12, PT_A2(sp)
	REG_L x13, PT_A3(sp)
	REG_L x14, PT_A4(sp)
	REG_L x15, PT_A5(sp)
	REG_L x16, PT_A6(sp)
	REG_L x17, PT_A7(sp)
	REG_L x18, PT_S2(sp)
	REG_L x19, PT_S3(sp)
	REG_L x20, PT_S4(sp)
	REG_L x21, PT_S5(sp)
	REG_L x22, PT_S6(sp)
	REG_L x23, PT_S7(sp)
	REG_L x24, PT_S8(sp)
	REG_L x25, PT_S9(sp)
	REG_L x26, PT_S10(sp)
	REG_L x27, PT_S11(sp)
	REG_L x28, PT_T3(sp)
	REG_L x29, PT_T4(sp)
	REG_L x30, PT_T5(sp)
	REG_L x31, PT_T6(sp)

	addi	sp, sp, PT_SIZE_ON_STACK
	.endm
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */

ENTRY(ftrace_caller)
	SAVE_ABI

	/* a0 = entry PC of the traced function; a1 = its caller's ra */
	addi	a0, t0, -FENTRY_RA_OFFSET
	la	a1, function_trace_op
	REG_L	a2, 0(a1)
	mv	a1, ra
	mv	a3, sp

ftrace_call:
	/* Patched at runtime to call the active tracer */
	.global ftrace_call
	call	ftrace_stub

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* a0 = &saved ra slot (hooked on return), a1 = traced function PC */
	addi	a0, sp, ABI_RA
	REG_L	a1, ABI_T0(sp)
	addi	a1, a1, -FENTRY_RA_OFFSET
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	mv	a2, s0
#endif
ftrace_graph_call:
	.global ftrace_graph_call
	call	ftrace_stub
#endif
	RESTORE_ABI
	/* t0 still holds the address to resume the traced function */
	jr t0
ENDPROC(ftrace_caller)

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
ENTRY(ftrace_regs_caller)
	SAVE_ALL

	addi	a0, t0, -FENTRY_RA_OFFSET
	la	a1, function_trace_op
	REG_L	a2, 0(a1)
	mv	a1, ra
	mv	a3, sp

ftrace_regs_call:
	/* Patched at runtime to call the active tracer (with regs) */
	.global ftrace_regs_call
	call	ftrace_stub

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	addi	a0, sp, PT_RA
	REG_L	a1, PT_EPC(sp)
	addi	a1, a1, -FENTRY_RA_OFFSET
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	mv	a2, s0
#endif
ftrace_graph_regs_call:
	.global ftrace_graph_regs_call
	call	ftrace_stub
#endif

	RESTORE_ALL
	jr t0
ENDPROC(ftrace_regs_caller)
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
aixcc-public/challenge-001-exemplar-source
2,641
arch/riscv/kernel/mcount.S
/* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (C) 2017 Andes Technology Corporation */ #include <linux/init.h> #include <linux/linkage.h> #include <asm/asm.h> #include <asm/csr.h> #include <asm/unistd.h> #include <asm/thread_info.h> #include <asm/asm-offsets.h> #include <asm-generic/export.h> #include <asm/ftrace.h> .text .macro SAVE_ABI_STATE addi sp, sp, -16 sd s0, 0(sp) sd ra, 8(sp) addi s0, sp, 16 .endm /* * The call to ftrace_return_to_handler would overwrite the return * register if a0 was not saved. */ .macro SAVE_RET_ABI_STATE addi sp, sp, -32 sd s0, 16(sp) sd ra, 24(sp) sd a0, 8(sp) addi s0, sp, 32 .endm .macro RESTORE_ABI_STATE ld ra, 8(sp) ld s0, 0(sp) addi sp, sp, 16 .endm .macro RESTORE_RET_ABI_STATE ld ra, 24(sp) ld s0, 16(sp) ld a0, 8(sp) addi sp, sp, 32 .endm ENTRY(ftrace_stub) #ifdef CONFIG_DYNAMIC_FTRACE .global MCOUNT_NAME .set MCOUNT_NAME, ftrace_stub #endif ret ENDPROC(ftrace_stub) #ifdef CONFIG_FUNCTION_GRAPH_TRACER ENTRY(return_to_handler) /* * On implementing the frame point test, the ideal way is to compare the * s0 (frame pointer, if enabled) on entry and the sp (stack pointer) on return. * However, the psABI of variable-length-argument functions does not allow this. * * So alternatively we check the *old* frame pointer position, that is, the * value stored in -16(s0) on entry, and the s0 on return. 
*/ #ifdef HAVE_FUNCTION_GRAPH_FP_TEST mv t6, s0 #endif SAVE_RET_ABI_STATE #ifdef HAVE_FUNCTION_GRAPH_FP_TEST mv a0, t6 #endif call ftrace_return_to_handler mv a1, a0 RESTORE_RET_ABI_STATE jalr a1 ENDPROC(return_to_handler) #endif #ifndef CONFIG_DYNAMIC_FTRACE ENTRY(MCOUNT_NAME) la t4, ftrace_stub #ifdef CONFIG_FUNCTION_GRAPH_TRACER la t0, ftrace_graph_return ld t1, 0(t0) bne t1, t4, do_ftrace_graph_caller la t3, ftrace_graph_entry ld t2, 0(t3) la t6, ftrace_graph_entry_stub bne t2, t6, do_ftrace_graph_caller #endif la t3, ftrace_trace_function ld t5, 0(t3) bne t5, t4, do_trace ret #ifdef CONFIG_FUNCTION_GRAPH_TRACER /* * A pseudo representation for the function graph tracer: * prepare_to_return(&ra_to_caller_of_caller, ra_to_caller) */ do_ftrace_graph_caller: addi a0, s0, -8 mv a1, ra #ifdef HAVE_FUNCTION_GRAPH_FP_TEST ld a2, -16(s0) #endif SAVE_ABI_STATE call prepare_ftrace_return RESTORE_ABI_STATE ret #endif /* * A pseudo representation for the function tracer: * (*ftrace_trace_function)(ra_to_caller, ra_to_caller_of_caller) */ do_trace: ld a1, -8(s0) mv a0, ra SAVE_ABI_STATE jalr t5 RESTORE_ABI_STATE ret ENDPROC(MCOUNT_NAME) #endif EXPORT_SYMBOL(MCOUNT_NAME)
aixcc-public/challenge-001-exemplar-source
2,669
arch/riscv/kernel/vmlinux.lds.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2012 Regents of the University of California * Copyright (C) 2017 SiFive */ #define RO_EXCEPTION_TABLE_ALIGN 4 #ifdef CONFIG_XIP_KERNEL #include "vmlinux-xip.lds.S" #else #include <asm/pgtable.h> #define LOAD_OFFSET KERNEL_LINK_ADDR #include <asm/vmlinux.lds.h> #include <asm/page.h> #include <asm/cache.h> #include <asm/thread_info.h> #include <asm/set_memory.h> #include "image-vars.h" #include <linux/sizes.h> OUTPUT_ARCH(riscv) ENTRY(_start) jiffies = jiffies_64; PECOFF_SECTION_ALIGNMENT = 0x1000; PECOFF_FILE_ALIGNMENT = 0x200; SECTIONS { /* Beginning of code and text segment */ . = LOAD_OFFSET; _start = .; HEAD_TEXT_SECTION . = ALIGN(PAGE_SIZE); .text : { _text = .; _stext = .; TEXT_TEXT SCHED_TEXT CPUIDLE_TEXT LOCK_TEXT KPROBES_TEXT ENTRY_TEXT IRQENTRY_TEXT SOFTIRQENTRY_TEXT _etext = .; } . = ALIGN(SECTION_ALIGN); __init_begin = .; __init_text_begin = .; .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) ALIGN(SECTION_ALIGN) { \ _sinittext = .; \ INIT_TEXT \ _einittext = .; \ } . = ALIGN(8); __soc_early_init_table : { __soc_early_init_table_start = .; KEEP(*(__soc_early_init_table)) __soc_early_init_table_end = .; } __soc_builtin_dtb_table : { __soc_builtin_dtb_table_start = .; KEEP(*(__soc_builtin_dtb_table)) __soc_builtin_dtb_table_end = .; } /* we have to discard exit text and such at runtime, not link time */ .exit.text : { EXIT_TEXT } __init_text_end = .; . = ALIGN(SECTION_ALIGN); #ifdef CONFIG_EFI . = ALIGN(PECOFF_SECTION_ALIGNMENT); __pecoff_text_end = .; #endif /* Start of init data section */ __init_data_begin = .; INIT_DATA_SECTION(16) .exit.data : { EXIT_DATA } PERCPU_SECTION(L1_CACHE_BYTES) .rel.dyn : { *(.rel.dyn*) } __init_data_end = .; . = ALIGN(8); .alternative : { __alt_start = .; *(.alternative) __alt_end = .; } __init_end = .; /* Start of data section */ _sdata = .; RO_DATA(SECTION_ALIGN) .srodata : { *(.srodata*) } . 
= ALIGN(SECTION_ALIGN); _data = .; RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_ALIGN) .sdata : { __global_pointer$ = . + 0x800; *(.sdata*) } #ifdef CONFIG_EFI .pecoff_edata_padding : { BYTE(0); . = ALIGN(PECOFF_FILE_ALIGNMENT); } __pecoff_data_raw_size = ABSOLUTE(. - __pecoff_text_end); #endif /* End of data section */ _edata = .; BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 0) #ifdef CONFIG_EFI . = ALIGN(PECOFF_SECTION_ALIGNMENT); __pecoff_data_virt_size = ABSOLUTE(. - __pecoff_text_end); #endif _end = .; STABS_DEBUG DWARF_DEBUG ELF_DETAILS DISCARDS } #endif /* CONFIG_XIP_KERNEL */
aixcc-public/challenge-001-exemplar-source
1,596
arch/riscv/kernel/crash_save_regs.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2020 FORTH-ICS/CARV * Nick Kossifidis <mick@ics.forth.gr> */ #include <asm/asm.h> /* For RISCV_* and REG_* macros */ #include <asm/csr.h> /* For CSR_* macros */ #include <asm/asm-offsets.h> /* For offsets on pt_regs */ #include <linux/linkage.h> /* For SYM_* macros */ .section ".text" SYM_CODE_START(riscv_crash_save_regs) REG_S ra, PT_RA(a0) /* x1 */ REG_S sp, PT_SP(a0) /* x2 */ REG_S gp, PT_GP(a0) /* x3 */ REG_S tp, PT_TP(a0) /* x4 */ REG_S t0, PT_T0(a0) /* x5 */ REG_S t1, PT_T1(a0) /* x6 */ REG_S t2, PT_T2(a0) /* x7 */ REG_S s0, PT_S0(a0) /* x8/fp */ REG_S s1, PT_S1(a0) /* x9 */ REG_S a0, PT_A0(a0) /* x10 */ REG_S a1, PT_A1(a0) /* x11 */ REG_S a2, PT_A2(a0) /* x12 */ REG_S a3, PT_A3(a0) /* x13 */ REG_S a4, PT_A4(a0) /* x14 */ REG_S a5, PT_A5(a0) /* x15 */ REG_S a6, PT_A6(a0) /* x16 */ REG_S a7, PT_A7(a0) /* x17 */ REG_S s2, PT_S2(a0) /* x18 */ REG_S s3, PT_S3(a0) /* x19 */ REG_S s4, PT_S4(a0) /* x20 */ REG_S s5, PT_S5(a0) /* x21 */ REG_S s6, PT_S6(a0) /* x22 */ REG_S s7, PT_S7(a0) /* x23 */ REG_S s8, PT_S8(a0) /* x24 */ REG_S s9, PT_S9(a0) /* x25 */ REG_S s10, PT_S10(a0) /* x26 */ REG_S s11, PT_S11(a0) /* x27 */ REG_S t3, PT_T3(a0) /* x28 */ REG_S t4, PT_T4(a0) /* x29 */ REG_S t5, PT_T5(a0) /* x30 */ REG_S t6, PT_T6(a0) /* x31 */ csrr t1, CSR_STATUS auipc t2, 0x0 csrr t3, CSR_TVAL csrr t4, CSR_CAUSE REG_S t1, PT_STATUS(a0) REG_S t2, PT_EPC(a0) REG_S t3, PT_BADADDR(a0) REG_S t4, PT_CAUSE(a0) ret SYM_CODE_END(riscv_crash_save_regs)
aixcc-public/challenge-001-exemplar-source
4,488
arch/riscv/kernel/kexec_relocate.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2019 FORTH-ICS/CARV * Nick Kossifidis <mick@ics.forth.gr> */ #include <asm/asm.h> /* For RISCV_* and REG_* macros */ #include <asm/csr.h> /* For CSR_* macros */ #include <asm/page.h> /* For PAGE_SIZE */ #include <linux/linkage.h> /* For SYM_* macros */ .section ".rodata" SYM_CODE_START(riscv_kexec_relocate) /* * s0: Pointer to the current entry * s1: (const) Phys address to jump to after relocation * s2: (const) Phys address of the FDT image * s3: (const) The hartid of the current hart * s4: Pointer to the destination address for the relocation * s5: (const) Number of words per page * s6: (const) 1, used for subtraction * s7: (const) kernel_map.va_pa_offset, used when switching MMU off * s8: (const) Physical address of the main loop * s9: (debug) indirection page counter * s10: (debug) entry counter * s11: (debug) copied words counter */ mv s0, a0 mv s1, a1 mv s2, a2 mv s3, a3 mv s4, zero li s5, (PAGE_SIZE / RISCV_SZPTR) li s6, 1 mv s7, a4 mv s8, zero mv s9, zero mv s10, zero mv s11, zero /* Disable / cleanup interrupts */ csrw CSR_SIE, zero csrw CSR_SIP, zero /* * When we switch SATP.MODE to "Bare" we'll only * play with physical addresses. However the first time * we try to jump somewhere, the offset on the jump * will be relative to pc which will still be on VA. To * deal with this we set stvec to the physical address at * the start of the loop below so that we jump there in * any case. */ la s8, 1f sub s8, s8, s7 csrw CSR_STVEC, s8 /* Process entries in a loop */ .align 2 1: addi s10, s10, 1 REG_L t0, 0(s0) /* t0 = *image->entry */ addi s0, s0, RISCV_SZPTR /* image->entry++ */ /* IND_DESTINATION entry ? -> save destination address */ andi t1, t0, 0x1 beqz t1, 2f andi s4, t0, ~0x1 j 1b 2: /* IND_INDIRECTION entry ? -> update next entry ptr (PA) */ andi t1, t0, 0x2 beqz t1, 2f andi s0, t0, ~0x2 addi s9, s9, 1 csrw CSR_SATP, zero jalr zero, s8, 0 2: /* IND_DONE entry ? 
-> jump to done label */ andi t1, t0, 0x4 beqz t1, 2f j 4f 2: /* * IND_SOURCE entry ? -> copy page word by word to the * destination address we got from IND_DESTINATION */ andi t1, t0, 0x8 beqz t1, 1b /* Unknown entry type, ignore it */ andi t0, t0, ~0x8 mv t3, s5 /* i = num words per page */ 3: /* copy loop */ REG_L t1, (t0) /* t1 = *src_ptr */ REG_S t1, (s4) /* *dst_ptr = *src_ptr */ addi t0, t0, RISCV_SZPTR /* stc_ptr++ */ addi s4, s4, RISCV_SZPTR /* dst_ptr++ */ sub t3, t3, s6 /* i-- */ addi s11, s11, 1 /* c++ */ beqz t3, 1b /* copy done ? */ j 3b 4: /* Pass the arguments to the next kernel / Cleanup*/ mv a0, s3 mv a1, s2 mv a2, s1 /* Cleanup */ mv a3, zero mv a4, zero mv a5, zero mv a6, zero mv a7, zero mv s0, zero mv s1, zero mv s2, zero mv s3, zero mv s4, zero mv s5, zero mv s6, zero mv s7, zero mv s8, zero mv s9, zero mv s10, zero mv s11, zero mv t0, zero mv t1, zero mv t2, zero mv t3, zero mv t4, zero mv t5, zero mv t6, zero csrw CSR_SEPC, zero csrw CSR_SCAUSE, zero csrw CSR_SSCRATCH, zero /* * Make sure the relocated code is visible * and jump to the new kernel */ fence.i jalr zero, a2, 0 SYM_CODE_END(riscv_kexec_relocate) riscv_kexec_relocate_end: /* Used for jumping to crashkernel */ .section ".text" SYM_CODE_START(riscv_kexec_norelocate) /* * s0: (const) Phys address to jump to * s1: (const) Phys address of the FDT image * s2: (const) The hartid of the current hart */ mv s0, a1 mv s1, a2 mv s2, a3 /* Disable / cleanup interrupts */ csrw CSR_SIE, zero csrw CSR_SIP, zero /* Pass the arguments to the next kernel / Cleanup*/ mv a0, s2 mv a1, s1 mv a2, s0 /* Cleanup */ mv a3, zero mv a4, zero mv a5, zero mv a6, zero mv a7, zero mv s0, zero mv s1, zero mv s2, zero mv s3, zero mv s4, zero mv s5, zero mv s6, zero mv s7, zero mv s8, zero mv s9, zero mv s10, zero mv s11, zero mv t0, zero mv t1, zero mv t2, zero mv t3, zero mv t4, zero mv t5, zero mv t6, zero csrw CSR_SEPC, zero csrw CSR_SCAUSE, zero csrw CSR_SSCRATCH, zero /* * Switch to physical addressing * 
This will also trigger a jump to CSR_STVEC * which in this case is the address of the new * kernel. */ csrw CSR_STVEC, a2 csrw CSR_SATP, zero SYM_CODE_END(riscv_kexec_norelocate) .section ".rodata" SYM_DATA(riscv_kexec_relocate_size, .long riscv_kexec_relocate_end - riscv_kexec_relocate)
aixcc-public/challenge-001-exemplar-source
2,147
arch/riscv/lib/memcpy.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2013 Regents of the University of California */ #include <linux/linkage.h> #include <asm/asm.h> /* void *memcpy(void *, const void *, size_t) */ ENTRY(__memcpy) WEAK(memcpy) move t6, a0 /* Preserve return value */ /* Defer to byte-oriented copy for small sizes */ sltiu a3, a2, 128 bnez a3, 4f /* Use word-oriented copy only if low-order bits match */ andi a3, t6, SZREG-1 andi a4, a1, SZREG-1 bne a3, a4, 4f beqz a3, 2f /* Skip if already aligned */ /* * Round to nearest double word-aligned address * greater than or equal to start address */ andi a3, a1, ~(SZREG-1) addi a3, a3, SZREG /* Handle initial misalignment */ sub a4, a3, a1 1: lb a5, 0(a1) addi a1, a1, 1 sb a5, 0(t6) addi t6, t6, 1 bltu a1, a3, 1b sub a2, a2, a4 /* Update count */ 2: andi a4, a2, ~((16*SZREG)-1) beqz a4, 4f add a3, a1, a4 3: REG_L a4, 0(a1) REG_L a5, SZREG(a1) REG_L a6, 2*SZREG(a1) REG_L a7, 3*SZREG(a1) REG_L t0, 4*SZREG(a1) REG_L t1, 5*SZREG(a1) REG_L t2, 6*SZREG(a1) REG_L t3, 7*SZREG(a1) REG_L t4, 8*SZREG(a1) REG_L t5, 9*SZREG(a1) REG_S a4, 0(t6) REG_S a5, SZREG(t6) REG_S a6, 2*SZREG(t6) REG_S a7, 3*SZREG(t6) REG_S t0, 4*SZREG(t6) REG_S t1, 5*SZREG(t6) REG_S t2, 6*SZREG(t6) REG_S t3, 7*SZREG(t6) REG_S t4, 8*SZREG(t6) REG_S t5, 9*SZREG(t6) REG_L a4, 10*SZREG(a1) REG_L a5, 11*SZREG(a1) REG_L a6, 12*SZREG(a1) REG_L a7, 13*SZREG(a1) REG_L t0, 14*SZREG(a1) REG_L t1, 15*SZREG(a1) addi a1, a1, 16*SZREG REG_S a4, 10*SZREG(t6) REG_S a5, 11*SZREG(t6) REG_S a6, 12*SZREG(t6) REG_S a7, 13*SZREG(t6) REG_S t0, 14*SZREG(t6) REG_S t1, 15*SZREG(t6) addi t6, t6, 16*SZREG bltu a1, a3, 3b andi a2, a2, (16*SZREG)-1 /* Update count */ 4: /* Handle trailing misalignment */ beqz a2, 6f add a3, a1, a2 /* Use word-oriented copy if co-aligned to word boundary */ or a5, a1, t6 or a5, a5, a3 andi a5, a5, 3 bnez a5, 5f 7: lw a4, 0(a1) addi a1, a1, 4 sw a4, 0(t6) addi t6, t6, 4 bltu a1, a3, 7b ret 5: lb a4, 0(a1) addi a1, a1, 1 sb a4, 0(t6) addi t6, t6, 1 bltu 
a1, a3, 5b 6: ret END(__memcpy)
aixcc-public/challenge-001-exemplar-source
1,030
arch/riscv/lib/tishift.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2018 Free Software Foundation, Inc. */ #include <linux/linkage.h> #include <asm-generic/export.h> SYM_FUNC_START(__lshrti3) beqz a2, .L1 li a5,64 sub a5,a5,a2 sext.w a4,a5 blez a5, .L2 sext.w a2,a2 srl a0,a0,a2 sll a4,a1,a4 srl a2,a1,a2 or a0,a0,a4 mv a1,a2 .L1: ret .L2: negw a0,a4 li a2,0 srl a0,a1,a0 mv a1,a2 ret SYM_FUNC_END(__lshrti3) EXPORT_SYMBOL(__lshrti3) SYM_FUNC_START(__ashrti3) beqz a2, .L3 li a5,64 sub a5,a5,a2 sext.w a4,a5 blez a5, .L4 sext.w a2,a2 srl a0,a0,a2 sll a4,a1,a4 sra a2,a1,a2 or a0,a0,a4 mv a1,a2 .L3: ret .L4: negw a0,a4 srai a2,a1,0x3f sra a0,a1,a0 mv a1,a2 ret SYM_FUNC_END(__ashrti3) EXPORT_SYMBOL(__ashrti3) SYM_FUNC_START(__ashlti3) beqz a2, .L5 li a5,64 sub a5,a5,a2 sext.w a4,a5 blez a5, .L6 sext.w a2,a2 sll a1,a1,a2 srl a4,a0,a4 sll a2,a0,a2 or a1,a1,a4 mv a0,a2 .L5: ret .L6: negw a1,a4 li a2,0 sll a1,a0,a1 mv a0,a2 ret SYM_FUNC_END(__ashlti3) EXPORT_SYMBOL(__ashlti3)
aixcc-public/challenge-001-exemplar-source
8,025
arch/riscv/lib/memmove.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2022 Michael T. Kloos <michael@michaelkloos.com> */ #include <linux/linkage.h> #include <asm/asm.h> SYM_FUNC_START(__memmove) SYM_FUNC_START_WEAK(memmove) /* * Returns * a0 - dest * * Parameters * a0 - Inclusive first byte of dest * a1 - Inclusive first byte of src * a2 - Length of copy n * * Because the return matches the parameter register a0, * we will not clobber or modify that register. * * Note: This currently only works on little-endian. * To port to big-endian, reverse the direction of shifts * in the 2 misaligned fixup copy loops. */ /* Return if nothing to do */ beq a0, a1, return_from_memmove beqz a2, return_from_memmove /* * Register Uses * Forward Copy: a1 - Index counter of src * Reverse Copy: a4 - Index counter of src * Forward Copy: t3 - Index counter of dest * Reverse Copy: t4 - Index counter of dest * Both Copy Modes: t5 - Inclusive first multibyte/aligned of dest * Both Copy Modes: t6 - Non-Inclusive last multibyte/aligned of dest * Both Copy Modes: t0 - Link / Temporary for load-store * Both Copy Modes: t1 - Temporary for load-store * Both Copy Modes: t2 - Temporary for load-store * Both Copy Modes: a5 - dest to src alignment offset * Both Copy Modes: a6 - Shift ammount * Both Copy Modes: a7 - Inverse Shift ammount * Both Copy Modes: a2 - Alternate breakpoint for unrolled loops */ /* * Solve for some register values now. * Byte copy does not need t5 or t6. */ mv t3, a0 add t4, a0, a2 add a4, a1, a2 /* * Byte copy if copying less than (2 * SZREG) bytes. This can * cause problems with the bulk copy implementation and is * small enough not to bother. */ andi t0, a2, -(2 * SZREG) beqz t0, byte_copy /* * Now solve for t5 and t6. */ andi t5, t3, -SZREG andi t6, t4, -SZREG /* * If dest(Register t3) rounded down to the nearest naturally * aligned SZREG address, does not equal dest, then add SZREG * to find the low-bound of SZREG alignment in the dest memory * region. 
Note that this could overshoot the dest memory * region if n is less than SZREG. This is one reason why * we always byte copy if n is less than SZREG. * Otherwise, dest is already naturally aligned to SZREG. */ beq t5, t3, 1f addi t5, t5, SZREG 1: /* * If the dest and src are co-aligned to SZREG, then there is * no need for the full rigmarole of a full misaligned fixup copy. * Instead, do a simpler co-aligned copy. */ xor t0, a0, a1 andi t1, t0, (SZREG - 1) beqz t1, coaligned_copy /* Fall through to misaligned fixup copy */ misaligned_fixup_copy: bltu a1, a0, misaligned_fixup_copy_reverse misaligned_fixup_copy_forward: jal t0, byte_copy_until_aligned_forward andi a5, a1, (SZREG - 1) /* Find the alignment offset of src (a1) */ slli a6, a5, 3 /* Multiply by 8 to convert that to bits to shift */ sub a5, a1, t3 /* Find the difference between src and dest */ andi a1, a1, -SZREG /* Align the src pointer */ addi a2, t6, SZREG /* The other breakpoint for the unrolled loop*/ /* * Compute The Inverse Shift * a7 = XLEN - a6 = XLEN + -a6 * 2s complement negation to find the negative: -a6 = ~a6 + 1 * Add that to XLEN. XLEN = SZREG * 8. 
*/ not a7, a6 addi a7, a7, (SZREG * 8 + 1) /* * Fix Misalignment Copy Loop - Forward * load_val0 = load_ptr[0]; * do { * load_val1 = load_ptr[1]; * store_ptr += 2; * store_ptr[0 - 2] = (load_val0 >> {a6}) | (load_val1 << {a7}); * * if (store_ptr == {a2}) * break; * * load_val0 = load_ptr[2]; * load_ptr += 2; * store_ptr[1 - 2] = (load_val1 >> {a6}) | (load_val0 << {a7}); * * } while (store_ptr != store_ptr_end); * store_ptr = store_ptr_end; */ REG_L t0, (0 * SZREG)(a1) 1: REG_L t1, (1 * SZREG)(a1) addi t3, t3, (2 * SZREG) srl t0, t0, a6 sll t2, t1, a7 or t2, t0, t2 REG_S t2, ((0 * SZREG) - (2 * SZREG))(t3) beq t3, a2, 2f REG_L t0, (2 * SZREG)(a1) addi a1, a1, (2 * SZREG) srl t1, t1, a6 sll t2, t0, a7 or t2, t1, t2 REG_S t2, ((1 * SZREG) - (2 * SZREG))(t3) bne t3, t6, 1b 2: mv t3, t6 /* Fix the dest pointer in case the loop was broken */ add a1, t3, a5 /* Restore the src pointer */ j byte_copy_forward /* Copy any remaining bytes */ misaligned_fixup_copy_reverse: jal t0, byte_copy_until_aligned_reverse andi a5, a4, (SZREG - 1) /* Find the alignment offset of src (a4) */ slli a6, a5, 3 /* Multiply by 8 to convert that to bits to shift */ sub a5, a4, t4 /* Find the difference between src and dest */ andi a4, a4, -SZREG /* Align the src pointer */ addi a2, t5, -SZREG /* The other breakpoint for the unrolled loop*/ /* * Compute The Inverse Shift * a7 = XLEN - a6 = XLEN + -a6 * 2s complement negation to find the negative: -a6 = ~a6 + 1 * Add that to XLEN. XLEN = SZREG * 8. 
*/ not a7, a6 addi a7, a7, (SZREG * 8 + 1) /* * Fix Misalignment Copy Loop - Reverse * load_val1 = load_ptr[0]; * do { * load_val0 = load_ptr[-1]; * store_ptr -= 2; * store_ptr[1] = (load_val0 >> {a6}) | (load_val1 << {a7}); * * if (store_ptr == {a2}) * break; * * load_val1 = load_ptr[-2]; * load_ptr -= 2; * store_ptr[0] = (load_val1 >> {a6}) | (load_val0 << {a7}); * * } while (store_ptr != store_ptr_end); * store_ptr = store_ptr_end; */ REG_L t1, ( 0 * SZREG)(a4) 1: REG_L t0, (-1 * SZREG)(a4) addi t4, t4, (-2 * SZREG) sll t1, t1, a7 srl t2, t0, a6 or t2, t1, t2 REG_S t2, ( 1 * SZREG)(t4) beq t4, a2, 2f REG_L t1, (-2 * SZREG)(a4) addi a4, a4, (-2 * SZREG) sll t0, t0, a7 srl t2, t1, a6 or t2, t0, t2 REG_S t2, ( 0 * SZREG)(t4) bne t4, t5, 1b 2: mv t4, t5 /* Fix the dest pointer in case the loop was broken */ add a4, t4, a5 /* Restore the src pointer */ j byte_copy_reverse /* Copy any remaining bytes */ /* * Simple copy loops for SZREG co-aligned memory locations. * These also make calls to do byte copies for any unaligned * data at their terminations. */ coaligned_copy: bltu a1, a0, coaligned_copy_reverse coaligned_copy_forward: jal t0, byte_copy_until_aligned_forward 1: REG_L t1, ( 0 * SZREG)(a1) addi a1, a1, SZREG addi t3, t3, SZREG REG_S t1, (-1 * SZREG)(t3) bne t3, t6, 1b j byte_copy_forward /* Copy any remaining bytes */ coaligned_copy_reverse: jal t0, byte_copy_until_aligned_reverse 1: REG_L t1, (-1 * SZREG)(a4) addi a4, a4, -SZREG addi t4, t4, -SZREG REG_S t1, ( 0 * SZREG)(t4) bne t4, t5, 1b j byte_copy_reverse /* Copy any remaining bytes */ /* * These are basically sub-functions within the function. They * are used to byte copy until the dest pointer is in alignment. * At which point, a bulk copy method can be used by the * calling code. These work on the same registers as the bulk * copy loops. Therefore, the register values can be picked * up from where they were left and we avoid code duplication * without any overhead except the call in and return jumps. 
*/ byte_copy_until_aligned_forward: beq t3, t5, 2f 1: lb t1, 0(a1) addi a1, a1, 1 addi t3, t3, 1 sb t1, -1(t3) bne t3, t5, 1b 2: jalr zero, 0x0(t0) /* Return to multibyte copy loop */ byte_copy_until_aligned_reverse: beq t4, t6, 2f 1: lb t1, -1(a4) addi a4, a4, -1 addi t4, t4, -1 sb t1, 0(t4) bne t4, t6, 1b 2: jalr zero, 0x0(t0) /* Return to multibyte copy loop */ /* * Simple byte copy loops. * These will byte copy until they reach the end of data to copy. * At that point, they will call to return from memmove. */ byte_copy: bltu a1, a0, byte_copy_reverse byte_copy_forward: beq t3, t4, 2f 1: lb t1, 0(a1) addi a1, a1, 1 addi t3, t3, 1 sb t1, -1(t3) bne t3, t4, 1b 2: ret byte_copy_reverse: beq t4, t3, 2f 1: lb t1, -1(a4) addi a4, a4, -1 addi t4, t4, -1 sb t1, 0(t4) bne t4, t3, 1b 2: return_from_memmove: ret SYM_FUNC_END(memmove) SYM_FUNC_END(__memmove)
aixcc-public/challenge-001-exemplar-source
5,325
arch/riscv/lib/uaccess.S
#include <linux/linkage.h> #include <asm-generic/export.h> #include <asm/asm.h> #include <asm/asm-extable.h> #include <asm/csr.h> .macro fixup op reg addr lbl 100: \op \reg, \addr _asm_extable 100b, \lbl .endm ENTRY(__asm_copy_to_user) ENTRY(__asm_copy_from_user) /* Enable access to user memory */ li t6, SR_SUM csrs CSR_STATUS, t6 /* * Save the terminal address which will be used to compute the number * of bytes copied in case of a fixup exception. */ add t5, a0, a2 /* * Register allocation for code below: * a0 - start of uncopied dst * a1 - start of uncopied src * a2 - size * t0 - end of uncopied dst */ add t0, a0, a2 /* * Use byte copy only if too small. * SZREG holds 4 for RV32 and 8 for RV64 */ li a3, 9*SZREG /* size must be larger than size in word_copy */ bltu a2, a3, .Lbyte_copy_tail /* * Copy first bytes until dst is aligned to word boundary. * a0 - start of dst * t1 - start of aligned dst */ addi t1, a0, SZREG-1 andi t1, t1, ~(SZREG-1) /* dst is already aligned, skip */ beq a0, t1, .Lskip_align_dst 1: /* a5 - one byte for copying data */ fixup lb a5, 0(a1), 10f addi a1, a1, 1 /* src */ fixup sb a5, 0(a0), 10f addi a0, a0, 1 /* dst */ bltu a0, t1, 1b /* t1 - start of aligned dst */ .Lskip_align_dst: /* * Now dst is aligned. * Use shift-copy if src is misaligned. 
* Use word-copy if both src and dst are aligned because * can not use shift-copy which do not require shifting */ /* a1 - start of src */ andi a3, a1, SZREG-1 bnez a3, .Lshift_copy .Lword_copy: /* * Both src and dst are aligned, unrolled word copy * * a0 - start of aligned dst * a1 - start of aligned src * t0 - end of aligned dst */ addi t0, t0, -(8*SZREG) /* not to over run */ 2: fixup REG_L a4, 0(a1), 10f fixup REG_L a5, SZREG(a1), 10f fixup REG_L a6, 2*SZREG(a1), 10f fixup REG_L a7, 3*SZREG(a1), 10f fixup REG_L t1, 4*SZREG(a1), 10f fixup REG_L t2, 5*SZREG(a1), 10f fixup REG_L t3, 6*SZREG(a1), 10f fixup REG_L t4, 7*SZREG(a1), 10f fixup REG_S a4, 0(a0), 10f fixup REG_S a5, SZREG(a0), 10f fixup REG_S a6, 2*SZREG(a0), 10f fixup REG_S a7, 3*SZREG(a0), 10f fixup REG_S t1, 4*SZREG(a0), 10f fixup REG_S t2, 5*SZREG(a0), 10f fixup REG_S t3, 6*SZREG(a0), 10f fixup REG_S t4, 7*SZREG(a0), 10f addi a0, a0, 8*SZREG addi a1, a1, 8*SZREG bltu a0, t0, 2b addi t0, t0, 8*SZREG /* revert to original value */ j .Lbyte_copy_tail .Lshift_copy: /* * Word copy with shifting. * For misaligned copy we still perform aligned word copy, but * we need to use the value fetched from the previous iteration and * do some shifts. * This is safe because reading is less than a word size. 
* * a0 - start of aligned dst * a1 - start of src * a3 - a1 & mask:(SZREG-1) * t0 - end of uncopied dst * t1 - end of aligned dst */ /* calculating aligned word boundary for dst */ andi t1, t0, ~(SZREG-1) /* Converting unaligned src to aligned src */ andi a1, a1, ~(SZREG-1) /* * Calculate shifts * t3 - prev shift * t4 - current shift */ slli t3, a3, 3 /* converting bytes in a3 to bits */ li a5, SZREG*8 sub t4, a5, t3 /* Load the first word to combine with second word */ fixup REG_L a5, 0(a1), 10f 3: /* Main shifting copy * * a0 - start of aligned dst * a1 - start of aligned src * t1 - end of aligned dst */ /* At least one iteration will be executed */ srl a4, a5, t3 fixup REG_L a5, SZREG(a1), 10f addi a1, a1, SZREG sll a2, a5, t4 or a2, a2, a4 fixup REG_S a2, 0(a0), 10f addi a0, a0, SZREG bltu a0, t1, 3b /* Revert src to original unaligned value */ add a1, a1, a3 .Lbyte_copy_tail: /* * Byte copy anything left. * * a0 - start of remaining dst * a1 - start of remaining src * t0 - end of remaining dst */ bgeu a0, t0, .Lout_copy_user /* check if end of copy */ 4: fixup lb a5, 0(a1), 10f addi a1, a1, 1 /* src */ fixup sb a5, 0(a0), 10f addi a0, a0, 1 /* dst */ bltu a0, t0, 4b /* t0 - end of dst */ .Lout_copy_user: /* Disable access to user memory */ csrc CSR_STATUS, t6 li a0, 0 ret /* Exception fixup code */ 10: /* Disable access to user memory */ csrc CSR_STATUS, t6 sub a0, t5, a0 ret ENDPROC(__asm_copy_to_user) ENDPROC(__asm_copy_from_user) EXPORT_SYMBOL(__asm_copy_to_user) EXPORT_SYMBOL(__asm_copy_from_user) ENTRY(__clear_user) /* Enable access to user memory */ li t6, SR_SUM csrs CSR_STATUS, t6 add a3, a0, a1 addi t0, a0, SZREG-1 andi t1, a3, ~(SZREG-1) andi t0, t0, ~(SZREG-1) /* * a3: terminal address of target region * t0: lowest doubleword-aligned address in target region * t1: highest doubleword-aligned address in target region */ bgeu t0, t1, 2f bltu a0, t0, 4f 1: fixup REG_S, zero, (a0), 11f addi a0, a0, SZREG bltu a0, t1, 1b 2: bltu a0, a3, 5f 3: /* Disable 
access to user memory */ csrc CSR_STATUS, t6 li a0, 0 ret 4: /* Edge case: unalignment */ fixup sb, zero, (a0), 11f addi a0, a0, 1 bltu a0, t0, 4b j 1b 5: /* Edge case: remainder */ fixup sb, zero, (a0), 11f addi a0, a0, 1 bltu a0, a3, 5b j 3b /* Exception fixup code */ 11: /* Disable access to user memory */ csrc CSR_STATUS, t6 sub a0, a3, a0 ret ENDPROC(__clear_user) EXPORT_SYMBOL(__clear_user)
aixcc-public/challenge-001-exemplar-source
2,363
arch/riscv/lib/memset.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2013 Regents of the University of California */ #include <linux/linkage.h> #include <asm/asm.h> /* void *memset(void *, int, size_t) */ ENTRY(__memset) WEAK(memset) move t0, a0 /* Preserve return value */ /* Defer to byte-oriented fill for small sizes */ sltiu a3, a2, 16 bnez a3, 4f /* * Round to nearest XLEN-aligned address * greater than or equal to start address */ addi a3, t0, SZREG-1 andi a3, a3, ~(SZREG-1) beq a3, t0, 2f /* Skip if already aligned */ /* Handle initial misalignment */ sub a4, a3, t0 1: sb a1, 0(t0) addi t0, t0, 1 bltu t0, a3, 1b sub a2, a2, a4 /* Update count */ 2: /* Duff's device with 32 XLEN stores per iteration */ /* Broadcast value into all bytes */ andi a1, a1, 0xff slli a3, a1, 8 or a1, a3, a1 slli a3, a1, 16 or a1, a3, a1 #ifdef CONFIG_64BIT slli a3, a1, 32 or a1, a3, a1 #endif /* Calculate end address */ andi a4, a2, ~(SZREG-1) add a3, t0, a4 andi a4, a4, 31*SZREG /* Calculate remainder */ beqz a4, 3f /* Shortcut if no remainder */ neg a4, a4 addi a4, a4, 32*SZREG /* Calculate initial offset */ /* Adjust start address with offset */ sub t0, t0, a4 /* Jump into loop body */ /* Assumes 32-bit instruction lengths */ la a5, 3f #ifdef CONFIG_64BIT srli a4, a4, 1 #endif add a5, a5, a4 jr a5 3: REG_S a1, 0(t0) REG_S a1, SZREG(t0) REG_S a1, 2*SZREG(t0) REG_S a1, 3*SZREG(t0) REG_S a1, 4*SZREG(t0) REG_S a1, 5*SZREG(t0) REG_S a1, 6*SZREG(t0) REG_S a1, 7*SZREG(t0) REG_S a1, 8*SZREG(t0) REG_S a1, 9*SZREG(t0) REG_S a1, 10*SZREG(t0) REG_S a1, 11*SZREG(t0) REG_S a1, 12*SZREG(t0) REG_S a1, 13*SZREG(t0) REG_S a1, 14*SZREG(t0) REG_S a1, 15*SZREG(t0) REG_S a1, 16*SZREG(t0) REG_S a1, 17*SZREG(t0) REG_S a1, 18*SZREG(t0) REG_S a1, 19*SZREG(t0) REG_S a1, 20*SZREG(t0) REG_S a1, 21*SZREG(t0) REG_S a1, 22*SZREG(t0) REG_S a1, 23*SZREG(t0) REG_S a1, 24*SZREG(t0) REG_S a1, 25*SZREG(t0) REG_S a1, 26*SZREG(t0) REG_S a1, 27*SZREG(t0) REG_S a1, 28*SZREG(t0) REG_S a1, 29*SZREG(t0) REG_S a1, 30*SZREG(t0) 
REG_S a1, 31*SZREG(t0) addi t0, t0, 32*SZREG bltu t0, a3, 3b andi a2, a2, SZREG-1 /* Update count */ 4: /* Handle trailing misalignment */ beqz a2, 6f add a3, t0, a2 5: sb a1, 0(t0) addi t0, t0, 1 bltu t0, a3, 5b 6: ret END(__memset)
aixcc-public/challenge-001-exemplar-source
11,657
arch/riscv/kvm/vcpu_switch.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2019 Western Digital Corporation or its affiliates. * * Authors: * Anup Patel <anup.patel@wdc.com> */ #include <linux/linkage.h> #include <asm/asm.h> #include <asm/asm-offsets.h> #include <asm/csr.h> .text .altmacro .option norelax ENTRY(__kvm_riscv_switch_to) /* Save Host GPRs (except A0 and T0-T6) */ REG_S ra, (KVM_ARCH_HOST_RA)(a0) REG_S sp, (KVM_ARCH_HOST_SP)(a0) REG_S gp, (KVM_ARCH_HOST_GP)(a0) REG_S tp, (KVM_ARCH_HOST_TP)(a0) REG_S s0, (KVM_ARCH_HOST_S0)(a0) REG_S s1, (KVM_ARCH_HOST_S1)(a0) REG_S a1, (KVM_ARCH_HOST_A1)(a0) REG_S a2, (KVM_ARCH_HOST_A2)(a0) REG_S a3, (KVM_ARCH_HOST_A3)(a0) REG_S a4, (KVM_ARCH_HOST_A4)(a0) REG_S a5, (KVM_ARCH_HOST_A5)(a0) REG_S a6, (KVM_ARCH_HOST_A6)(a0) REG_S a7, (KVM_ARCH_HOST_A7)(a0) REG_S s2, (KVM_ARCH_HOST_S2)(a0) REG_S s3, (KVM_ARCH_HOST_S3)(a0) REG_S s4, (KVM_ARCH_HOST_S4)(a0) REG_S s5, (KVM_ARCH_HOST_S5)(a0) REG_S s6, (KVM_ARCH_HOST_S6)(a0) REG_S s7, (KVM_ARCH_HOST_S7)(a0) REG_S s8, (KVM_ARCH_HOST_S8)(a0) REG_S s9, (KVM_ARCH_HOST_S9)(a0) REG_S s10, (KVM_ARCH_HOST_S10)(a0) REG_S s11, (KVM_ARCH_HOST_S11)(a0) /* Load Guest CSR values */ REG_L t0, (KVM_ARCH_GUEST_SSTATUS)(a0) REG_L t1, (KVM_ARCH_GUEST_HSTATUS)(a0) REG_L t2, (KVM_ARCH_GUEST_SCOUNTEREN)(a0) la t4, __kvm_switch_return REG_L t5, (KVM_ARCH_GUEST_SEPC)(a0) /* Save Host and Restore Guest SSTATUS */ csrrw t0, CSR_SSTATUS, t0 /* Save Host and Restore Guest HSTATUS */ csrrw t1, CSR_HSTATUS, t1 /* Save Host and Restore Guest SCOUNTEREN */ csrrw t2, CSR_SCOUNTEREN, t2 /* Save Host STVEC and change it to return path */ csrrw t4, CSR_STVEC, t4 /* Save Host SSCRATCH and change it to struct kvm_vcpu_arch pointer */ csrrw t3, CSR_SSCRATCH, a0 /* Restore Guest SEPC */ csrw CSR_SEPC, t5 /* Store Host CSR values */ REG_S t0, (KVM_ARCH_HOST_SSTATUS)(a0) REG_S t1, (KVM_ARCH_HOST_HSTATUS)(a0) REG_S t2, (KVM_ARCH_HOST_SCOUNTEREN)(a0) REG_S t3, (KVM_ARCH_HOST_SSCRATCH)(a0) REG_S t4, (KVM_ARCH_HOST_STVEC)(a0) /* Restore Guest 
GPRs (except A0) */ REG_L ra, (KVM_ARCH_GUEST_RA)(a0) REG_L sp, (KVM_ARCH_GUEST_SP)(a0) REG_L gp, (KVM_ARCH_GUEST_GP)(a0) REG_L tp, (KVM_ARCH_GUEST_TP)(a0) REG_L t0, (KVM_ARCH_GUEST_T0)(a0) REG_L t1, (KVM_ARCH_GUEST_T1)(a0) REG_L t2, (KVM_ARCH_GUEST_T2)(a0) REG_L s0, (KVM_ARCH_GUEST_S0)(a0) REG_L s1, (KVM_ARCH_GUEST_S1)(a0) REG_L a1, (KVM_ARCH_GUEST_A1)(a0) REG_L a2, (KVM_ARCH_GUEST_A2)(a0) REG_L a3, (KVM_ARCH_GUEST_A3)(a0) REG_L a4, (KVM_ARCH_GUEST_A4)(a0) REG_L a5, (KVM_ARCH_GUEST_A5)(a0) REG_L a6, (KVM_ARCH_GUEST_A6)(a0) REG_L a7, (KVM_ARCH_GUEST_A7)(a0) REG_L s2, (KVM_ARCH_GUEST_S2)(a0) REG_L s3, (KVM_ARCH_GUEST_S3)(a0) REG_L s4, (KVM_ARCH_GUEST_S4)(a0) REG_L s5, (KVM_ARCH_GUEST_S5)(a0) REG_L s6, (KVM_ARCH_GUEST_S6)(a0) REG_L s7, (KVM_ARCH_GUEST_S7)(a0) REG_L s8, (KVM_ARCH_GUEST_S8)(a0) REG_L s9, (KVM_ARCH_GUEST_S9)(a0) REG_L s10, (KVM_ARCH_GUEST_S10)(a0) REG_L s11, (KVM_ARCH_GUEST_S11)(a0) REG_L t3, (KVM_ARCH_GUEST_T3)(a0) REG_L t4, (KVM_ARCH_GUEST_T4)(a0) REG_L t5, (KVM_ARCH_GUEST_T5)(a0) REG_L t6, (KVM_ARCH_GUEST_T6)(a0) /* Restore Guest A0 */ REG_L a0, (KVM_ARCH_GUEST_A0)(a0) /* Resume Guest */ sret /* Back to Host */ .align 2 __kvm_switch_return: /* Swap Guest A0 with SSCRATCH */ csrrw a0, CSR_SSCRATCH, a0 /* Save Guest GPRs (except A0) */ REG_S ra, (KVM_ARCH_GUEST_RA)(a0) REG_S sp, (KVM_ARCH_GUEST_SP)(a0) REG_S gp, (KVM_ARCH_GUEST_GP)(a0) REG_S tp, (KVM_ARCH_GUEST_TP)(a0) REG_S t0, (KVM_ARCH_GUEST_T0)(a0) REG_S t1, (KVM_ARCH_GUEST_T1)(a0) REG_S t2, (KVM_ARCH_GUEST_T2)(a0) REG_S s0, (KVM_ARCH_GUEST_S0)(a0) REG_S s1, (KVM_ARCH_GUEST_S1)(a0) REG_S a1, (KVM_ARCH_GUEST_A1)(a0) REG_S a2, (KVM_ARCH_GUEST_A2)(a0) REG_S a3, (KVM_ARCH_GUEST_A3)(a0) REG_S a4, (KVM_ARCH_GUEST_A4)(a0) REG_S a5, (KVM_ARCH_GUEST_A5)(a0) REG_S a6, (KVM_ARCH_GUEST_A6)(a0) REG_S a7, (KVM_ARCH_GUEST_A7)(a0) REG_S s2, (KVM_ARCH_GUEST_S2)(a0) REG_S s3, (KVM_ARCH_GUEST_S3)(a0) REG_S s4, (KVM_ARCH_GUEST_S4)(a0) REG_S s5, (KVM_ARCH_GUEST_S5)(a0) REG_S s6, (KVM_ARCH_GUEST_S6)(a0) REG_S s7, 
(KVM_ARCH_GUEST_S7)(a0) REG_S s8, (KVM_ARCH_GUEST_S8)(a0) REG_S s9, (KVM_ARCH_GUEST_S9)(a0) REG_S s10, (KVM_ARCH_GUEST_S10)(a0) REG_S s11, (KVM_ARCH_GUEST_S11)(a0) REG_S t3, (KVM_ARCH_GUEST_T3)(a0) REG_S t4, (KVM_ARCH_GUEST_T4)(a0) REG_S t5, (KVM_ARCH_GUEST_T5)(a0) REG_S t6, (KVM_ARCH_GUEST_T6)(a0) /* Load Host CSR values */ REG_L t1, (KVM_ARCH_HOST_STVEC)(a0) REG_L t2, (KVM_ARCH_HOST_SSCRATCH)(a0) REG_L t3, (KVM_ARCH_HOST_SCOUNTEREN)(a0) REG_L t4, (KVM_ARCH_HOST_HSTATUS)(a0) REG_L t5, (KVM_ARCH_HOST_SSTATUS)(a0) /* Save Guest SEPC */ csrr t0, CSR_SEPC /* Save Guest A0 and Restore Host SSCRATCH */ csrrw t2, CSR_SSCRATCH, t2 /* Restore Host STVEC */ csrw CSR_STVEC, t1 /* Save Guest and Restore Host SCOUNTEREN */ csrrw t3, CSR_SCOUNTEREN, t3 /* Save Guest and Restore Host HSTATUS */ csrrw t4, CSR_HSTATUS, t4 /* Save Guest and Restore Host SSTATUS */ csrrw t5, CSR_SSTATUS, t5 /* Store Guest CSR values */ REG_S t0, (KVM_ARCH_GUEST_SEPC)(a0) REG_S t2, (KVM_ARCH_GUEST_A0)(a0) REG_S t3, (KVM_ARCH_GUEST_SCOUNTEREN)(a0) REG_S t4, (KVM_ARCH_GUEST_HSTATUS)(a0) REG_S t5, (KVM_ARCH_GUEST_SSTATUS)(a0) /* Restore Host GPRs (except A0 and T0-T6) */ REG_L ra, (KVM_ARCH_HOST_RA)(a0) REG_L sp, (KVM_ARCH_HOST_SP)(a0) REG_L gp, (KVM_ARCH_HOST_GP)(a0) REG_L tp, (KVM_ARCH_HOST_TP)(a0) REG_L s0, (KVM_ARCH_HOST_S0)(a0) REG_L s1, (KVM_ARCH_HOST_S1)(a0) REG_L a1, (KVM_ARCH_HOST_A1)(a0) REG_L a2, (KVM_ARCH_HOST_A2)(a0) REG_L a3, (KVM_ARCH_HOST_A3)(a0) REG_L a4, (KVM_ARCH_HOST_A4)(a0) REG_L a5, (KVM_ARCH_HOST_A5)(a0) REG_L a6, (KVM_ARCH_HOST_A6)(a0) REG_L a7, (KVM_ARCH_HOST_A7)(a0) REG_L s2, (KVM_ARCH_HOST_S2)(a0) REG_L s3, (KVM_ARCH_HOST_S3)(a0) REG_L s4, (KVM_ARCH_HOST_S4)(a0) REG_L s5, (KVM_ARCH_HOST_S5)(a0) REG_L s6, (KVM_ARCH_HOST_S6)(a0) REG_L s7, (KVM_ARCH_HOST_S7)(a0) REG_L s8, (KVM_ARCH_HOST_S8)(a0) REG_L s9, (KVM_ARCH_HOST_S9)(a0) REG_L s10, (KVM_ARCH_HOST_S10)(a0) REG_L s11, (KVM_ARCH_HOST_S11)(a0) /* Return to C code */ ret ENDPROC(__kvm_riscv_switch_to) 
ENTRY(__kvm_riscv_unpriv_trap) /* * We assume that faulting unpriv load/store instruction is * 4-byte long and blindly increment SEPC by 4. * * The trap details will be saved at address pointed by 'A0' * register and we use 'A1' register as temporary. */ csrr a1, CSR_SEPC REG_S a1, (KVM_ARCH_TRAP_SEPC)(a0) addi a1, a1, 4 csrw CSR_SEPC, a1 csrr a1, CSR_SCAUSE REG_S a1, (KVM_ARCH_TRAP_SCAUSE)(a0) csrr a1, CSR_STVAL REG_S a1, (KVM_ARCH_TRAP_STVAL)(a0) csrr a1, CSR_HTVAL REG_S a1, (KVM_ARCH_TRAP_HTVAL)(a0) csrr a1, CSR_HTINST REG_S a1, (KVM_ARCH_TRAP_HTINST)(a0) sret ENDPROC(__kvm_riscv_unpriv_trap) #ifdef CONFIG_FPU .align 3 .global __kvm_riscv_fp_f_save __kvm_riscv_fp_f_save: csrr t2, CSR_SSTATUS li t1, SR_FS csrs CSR_SSTATUS, t1 frcsr t0 fsw f0, KVM_ARCH_FP_F_F0(a0) fsw f1, KVM_ARCH_FP_F_F1(a0) fsw f2, KVM_ARCH_FP_F_F2(a0) fsw f3, KVM_ARCH_FP_F_F3(a0) fsw f4, KVM_ARCH_FP_F_F4(a0) fsw f5, KVM_ARCH_FP_F_F5(a0) fsw f6, KVM_ARCH_FP_F_F6(a0) fsw f7, KVM_ARCH_FP_F_F7(a0) fsw f8, KVM_ARCH_FP_F_F8(a0) fsw f9, KVM_ARCH_FP_F_F9(a0) fsw f10, KVM_ARCH_FP_F_F10(a0) fsw f11, KVM_ARCH_FP_F_F11(a0) fsw f12, KVM_ARCH_FP_F_F12(a0) fsw f13, KVM_ARCH_FP_F_F13(a0) fsw f14, KVM_ARCH_FP_F_F14(a0) fsw f15, KVM_ARCH_FP_F_F15(a0) fsw f16, KVM_ARCH_FP_F_F16(a0) fsw f17, KVM_ARCH_FP_F_F17(a0) fsw f18, KVM_ARCH_FP_F_F18(a0) fsw f19, KVM_ARCH_FP_F_F19(a0) fsw f20, KVM_ARCH_FP_F_F20(a0) fsw f21, KVM_ARCH_FP_F_F21(a0) fsw f22, KVM_ARCH_FP_F_F22(a0) fsw f23, KVM_ARCH_FP_F_F23(a0) fsw f24, KVM_ARCH_FP_F_F24(a0) fsw f25, KVM_ARCH_FP_F_F25(a0) fsw f26, KVM_ARCH_FP_F_F26(a0) fsw f27, KVM_ARCH_FP_F_F27(a0) fsw f28, KVM_ARCH_FP_F_F28(a0) fsw f29, KVM_ARCH_FP_F_F29(a0) fsw f30, KVM_ARCH_FP_F_F30(a0) fsw f31, KVM_ARCH_FP_F_F31(a0) sw t0, KVM_ARCH_FP_F_FCSR(a0) csrw CSR_SSTATUS, t2 ret .align 3 .global __kvm_riscv_fp_d_save __kvm_riscv_fp_d_save: csrr t2, CSR_SSTATUS li t1, SR_FS csrs CSR_SSTATUS, t1 frcsr t0 fsd f0, KVM_ARCH_FP_D_F0(a0) fsd f1, KVM_ARCH_FP_D_F1(a0) fsd f2, KVM_ARCH_FP_D_F2(a0) fsd f3, 
KVM_ARCH_FP_D_F3(a0) fsd f4, KVM_ARCH_FP_D_F4(a0) fsd f5, KVM_ARCH_FP_D_F5(a0) fsd f6, KVM_ARCH_FP_D_F6(a0) fsd f7, KVM_ARCH_FP_D_F7(a0) fsd f8, KVM_ARCH_FP_D_F8(a0) fsd f9, KVM_ARCH_FP_D_F9(a0) fsd f10, KVM_ARCH_FP_D_F10(a0) fsd f11, KVM_ARCH_FP_D_F11(a0) fsd f12, KVM_ARCH_FP_D_F12(a0) fsd f13, KVM_ARCH_FP_D_F13(a0) fsd f14, KVM_ARCH_FP_D_F14(a0) fsd f15, KVM_ARCH_FP_D_F15(a0) fsd f16, KVM_ARCH_FP_D_F16(a0) fsd f17, KVM_ARCH_FP_D_F17(a0) fsd f18, KVM_ARCH_FP_D_F18(a0) fsd f19, KVM_ARCH_FP_D_F19(a0) fsd f20, KVM_ARCH_FP_D_F20(a0) fsd f21, KVM_ARCH_FP_D_F21(a0) fsd f22, KVM_ARCH_FP_D_F22(a0) fsd f23, KVM_ARCH_FP_D_F23(a0) fsd f24, KVM_ARCH_FP_D_F24(a0) fsd f25, KVM_ARCH_FP_D_F25(a0) fsd f26, KVM_ARCH_FP_D_F26(a0) fsd f27, KVM_ARCH_FP_D_F27(a0) fsd f28, KVM_ARCH_FP_D_F28(a0) fsd f29, KVM_ARCH_FP_D_F29(a0) fsd f30, KVM_ARCH_FP_D_F30(a0) fsd f31, KVM_ARCH_FP_D_F31(a0) sw t0, KVM_ARCH_FP_D_FCSR(a0) csrw CSR_SSTATUS, t2 ret .align 3 .global __kvm_riscv_fp_f_restore __kvm_riscv_fp_f_restore: csrr t2, CSR_SSTATUS li t1, SR_FS lw t0, KVM_ARCH_FP_F_FCSR(a0) csrs CSR_SSTATUS, t1 flw f0, KVM_ARCH_FP_F_F0(a0) flw f1, KVM_ARCH_FP_F_F1(a0) flw f2, KVM_ARCH_FP_F_F2(a0) flw f3, KVM_ARCH_FP_F_F3(a0) flw f4, KVM_ARCH_FP_F_F4(a0) flw f5, KVM_ARCH_FP_F_F5(a0) flw f6, KVM_ARCH_FP_F_F6(a0) flw f7, KVM_ARCH_FP_F_F7(a0) flw f8, KVM_ARCH_FP_F_F8(a0) flw f9, KVM_ARCH_FP_F_F9(a0) flw f10, KVM_ARCH_FP_F_F10(a0) flw f11, KVM_ARCH_FP_F_F11(a0) flw f12, KVM_ARCH_FP_F_F12(a0) flw f13, KVM_ARCH_FP_F_F13(a0) flw f14, KVM_ARCH_FP_F_F14(a0) flw f15, KVM_ARCH_FP_F_F15(a0) flw f16, KVM_ARCH_FP_F_F16(a0) flw f17, KVM_ARCH_FP_F_F17(a0) flw f18, KVM_ARCH_FP_F_F18(a0) flw f19, KVM_ARCH_FP_F_F19(a0) flw f20, KVM_ARCH_FP_F_F20(a0) flw f21, KVM_ARCH_FP_F_F21(a0) flw f22, KVM_ARCH_FP_F_F22(a0) flw f23, KVM_ARCH_FP_F_F23(a0) flw f24, KVM_ARCH_FP_F_F24(a0) flw f25, KVM_ARCH_FP_F_F25(a0) flw f26, KVM_ARCH_FP_F_F26(a0) flw f27, KVM_ARCH_FP_F_F27(a0) flw f28, KVM_ARCH_FP_F_F28(a0) flw f29, KVM_ARCH_FP_F_F29(a0) flw 
f30, KVM_ARCH_FP_F_F30(a0) flw f31, KVM_ARCH_FP_F_F31(a0) fscsr t0 csrw CSR_SSTATUS, t2 ret .align 3 .global __kvm_riscv_fp_d_restore __kvm_riscv_fp_d_restore: csrr t2, CSR_SSTATUS li t1, SR_FS lw t0, KVM_ARCH_FP_D_FCSR(a0) csrs CSR_SSTATUS, t1 fld f0, KVM_ARCH_FP_D_F0(a0) fld f1, KVM_ARCH_FP_D_F1(a0) fld f2, KVM_ARCH_FP_D_F2(a0) fld f3, KVM_ARCH_FP_D_F3(a0) fld f4, KVM_ARCH_FP_D_F4(a0) fld f5, KVM_ARCH_FP_D_F5(a0) fld f6, KVM_ARCH_FP_D_F6(a0) fld f7, KVM_ARCH_FP_D_F7(a0) fld f8, KVM_ARCH_FP_D_F8(a0) fld f9, KVM_ARCH_FP_D_F9(a0) fld f10, KVM_ARCH_FP_D_F10(a0) fld f11, KVM_ARCH_FP_D_F11(a0) fld f12, KVM_ARCH_FP_D_F12(a0) fld f13, KVM_ARCH_FP_D_F13(a0) fld f14, KVM_ARCH_FP_D_F14(a0) fld f15, KVM_ARCH_FP_D_F15(a0) fld f16, KVM_ARCH_FP_D_F16(a0) fld f17, KVM_ARCH_FP_D_F17(a0) fld f18, KVM_ARCH_FP_D_F18(a0) fld f19, KVM_ARCH_FP_D_F19(a0) fld f20, KVM_ARCH_FP_D_F20(a0) fld f21, KVM_ARCH_FP_D_F21(a0) fld f22, KVM_ARCH_FP_D_F22(a0) fld f23, KVM_ARCH_FP_D_F23(a0) fld f24, KVM_ARCH_FP_D_F24(a0) fld f25, KVM_ARCH_FP_D_F25(a0) fld f26, KVM_ARCH_FP_D_F26(a0) fld f27, KVM_ARCH_FP_D_F27(a0) fld f28, KVM_ARCH_FP_D_F28(a0) fld f29, KVM_ARCH_FP_D_F29(a0) fld f30, KVM_ARCH_FP_D_F30(a0) fld f31, KVM_ARCH_FP_D_F31(a0) fscsr t0 csrw CSR_SSTATUS, t2 ret #endif
aixcc-public/challenge-001-exemplar-source
1,863
arch/riscv/kernel/vdso/vdso.lds.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2012 Regents of the University of California */ #include <asm/page.h> #include <asm/vdso.h> OUTPUT_ARCH(riscv) SECTIONS { PROVIDE(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE); #ifdef CONFIG_TIME_NS PROVIDE(_timens_data = _vdso_data + PAGE_SIZE); #endif . = SIZEOF_HEADERS; .hash : { *(.hash) } :text .gnu.hash : { *(.gnu.hash) } .dynsym : { *(.dynsym) } .dynstr : { *(.dynstr) } .gnu.version : { *(.gnu.version) } .gnu.version_d : { *(.gnu.version_d) } .gnu.version_r : { *(.gnu.version_r) } .note : { *(.note.*) } :text :note .dynamic : { *(.dynamic) } :text :dynamic .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr .eh_frame : { KEEP (*(.eh_frame)) } :text .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } /* * This linker script is used both with -r and with -shared. * For the layouts to match, we need to skip more than enough * space for the dynamic symbol table, etc. If this amount is * insufficient, ld -shared will error; simply increase it here. */ . = 0x800; .text : { *(.text .text.*) } :text .data : { *(.got.plt) *(.got) *(.data .data.* .gnu.linkonce.d.*) *(.dynbss) *(.bss .bss.* .gnu.linkonce.b.*) } } /* * We must supply the ELF program headers explicitly to get just one * PT_LOAD segment, and set the flags explicitly to make segments read-only. */ PHDRS { text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */ dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ note PT_NOTE FLAGS(4); /* PF_R */ eh_frame_hdr PT_GNU_EH_FRAME; } /* * This controls what symbols we export from the DSO. */ VERSION { LINUX_4.15 { global: __vdso_rt_sigreturn; #ifdef HAS_VGETTIMEOFDAY __vdso_gettimeofday; __vdso_clock_gettime; __vdso_clock_getres; #endif __vdso_getcpu; __vdso_flush_icache; local: *; }; }
aixcc-public/challenge-001-exemplar-source
1,856
arch/riscv/kernel/probes/kprobes_trampoline.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * Author: Patrick Stählin <me@packi.ch> */ #include <linux/linkage.h> #include <asm/asm.h> #include <asm/asm-offsets.h> .text .altmacro .macro save_all_base_regs REG_S x1, PT_RA(sp) REG_S x3, PT_GP(sp) REG_S x4, PT_TP(sp) REG_S x5, PT_T0(sp) REG_S x6, PT_T1(sp) REG_S x7, PT_T2(sp) REG_S x8, PT_S0(sp) REG_S x9, PT_S1(sp) REG_S x10, PT_A0(sp) REG_S x11, PT_A1(sp) REG_S x12, PT_A2(sp) REG_S x13, PT_A3(sp) REG_S x14, PT_A4(sp) REG_S x15, PT_A5(sp) REG_S x16, PT_A6(sp) REG_S x17, PT_A7(sp) REG_S x18, PT_S2(sp) REG_S x19, PT_S3(sp) REG_S x20, PT_S4(sp) REG_S x21, PT_S5(sp) REG_S x22, PT_S6(sp) REG_S x23, PT_S7(sp) REG_S x24, PT_S8(sp) REG_S x25, PT_S9(sp) REG_S x26, PT_S10(sp) REG_S x27, PT_S11(sp) REG_S x28, PT_T3(sp) REG_S x29, PT_T4(sp) REG_S x30, PT_T5(sp) REG_S x31, PT_T6(sp) .endm .macro restore_all_base_regs REG_L x3, PT_GP(sp) REG_L x4, PT_TP(sp) REG_L x5, PT_T0(sp) REG_L x6, PT_T1(sp) REG_L x7, PT_T2(sp) REG_L x8, PT_S0(sp) REG_L x9, PT_S1(sp) REG_L x10, PT_A0(sp) REG_L x11, PT_A1(sp) REG_L x12, PT_A2(sp) REG_L x13, PT_A3(sp) REG_L x14, PT_A4(sp) REG_L x15, PT_A5(sp) REG_L x16, PT_A6(sp) REG_L x17, PT_A7(sp) REG_L x18, PT_S2(sp) REG_L x19, PT_S3(sp) REG_L x20, PT_S4(sp) REG_L x21, PT_S5(sp) REG_L x22, PT_S6(sp) REG_L x23, PT_S7(sp) REG_L x24, PT_S8(sp) REG_L x25, PT_S9(sp) REG_L x26, PT_S10(sp) REG_L x27, PT_S11(sp) REG_L x28, PT_T3(sp) REG_L x29, PT_T4(sp) REG_L x30, PT_T5(sp) REG_L x31, PT_T6(sp) .endm ENTRY(__kretprobe_trampoline) addi sp, sp, -(PT_SIZE_ON_STACK) save_all_base_regs move a0, sp /* pt_regs */ call trampoline_probe_handler /* use the result as the return-address */ move ra, a0 restore_all_base_regs addi sp, sp, PT_SIZE_ON_STACK ret ENDPROC(__kretprobe_trampoline)
aixcc-public/challenge-001-exemplar-source
4,088
arch/sh/kernel/relocate_kernel.S
/* SPDX-License-Identifier: GPL-2.0 * * relocate_kernel.S - put the kernel image in place to boot * 2005.9.17 kogiidena@eggplant.ddo.jp * * LANDISK/sh4 is supported. Maybe, SH archtecture works well. * * 2009-03-18 Magnus Damm - Added Kexec Jump support */ #include <linux/linkage.h> #include <asm/addrspace.h> #include <asm/page.h> .globl relocate_new_kernel relocate_new_kernel: /* r4 = indirection_page */ /* r5 = reboot_code_buffer */ /* r6 = start_address */ mov.l 10f, r0 /* PAGE_SIZE */ add r5, r0 /* setup new stack at end of control page */ /* save r15->r8 to new stack */ mov.l r15, @-r0 mov r0, r15 mov.l r14, @-r15 mov.l r13, @-r15 mov.l r12, @-r15 mov.l r11, @-r15 mov.l r10, @-r15 mov.l r9, @-r15 mov.l r8, @-r15 /* save other random registers */ sts.l macl, @-r15 sts.l mach, @-r15 stc.l gbr, @-r15 stc.l ssr, @-r15 stc.l sr, @-r15 sts.l pr, @-r15 stc.l spc, @-r15 /* switch to bank1 and save r7->r0 */ mov.l 12f, r9 stc sr, r8 or r9, r8 ldc r8, sr mov.l r7, @-r15 mov.l r6, @-r15 mov.l r5, @-r15 mov.l r4, @-r15 mov.l r3, @-r15 mov.l r2, @-r15 mov.l r1, @-r15 mov.l r0, @-r15 /* switch to bank0 and save r7->r0 */ mov.l 12f, r9 not r9, r9 stc sr, r8 and r9, r8 ldc r8, sr mov.l r7, @-r15 mov.l r6, @-r15 mov.l r5, @-r15 mov.l r4, @-r15 mov.l r3, @-r15 mov.l r2, @-r15 mov.l r1, @-r15 mov.l r0, @-r15 mov.l r4, @-r15 /* save indirection page again */ bsr swap_pages /* swap pages before jumping to new kernel */ nop mova 11f, r0 mov.l r15, @r0 /* save pointer to stack */ jsr @r6 /* hand over control to new kernel */ nop mov.l 11f, r15 /* get pointer to stack */ mov.l @r15+, r4 /* restore r4 to get indirection page */ bsr swap_pages /* swap pages back to previous state */ nop /* make sure bank0 is active and restore r0->r7 */ mov.l 12f, r9 not r9, r9 stc sr, r8 and r9, r8 ldc r8, sr mov.l @r15+, r0 mov.l @r15+, r1 mov.l @r15+, r2 mov.l @r15+, r3 mov.l @r15+, r4 mov.l @r15+, r5 mov.l @r15+, r6 mov.l @r15+, r7 /* switch to bank1 and restore r0->r7 */ mov.l 12f, r9 stc sr, r8 
or r9, r8 ldc r8, sr mov.l @r15+, r0 mov.l @r15+, r1 mov.l @r15+, r2 mov.l @r15+, r3 mov.l @r15+, r4 mov.l @r15+, r5 mov.l @r15+, r6 mov.l @r15+, r7 /* switch back to bank0 */ mov.l 12f, r9 not r9, r9 stc sr, r8 and r9, r8 ldc r8, sr /* restore other random registers */ ldc.l @r15+, spc lds.l @r15+, pr ldc.l @r15+, sr ldc.l @r15+, ssr ldc.l @r15+, gbr lds.l @r15+, mach lds.l @r15+, macl /* restore r8->r15 */ mov.l @r15+, r8 mov.l @r15+, r9 mov.l @r15+, r10 mov.l @r15+, r11 mov.l @r15+, r12 mov.l @r15+, r13 mov.l @r15+, r14 mov.l @r15+, r15 rts nop swap_pages: bra 1f mov r4,r0 /* cmd = indirection_page */ 0: mov.l @r4+,r0 /* cmd = *ind++ */ 1: /* addr = cmd & 0xfffffff0 */ mov r0,r2 mov #-16,r1 and r1,r2 /* if(cmd & IND_DESTINATION) dst = addr */ tst #1,r0 bt 2f bra 0b mov r2,r5 2: /* else if(cmd & IND_INDIRECTION) ind = addr */ tst #2,r0 bt 3f bra 0b mov r2,r4 3: /* else if(cmd & IND_DONE) return */ tst #4,r0 bt 4f rts nop 4: /* else if(cmd & IND_SOURCE) memcpy(dst,addr,PAGE_SIZE) */ tst #8,r0 bt 0b mov.l 10f,r3 /* PAGE_SIZE */ shlr2 r3 shlr2 r3 5: dt r3 /* regular kexec just overwrites the destination page * with the contents of the source page. * for the kexec jump case we need to swap the contents * of the pages. * to keep it simple swap the contents for both cases. */ mov.l @(0, r2), r8 mov.l @(0, r5), r1 mov.l r8, @(0, r5) mov.l r1, @(0, r2) mov.l @(4, r2), r8 mov.l @(4, r5), r1 mov.l r8, @(4, r5) mov.l r1, @(4, r2) mov.l @(8, r2), r8 mov.l @(8, r5), r1 mov.l r8, @(8, r5) mov.l r1, @(8, r2) mov.l @(12, r2), r8 mov.l @(12, r5), r1 mov.l r8, @(12, r5) mov.l r1, @(12, r2) add #16,r5 add #16,r2 bf 5b bra 0b nop .align 2 10: .long PAGE_SIZE 11: .long 0 12: .long 0x20000000 ! RB=1 relocate_new_kernel_end: .globl relocate_new_kernel_size relocate_new_kernel_size: .long relocate_new_kernel_end - relocate_new_kernel
aixcc-public/challenge-001-exemplar-source
8,523
arch/sh/kernel/head_32.S
/* SPDX-License-Identifier: GPL-2.0 * $Id: head.S,v 1.7 2003/09/01 17:58:19 lethal Exp $ * * arch/sh/kernel/head.S * * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima * Copyright (C) 2010 Matt Fleming * * Head.S contains the SH exception handlers and startup code. */ #include <linux/init.h> #include <linux/linkage.h> #include <asm/thread_info.h> #include <asm/mmu.h> #include <cpu/mmu_context.h> #ifdef CONFIG_CPU_SH4A #define SYNCO() synco #define PREFI(label, reg) \ mov.l label, reg; \ prefi @reg #else #define SYNCO() #define PREFI(label, reg) #endif .section .empty_zero_page, "aw" ENTRY(empty_zero_page) .long 1 /* MOUNT_ROOT_RDONLY */ .long 0 /* RAMDISK_FLAGS */ .long 0x0200 /* ORIG_ROOT_DEV */ .long 1 /* LOADER_TYPE */ .long 0x00000000 /* INITRD_START */ .long 0x00000000 /* INITRD_SIZE */ #ifdef CONFIG_32BIT .long 0x53453f00 + 32 /* "SE?" = 32 bit */ #else .long 0x53453f00 + 29 /* "SE?" = 29 bit */ #endif 1: .skip PAGE_SIZE - empty_zero_page - 1b __HEAD /* * Condition at the entry of _stext: * * BSC has already been initialized. * INTC may or may not be initialized. * VBR may or may not be initialized. * MMU may or may not be initialized. * Cache may or may not be initialized. * Hardware (including on-chip modules) may or may not be initialized. * */ ENTRY(_stext) ! Initialize Status Register mov.l 1f, r0 ! MD=1, RB=0, BL=0, IMASK=0xF ldc r0, sr ! Initialize global interrupt mask #ifdef CONFIG_CPU_HAS_SR_RB mov #0, r0 ldc r0, r6_bank #endif #ifdef CONFIG_OF_EARLY_FLATTREE mov r4, r12 ! Store device tree blob pointer in r12 #endif /* * Prefetch if possible to reduce cache miss penalty. * * We do this early on for SH-4A as a micro-optimization, * as later on we will have speculative execution enabled * and this will become less of an issue. */ PREFI(5f, r0) PREFI(6f, r0) ! mov.l 2f, r0 mov r0, r15 ! Set initial r15 (stack pointer) #ifdef CONFIG_CPU_HAS_SR_RB mov.l 7f, r0 ldc r0, r7_bank ! ... 
and initial thread_info #endif #ifdef CONFIG_PMB /* * Reconfigure the initial PMB mappings setup by the hardware. * * When we boot in 32-bit MMU mode there are 2 PMB entries already * setup for us. * * Entry VPN PPN V SZ C UB WT * --------------------------------------------------------------- * 0 0x80000000 0x00000000 1 512MB 1 0 1 * 1 0xA0000000 0x00000000 1 512MB 0 0 0 * * But we reprogram them here because we want complete control over * our address space and the initial mappings may not map PAGE_OFFSET * to __MEMORY_START (or even map all of our RAM). * * Once we've setup cached and uncached mappings we clear the rest of the * PMB entries. This clearing also deals with the fact that PMB entries * can persist across reboots. The PMB could have been left in any state * when the reboot occurred, so to be safe we clear all entries and start * with with a clean slate. * * The uncached mapping is constructed using the smallest possible * mapping with a single unbufferable page. Only the kernel text needs to * be covered via the uncached mapping so that certain functions can be * run uncached. * * Drivers and the like that have previously abused the 1:1 identity * mapping are unsupported in 32-bit mode and must specify their caching * preference when page tables are constructed. * * This frees up the P2 space for more nefarious purposes. 
* * Register utilization is as follows: * * r0 = PMB_DATA data field * r1 = PMB_DATA address field * r2 = PMB_ADDR data field * r3 = PMB_ADDR address field * r4 = PMB_E_SHIFT * r5 = remaining amount of RAM to map * r6 = PMB mapping size we're trying to use * r7 = cached_to_uncached * r8 = scratch register * r9 = scratch register * r10 = number of PMB entries we've setup * r11 = scratch register */ mov.l .LMMUCR, r1 /* Flush the TLB */ mov.l @r1, r0 or #MMUCR_TI, r0 mov.l r0, @r1 mov.l .LMEMORY_SIZE, r5 mov #PMB_E_SHIFT, r0 mov #0x1, r4 shld r0, r4 mov.l .LFIRST_DATA_ENTRY, r0 mov.l .LPMB_DATA, r1 mov.l .LFIRST_ADDR_ENTRY, r2 mov.l .LPMB_ADDR, r3 /* * First we need to walk the PMB and figure out if there are any * existing mappings that match the initial mappings VPN/PPN. * If these have already been established by the bootloader, we * don't bother setting up new entries here, and let the late PMB * initialization take care of things instead. * * Note that we may need to coalesce and merge entries in order * to reclaim more available PMB slots, which is much more than * we want to do at this early stage. */ mov #0, r10 mov #NR_PMB_ENTRIES, r9 mov r1, r7 /* temporary PMB_DATA iter */ .Lvalidate_existing_mappings: mov.l .LPMB_DATA_MASK, r11 mov.l @r7, r8 and r11, r8 cmp/eq r0, r8 /* Check for valid __MEMORY_START mappings */ bt .Lpmb_done add #1, r10 /* Increment the loop counter */ cmp/eq r9, r10 bf/s .Lvalidate_existing_mappings add r4, r7 /* Increment to the next PMB_DATA entry */ /* * If we've fallen through, continue with setting up the initial * mappings. */ mov r5, r7 /* cached_to_uncached */ mov #0, r10 #ifdef CONFIG_UNCACHED_MAPPING /* * Uncached mapping */ mov #(PMB_SZ_16M >> 2), r9 shll2 r9 mov #(PMB_UB >> 8), r8 shll8 r8 or r0, r8 or r9, r8 mov.l r8, @r1 mov r2, r8 add r7, r8 mov.l r8, @r3 add r4, r1 add r4, r3 add #1, r10 #endif /* * Iterate over all of the available sizes from largest to * smallest for constructing the cached mapping. 
*/ #define __PMB_ITER_BY_SIZE(size) \ .L##size: \ mov #(size >> 4), r6; \ shll16 r6; \ shll8 r6; \ \ cmp/hi r5, r6; \ bt 9999f; \ \ mov #(PMB_SZ_##size##M >> 2), r9; \ shll2 r9; \ \ /* \ * Cached mapping \ */ \ mov #PMB_C, r8; \ or r0, r8; \ or r9, r8; \ mov.l r8, @r1; \ mov.l r2, @r3; \ \ /* Increment to the next PMB_DATA entry */ \ add r4, r1; \ /* Increment to the next PMB_ADDR entry */ \ add r4, r3; \ /* Increment number of PMB entries */ \ add #1, r10; \ \ sub r6, r5; \ add r6, r0; \ add r6, r2; \ \ bra .L##size; \ 9999: __PMB_ITER_BY_SIZE(512) __PMB_ITER_BY_SIZE(128) __PMB_ITER_BY_SIZE(64) __PMB_ITER_BY_SIZE(16) #ifdef CONFIG_UNCACHED_MAPPING /* * Now that we can access it, update cached_to_uncached and * uncached_size. */ mov.l .Lcached_to_uncached, r0 mov.l r7, @r0 mov.l .Luncached_size, r0 mov #1, r7 shll16 r7 shll8 r7 mov.l r7, @r0 #endif /* * Clear the remaining PMB entries. * * r3 = entry to begin clearing from * r10 = number of entries we've setup so far */ mov #0, r1 mov #NR_PMB_ENTRIES, r0 .Lagain: mov.l r1, @r3 /* Clear PMB_ADDR entry */ add #1, r10 /* Increment the loop counter */ cmp/eq r0, r10 bf/s .Lagain add r4, r3 /* Increment to the next PMB_ADDR entry */ mov.l 6f, r0 icbi @r0 .Lpmb_done: #endif /* CONFIG_PMB */ #ifndef CONFIG_SH_NO_BSS_INIT /* * Don't clear BSS if running on slow platforms such as an RTL simulation, * remote memory via SHdebug link, etc. For these the memory can be guaranteed * to be all zero on boot anyway. */ ! Clear BSS area #ifdef CONFIG_SMP mov.l 3f, r0 cmp/eq #0, r0 ! skip clear if set to zero bt 10f #endif mov.l 3f, r1 add #4, r1 mov.l 4f, r2 mov #0, r0 9: cmp/hs r2, r1 bf/s 9b ! while (r1 < r2) mov.l r0,@-r2 10: #endif #ifdef CONFIG_OF_EARLY_FLATTREE mov.l 8f, r0 ! Make flat device tree available early. jsr @r0 mov r12, r4 #endif ! Additional CPU initialization mov.l 6f, r0 jsr @r0 nop SYNCO() ! Wait for pending instructions.. ! 
Start kernel mov.l 5f, r0 jmp @r0 nop .balign 4 #if defined(CONFIG_CPU_SH2) 1: .long 0x000000F0 ! IMASK=0xF #else 1: .long 0x500080F0 ! MD=1, RB=0, BL=1, FD=1, IMASK=0xF #endif ENTRY(stack_start) 2: .long init_thread_union+THREAD_SIZE 3: .long __bss_start 4: .long _end 5: .long start_kernel 6: .long cpu_init 7: .long init_thread_union #if defined(CONFIG_OF_EARLY_FLATTREE) 8: .long sh_fdt_init #endif #ifdef CONFIG_PMB .LPMB_ADDR: .long PMB_ADDR .LPMB_DATA: .long PMB_DATA .LPMB_DATA_MASK: .long PMB_PFN_MASK | PMB_V .LFIRST_ADDR_ENTRY: .long PAGE_OFFSET | PMB_V .LFIRST_DATA_ENTRY: .long __MEMORY_START | PMB_V .LMMUCR: .long MMUCR .LMEMORY_SIZE: .long __MEMORY_SIZE #ifdef CONFIG_UNCACHED_MAPPING .Lcached_to_uncached: .long cached_to_uncached .Luncached_size: .long uncached_size #endif #endif
aixcc-public/challenge-001-exemplar-source
8,743
arch/sh/kernel/entry-common.S
/* SPDX-License-Identifier: GPL-2.0 * * Copyright (C) 1999, 2000, 2002 Niibe Yutaka * Copyright (C) 2003 - 2008 Paul Mundt */ ! NOTE: ! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address ! to be jumped is too far, but it causes illegal slot exception. /* * entry.S contains the system-call and fault low-level handling routines. * This also contains the timer-interrupt handler, as well as all interrupts * and faults that can result in a task-switch. * * NOTE: This code handles signal-recognition, which happens every time * after a timer-interrupt and after each system call. * * NOTE: This code uses a convention that instructions in the delay slot * of a transfer-control instruction are indented by an extra space, thus: * * jmp @k0 ! control-transfer instruction * ldc k1, ssr ! delay slot * * Stack layout in 'ret_from_syscall': * ptrace needs to have all regs on the stack. * if the order here is changed, it needs to be * updated in ptrace.c and ptrace.h * * r0 * ... * r15 = stack pointer * spc * pr * ssr * gbr * mach * macl * syscall # * */ #include <asm/dwarf.h> #if defined(CONFIG_PREEMPTION) # define preempt_stop() cli ; TRACE_IRQS_OFF #else # define preempt_stop() # define resume_kernel __restore_all #endif .align 2 ENTRY(exception_error) ! TRACE_IRQS_ON sti mov.l 1f, r0 jmp @r0 nop .align 2 1: .long do_exception_error .align 2 ret_from_exception: CFI_STARTPROC simple CFI_DEF_CFA r14, 0 CFI_REL_OFFSET 17, 64 CFI_REL_OFFSET 15, 60 CFI_REL_OFFSET 14, 56 CFI_REL_OFFSET 13, 52 CFI_REL_OFFSET 12, 48 CFI_REL_OFFSET 11, 44 CFI_REL_OFFSET 10, 40 CFI_REL_OFFSET 9, 36 CFI_REL_OFFSET 8, 32 preempt_stop() ENTRY(ret_from_irq) ! mov #OFF_SR, r0 mov.l @(r0,r15), r0 ! get status register shll r0 shll r0 ! kernel space? get_current_thread_info r8, r0 bt resume_kernel ! Yes, it's from kernel, go back soon #ifdef CONFIG_PREEMPTION bra resume_userspace nop ENTRY(resume_kernel) cli TRACE_IRQS_OFF mov.l @(TI_PRE_COUNT,r8), r0 ! 
current_thread_info->preempt_count tst r0, r0 bf noresched need_resched: mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags tst #_TIF_NEED_RESCHED, r0 ! need_resched set? bt noresched mov #OFF_SR, r0 mov.l @(r0,r15), r0 ! get status register shlr r0 and #(0xf0>>1), r0 ! interrupts off (exception path)? cmp/eq #(0xf0>>1), r0 bt noresched mov.l 1f, r0 jsr @r0 ! call preempt_schedule_irq nop bra need_resched nop noresched: bra __restore_all nop .align 2 1: .long preempt_schedule_irq #endif ENTRY(resume_userspace) ! r8: current_thread_info cli TRACE_IRQS_OFF mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags tst #(_TIF_WORK_MASK & 0xff), r0 bt/s __restore_all tst #_TIF_NEED_RESCHED, r0 .align 2 work_pending: ! r0: current_thread_info->flags ! r8: current_thread_info ! t: result of "tst #_TIF_NEED_RESCHED, r0" bf/s work_resched tst #(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME), r0 work_notifysig: bt/s __restore_all mov r15, r4 mov r12, r5 ! set arg1(save_r0) mov r0, r6 sti mov.l 2f, r1 mov.l 3f, r0 jmp @r1 lds r0, pr work_resched: mov.l 1f, r1 jsr @r1 ! schedule nop cli TRACE_IRQS_OFF ! mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags tst #(_TIF_WORK_MASK & 0xff), r0 bt __restore_all bra work_pending tst #_TIF_NEED_RESCHED, r0 .align 2 1: .long schedule 2: .long do_notify_resume 3: .long resume_userspace .align 2 syscall_exit_work: ! r0: current_thread_info->flags ! r8: current_thread_info tst #(_TIF_WORK_SYSCALL_MASK & 0xff), r0 bt/s work_pending tst #_TIF_NEED_RESCHED, r0 TRACE_IRQS_ON sti mov r15, r4 mov.l 8f, r0 ! do_syscall_trace_leave jsr @r0 nop bra resume_userspace nop __restore_all: mov #OFF_SR, r0 mov.l @(r0,r15), r0 ! get status register shlr2 r0 and #0x3c, r0 cmp/eq #0x3c, r0 bt 1f TRACE_IRQS_ON bra 2f nop 1: TRACE_IRQS_OFF 2: mov.l 3f, r0 jmp @r0 nop .align 2 3: .long restore_all .align 2 syscall_badsys: ! Bad syscall number get_current_thread_info r8, r0 mov #-ENOSYS, r0 bra resume_userspace mov.l r0, @(OFF_R0,r15) ! 
Return value /* * The main debug trap handler. * * r8=TRA (not the trap number!) * * Note: This assumes that the trapa value is left in its original * form (without the shlr2 shift) so the calculation for the jump * call table offset remains a simple in place mask. */ debug_trap: mov r8, r0 and #(0xf << 2), r0 mov.l 1f, r8 add r0, r8 mov.l @r8, r8 jsr @r8 nop bra ret_from_exception nop CFI_ENDPROC .align 2 1: .long debug_trap_table /* * Syscall interface: * * Syscall #: R3 * Arguments #0 to #3: R4--R7 * Arguments #4 to #6: R0, R1, R2 * TRA: See following table. * * (TRA>>2) Purpose * -------- ------- * 0x00-0x0f original SH-3/4 syscall ABI (not in general use). * 0x10-0x1f general SH-3/4 syscall ABI. * 0x1f unified SH-2/3/4 syscall ABI (preferred). * 0x20-0x2f original SH-2 syscall ABI. * 0x30-0x3f debug traps used by the kernel. * 0x40-0xff Not supported by all parts, so left unhandled. * * For making system calls, any trap number in the range for the * given cpu model may be used, but the unified trap number 0x1f is * preferred for compatibility with all models. * * The low bits of the trap number were once documented as matching * the number of arguments, but they were never actually used as such * by the kernel. SH-2 originally used its own separate trap range * because several hardware exceptions fell in the range used for the * SH-3/4 syscall ABI. * * This code also handles delegating other traps to the BIOS/gdb stub. * * Note: When we're first called, the TRA value must be shifted * right 2 bits in order to get the value that was used as the "trapa" * argument. */ .align 2 .globl ret_from_fork ret_from_fork: mov.l 1f, r8 jsr @r8 mov r0, r4 bra syscall_exit nop .align 2 .globl ret_from_kernel_thread ret_from_kernel_thread: mov.l 1f, r8 jsr @r8 mov r0, r4 mov.l @(OFF_R5,r15), r5 ! fn jsr @r5 mov.l @(OFF_R4,r15), r4 ! 
arg bra syscall_exit nop .align 2 1: .long schedule_tail /* * The poorly named main trapa decode and dispatch routine, for * system calls and debug traps through their respective jump tables. */ ENTRY(system_call) setup_frame_reg #if !defined(CONFIG_CPU_SH2) mov.l 1f, r9 mov.l @r9, r8 ! Read from TRA (Trap Address) Register #endif mov #OFF_TRA, r10 add r15, r10 mov.l r8, @r10 ! set TRA value to tra /* * Check the trap type */ mov #((0x20 << 2) - 1), r9 cmp/hi r9, r8 bt/s debug_trap ! it's a debug trap.. nop TRACE_IRQS_ON sti ! get_current_thread_info r8, r10 mov.l @(TI_FLAGS,r8), r8 mov #(_TIF_WORK_SYSCALL_MASK & 0xff), r10 mov #(_TIF_WORK_SYSCALL_MASK >> 8), r9 tst r10, r8 shll8 r9 bf syscall_trace_entry tst r9, r8 bf syscall_trace_entry ! mov.l 6f, r8 ! Number of syscalls cmp/hs r8, r3 bt syscall_badsys ! syscall_call: shll2 r3 ! x4 mov.l 3f, r8 ! Load the address of sys_call_table add r8, r3 mov.l @r3, r8 mov.l @(OFF_R2,r15), r2 mov.l @(OFF_R1,r15), r1 mov.l @(OFF_R0,r15), r0 mov.l r2, @-r15 mov.l r1, @-r15 mov.l r0, @-r15 jsr @r8 ! jump to specific syscall handler nop add #12, r15 mov.l @(OFF_R0,r15), r12 ! save r0 mov.l r0, @(OFF_R0,r15) ! save the return value ! syscall_exit: cli TRACE_IRQS_OFF ! get_current_thread_info r8, r0 mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags tst #(_TIF_ALLWORK_MASK & 0xff), r0 mov #(_TIF_ALLWORK_MASK >> 8), r1 bf syscall_exit_work shlr8 r0 tst r0, r1 bf syscall_exit_work bra __restore_all nop .align 2 syscall_trace_entry: ! Yes it is traced. mov r15, r4 mov.l 7f, r11 ! Call do_syscall_trace_enter which notifies jsr @r11 ! superior (will chomp R[0-7]) nop cmp/eq #-1, r0 bt syscall_exit ! Reload R0-R4 from kernel stack, where the ! parent may have modified them using ! ptrace(POKEUSR). (Note that R0-R2 are ! reloaded from the kernel stack by syscall_call ! below, so don't need to be reloaded here.) ! This allows the parent to rewrite system calls ! and args on the fly. mov.l @(OFF_R4,r15), r4 ! 
arg0 mov.l @(OFF_R5,r15), r5 mov.l @(OFF_R6,r15), r6 mov.l @(OFF_R7,r15), r7 ! arg3 mov.l @(OFF_R3,r15), r3 ! syscall_nr ! mov.l 6f, r10 ! Number of syscalls cmp/hs r10, r3 bf syscall_call mov #-ENOSYS, r0 bra syscall_exit mov.l r0, @(OFF_R0,r15) ! Return value .align 2 #if !defined(CONFIG_CPU_SH2) 1: .long TRA #endif 6: .long NR_syscalls 3: .long sys_call_table 7: .long do_syscall_trace_enter 8: .long do_syscall_trace_leave
aixcc-public/challenge-001-exemplar-source
1,562
arch/sh/kernel/vmlinux.lds.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * ld script to make SuperH Linux kernel * Written by Niibe Yutaka and Paul Mundt */ OUTPUT_ARCH(sh) #define RUNTIME_DISCARD_EXIT #include <asm/thread_info.h> #include <asm/cache.h> #include <asm/vmlinux.lds.h> #ifdef CONFIG_PMB #define MEMORY_OFFSET 0 #else #define MEMORY_OFFSET __MEMORY_START #endif ENTRY(_start) SECTIONS { . = PAGE_OFFSET + MEMORY_OFFSET + PHYSICAL_OFFSET + CONFIG_ZERO_PAGE_OFFSET; _text = .; /* Text and read-only data */ .empty_zero_page : AT(ADDR(.empty_zero_page)) { *(.empty_zero_page) } = 0 .text : AT(ADDR(.text)) { HEAD_TEXT TEXT_TEXT SCHED_TEXT CPUIDLE_TEXT LOCK_TEXT KPROBES_TEXT IRQENTRY_TEXT SOFTIRQENTRY_TEXT *(.fixup) *(.gnu.warning) _etext = .; /* End of text section */ } = 0x0009 EXCEPTION_TABLE(16) _sdata = .; RO_DATA(PAGE_SIZE) RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) _edata = .; DWARF_EH_FRAME . = ALIGN(PAGE_SIZE); /* Init code and data */ __init_begin = .; INIT_TEXT_SECTION(PAGE_SIZE) INIT_DATA_SECTION(16) . = ALIGN(4); .machvec.init : AT(ADDR(.machvec.init)) { __machvec_start = .; *(.machvec.init) __machvec_end = .; } PERCPU_SECTION(L1_CACHE_BYTES) /* * .exit.text is discarded at runtime, not link time, to deal with * references from __bug_table */ .exit.text : AT(ADDR(.exit.text)) { EXIT_TEXT } .exit.data : AT(ADDR(.exit.data)) { EXIT_DATA } . = ALIGN(PAGE_SIZE); __init_end = .; BSS_SECTION(0, PAGE_SIZE, 4) _end = . ; STABS_DEBUG DWARF_DEBUG ELF_DETAILS DISCARDS }
aixcc-public/challenge-001-exemplar-source
9,131
arch/sh/lib/udivsi3_i4i.S
/* SPDX-License-Identifier: GPL-2.0+ WITH GCC-exception-2.0 Copyright (C) 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc. */ !! libgcc routines for the Renesas / SuperH SH CPUs. !! Contributed by Steve Chamberlain. !! sac@cygnus.com !! ashiftrt_r4_x, ___ashrsi3, ___ashlsi3, ___lshrsi3 routines !! recoded in assembly by Toshiyasu Morita !! tm@netcom.com /* SH2 optimizations for ___ashrsi3, ___ashlsi3, ___lshrsi3 and ELF local label prefixes by J"orn Rennecke amylaar@cygnus.com */ /* This code used shld, thus is not suitable for SH1 / SH2. */ /* Signed / unsigned division without use of FPU, optimized for SH4. Uses a lookup table for divisors in the range -128 .. +128, and div1 with case distinction for larger divisors in three more ranges. The code is lumped together with the table to allow the use of mova. */ #ifdef CONFIG_CPU_LITTLE_ENDIAN #define L_LSB 0 #define L_LSWMSB 1 #define L_MSWLSB 2 #else #define L_LSB 3 #define L_LSWMSB 2 #define L_MSWLSB 1 #endif .balign 4 .global __udivsi3_i4i .global __udivsi3_i4 .set __udivsi3_i4, __udivsi3_i4i .type __udivsi3_i4i, @function __udivsi3_i4i: mov.w c128_w, r1 div0u mov r4,r0 shlr8 r0 cmp/hi r1,r5 extu.w r5,r1 bf udiv_le128 cmp/eq r5,r1 bf udiv_ge64k shlr r0 mov r5,r1 shll16 r5 mov.l r4,@-r15 div1 r5,r0 mov.l r1,@-r15 div1 r5,r0 div1 r5,r0 bra udiv_25 div1 r5,r0 div_le128: mova div_table_ix,r0 bra div_le128_2 mov.b @(r0,r5),r1 udiv_le128: mov.l r4,@-r15 mova div_table_ix,r0 mov.b @(r0,r5),r1 mov.l r5,@-r15 div_le128_2: mova div_table_inv,r0 mov.l @(r0,r1),r1 mov r5,r0 tst #0xfe,r0 mova div_table_clz,r0 dmulu.l r1,r4 mov.b @(r0,r5),r1 bt/s div_by_1 mov r4,r0 mov.l @r15+,r5 sts mach,r0 /* clrt */ addc r4,r0 mov.l @r15+,r4 rotcr r0 rts shld r1,r0 div_by_1_neg: neg r4,r0 div_by_1: mov.l @r15+,r5 rts mov.l @r15+,r4 div_ge64k: bt/s div_r8 div0u shll8 r5 bra div_ge64k_2 div1 r5,r0 udiv_ge64k: cmp/hi r0,r5 mov r5,r1 bt udiv_r8 shll8 r5 mov.l r4,@-r15 div1 r5,r0 mov.l 
r1,@-r15 div_ge64k_2: div1 r5,r0 mov.l zero_l,r1 .rept 4 div1 r5,r0 .endr mov.l r1,@-r15 div1 r5,r0 mov.w m256_w,r1 div1 r5,r0 mov.b r0,@(L_LSWMSB,r15) xor r4,r0 and r1,r0 bra div_ge64k_end xor r4,r0 div_r8: shll16 r4 bra div_r8_2 shll8 r4 udiv_r8: mov.l r4,@-r15 shll16 r4 clrt shll8 r4 mov.l r5,@-r15 div_r8_2: rotcl r4 mov r0,r1 div1 r5,r1 mov r4,r0 rotcl r0 mov r5,r4 div1 r5,r1 .rept 5 rotcl r0; div1 r5,r1 .endr rotcl r0 mov.l @r15+,r5 div1 r4,r1 mov.l @r15+,r4 rts rotcl r0 .global __sdivsi3_i4i .global __sdivsi3_i4 .global __sdivsi3 .set __sdivsi3_i4, __sdivsi3_i4i .set __sdivsi3, __sdivsi3_i4i .type __sdivsi3_i4i, @function /* This is link-compatible with a __sdivsi3 call, but we effectively clobber only r1. */ __sdivsi3_i4i: mov.l r4,@-r15 cmp/pz r5 mov.w c128_w, r1 bt/s pos_divisor cmp/pz r4 mov.l r5,@-r15 neg r5,r5 bt/s neg_result cmp/hi r1,r5 neg r4,r4 pos_result: extu.w r5,r0 bf div_le128 cmp/eq r5,r0 mov r4,r0 shlr8 r0 bf/s div_ge64k cmp/hi r0,r5 div0u shll16 r5 div1 r5,r0 div1 r5,r0 div1 r5,r0 udiv_25: mov.l zero_l,r1 div1 r5,r0 div1 r5,r0 mov.l r1,@-r15 .rept 3 div1 r5,r0 .endr mov.b r0,@(L_MSWLSB,r15) xtrct r4,r0 swap.w r0,r0 .rept 8 div1 r5,r0 .endr mov.b r0,@(L_LSWMSB,r15) div_ge64k_end: .rept 8 div1 r5,r0 .endr mov.l @r15+,r4 ! zero-extension and swap using LS unit. 
extu.b r0,r0 mov.l @r15+,r5 or r4,r0 mov.l @r15+,r4 rts rotcl r0 div_le128_neg: tst #0xfe,r0 mova div_table_ix,r0 mov.b @(r0,r5),r1 mova div_table_inv,r0 bt/s div_by_1_neg mov.l @(r0,r1),r1 mova div_table_clz,r0 dmulu.l r1,r4 mov.b @(r0,r5),r1 mov.l @r15+,r5 sts mach,r0 /* clrt */ addc r4,r0 mov.l @r15+,r4 rotcr r0 shld r1,r0 rts neg r0,r0 pos_divisor: mov.l r5,@-r15 bt/s pos_result cmp/hi r1,r5 neg r4,r4 neg_result: extu.w r5,r0 bf div_le128_neg cmp/eq r5,r0 mov r4,r0 shlr8 r0 bf/s div_ge64k_neg cmp/hi r0,r5 div0u mov.l zero_l,r1 shll16 r5 div1 r5,r0 mov.l r1,@-r15 .rept 7 div1 r5,r0 .endr mov.b r0,@(L_MSWLSB,r15) xtrct r4,r0 swap.w r0,r0 .rept 8 div1 r5,r0 .endr mov.b r0,@(L_LSWMSB,r15) div_ge64k_neg_end: .rept 8 div1 r5,r0 .endr mov.l @r15+,r4 ! zero-extension and swap using LS unit. extu.b r0,r1 mov.l @r15+,r5 or r4,r1 div_r8_neg_end: mov.l @r15+,r4 rotcl r1 rts neg r1,r0 div_ge64k_neg: bt/s div_r8_neg div0u shll8 r5 mov.l zero_l,r1 .rept 6 div1 r5,r0 .endr mov.l r1,@-r15 div1 r5,r0 mov.w m256_w,r1 div1 r5,r0 mov.b r0,@(L_LSWMSB,r15) xor r4,r0 and r1,r0 bra div_ge64k_neg_end xor r4,r0 c128_w: .word 128 div_r8_neg: clrt shll16 r4 mov r4,r1 shll8 r1 mov r5,r4 .rept 7 rotcl r1; div1 r5,r0 .endr mov.l @r15+,r5 rotcl r1 bra div_r8_neg_end div1 r4,r0 m256_w: .word 0xff00 /* This table has been generated by divtab-sh4.c. 
*/ .balign 4 div_table_clz: .byte 0 .byte 1 .byte 0 .byte -1 .byte -1 .byte -2 .byte -2 .byte -2 .byte -2 .byte -3 .byte -3 .byte -3 .byte -3 .byte -3 .byte -3 .byte -3 .byte -3 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 /* Lookup table translating positive divisor to index into table of normalized inverse. N.B. the '0' entry is also the last entry of the previous table, and causes an unaligned access for division by zero. 
*/ div_table_ix: .byte -6 .byte -128 .byte -128 .byte 0 .byte -128 .byte -64 .byte 0 .byte 64 .byte -128 .byte -96 .byte -64 .byte -32 .byte 0 .byte 32 .byte 64 .byte 96 .byte -128 .byte -112 .byte -96 .byte -80 .byte -64 .byte -48 .byte -32 .byte -16 .byte 0 .byte 16 .byte 32 .byte 48 .byte 64 .byte 80 .byte 96 .byte 112 .byte -128 .byte -120 .byte -112 .byte -104 .byte -96 .byte -88 .byte -80 .byte -72 .byte -64 .byte -56 .byte -48 .byte -40 .byte -32 .byte -24 .byte -16 .byte -8 .byte 0 .byte 8 .byte 16 .byte 24 .byte 32 .byte 40 .byte 48 .byte 56 .byte 64 .byte 72 .byte 80 .byte 88 .byte 96 .byte 104 .byte 112 .byte 120 .byte -128 .byte -124 .byte -120 .byte -116 .byte -112 .byte -108 .byte -104 .byte -100 .byte -96 .byte -92 .byte -88 .byte -84 .byte -80 .byte -76 .byte -72 .byte -68 .byte -64 .byte -60 .byte -56 .byte -52 .byte -48 .byte -44 .byte -40 .byte -36 .byte -32 .byte -28 .byte -24 .byte -20 .byte -16 .byte -12 .byte -8 .byte -4 .byte 0 .byte 4 .byte 8 .byte 12 .byte 16 .byte 20 .byte 24 .byte 28 .byte 32 .byte 36 .byte 40 .byte 44 .byte 48 .byte 52 .byte 56 .byte 60 .byte 64 .byte 68 .byte 72 .byte 76 .byte 80 .byte 84 .byte 88 .byte 92 .byte 96 .byte 100 .byte 104 .byte 108 .byte 112 .byte 116 .byte 120 .byte 124 .byte -128 /* 1/64 .. 1/127, normalized. There is an implicit leading 1 in bit 32. 
*/ .balign 4 zero_l: .long 0x0 .long 0xF81F81F9 .long 0xF07C1F08 .long 0xE9131AC0 .long 0xE1E1E1E2 .long 0xDAE6076C .long 0xD41D41D5 .long 0xCD856891 .long 0xC71C71C8 .long 0xC0E07039 .long 0xBACF914D .long 0xB4E81B4F .long 0xAF286BCB .long 0xA98EF607 .long 0xA41A41A5 .long 0x9EC8E952 .long 0x9999999A .long 0x948B0FCE .long 0x8F9C18FA .long 0x8ACB90F7 .long 0x86186187 .long 0x81818182 .long 0x7D05F418 .long 0x78A4C818 .long 0x745D1746 .long 0x702E05C1 .long 0x6C16C16D .long 0x68168169 .long 0x642C8591 .long 0x60581606 .long 0x5C9882BA .long 0x58ED2309 div_table_inv: .long 0x55555556 .long 0x51D07EAF .long 0x4E5E0A73 .long 0x4AFD6A06 .long 0x47AE147B .long 0x446F8657 .long 0x41414142 .long 0x3E22CBCF .long 0x3B13B13C .long 0x38138139 .long 0x3521CFB3 .long 0x323E34A3 .long 0x2F684BDB .long 0x2C9FB4D9 .long 0x29E4129F .long 0x27350B89 .long 0x24924925 .long 0x21FB7813 .long 0x1F7047DD .long 0x1CF06ADB .long 0x1A7B9612 .long 0x18118119 .long 0x15B1E5F8 .long 0x135C8114 .long 0x11111112 .long 0xECF56BF .long 0xC9714FC .long 0xA6810A7 .long 0x8421085 .long 0x624DD30 .long 0x4104105 .long 0x2040811 /* maximum error: 0.987342 scaled: 0.921875*/
aixcc-public/challenge-001-exemplar-source
2,816
arch/sh/lib/ashrsi3.S
/* SPDX-License-Identifier: GPL-2.0+ WITH GCC-exception-2.0 Copyright (C) 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc. */ !! libgcc routines for the Renesas / SuperH SH CPUs. !! Contributed by Steve Chamberlain. !! sac@cygnus.com !! ashiftrt_r4_x, ___ashrsi3, ___ashlsi3, ___lshrsi3 routines !! recoded in assembly by Toshiyasu Morita !! tm@netcom.com /* SH2 optimizations for ___ashrsi3, ___ashlsi3, ___lshrsi3 and ELF local label prefixes by J"orn Rennecke amylaar@cygnus.com */ ! ! __ashrsi3 ! ! Entry: ! ! r4: Value to shift ! r5: Shifts ! ! Exit: ! ! r0: Result ! ! Destroys: ! ! (none) ! ! __ashrsi3_r0 ! ! Entry: ! ! r4: Value to shift ! r0: Shifts ! ! Exit: ! ! r0: Result ! ! Destroys: ! ! (none) .global __ashrsi3 .global __ashrsi3_r0 .align 2 __ashrsi3: mov r5,r0 .align 2 __ashrsi3_r0: and #31,r0 mov.l r4,@-r15 mov r0,r4 mova ashrsi3_table,r0 mov.b @(r0,r4),r4 add r4,r0 jmp @r0 mov.l @r15+,r0 .align 2 ashrsi3_table: .byte ashrsi3_0-ashrsi3_table .byte ashrsi3_1-ashrsi3_table .byte ashrsi3_2-ashrsi3_table .byte ashrsi3_3-ashrsi3_table .byte ashrsi3_4-ashrsi3_table .byte ashrsi3_5-ashrsi3_table .byte ashrsi3_6-ashrsi3_table .byte ashrsi3_7-ashrsi3_table .byte ashrsi3_8-ashrsi3_table .byte ashrsi3_9-ashrsi3_table .byte ashrsi3_10-ashrsi3_table .byte ashrsi3_11-ashrsi3_table .byte ashrsi3_12-ashrsi3_table .byte ashrsi3_13-ashrsi3_table .byte ashrsi3_14-ashrsi3_table .byte ashrsi3_15-ashrsi3_table .byte ashrsi3_16-ashrsi3_table .byte ashrsi3_17-ashrsi3_table .byte ashrsi3_18-ashrsi3_table .byte ashrsi3_19-ashrsi3_table .byte ashrsi3_20-ashrsi3_table .byte ashrsi3_21-ashrsi3_table .byte ashrsi3_22-ashrsi3_table .byte ashrsi3_23-ashrsi3_table .byte ashrsi3_24-ashrsi3_table .byte ashrsi3_25-ashrsi3_table .byte ashrsi3_26-ashrsi3_table .byte ashrsi3_27-ashrsi3_table .byte ashrsi3_28-ashrsi3_table .byte ashrsi3_29-ashrsi3_table .byte ashrsi3_30-ashrsi3_table .byte ashrsi3_31-ashrsi3_table ashrsi3_31: rotcl r0 rts subc 
r0,r0 ashrsi3_30: shar r0 ashrsi3_29: shar r0 ashrsi3_28: shar r0 ashrsi3_27: shar r0 ashrsi3_26: shar r0 ashrsi3_25: shar r0 ashrsi3_24: shlr16 r0 shlr8 r0 rts exts.b r0,r0 ashrsi3_23: shar r0 ashrsi3_22: shar r0 ashrsi3_21: shar r0 ashrsi3_20: shar r0 ashrsi3_19: shar r0 ashrsi3_18: shar r0 ashrsi3_17: shar r0 ashrsi3_16: shlr16 r0 rts exts.w r0,r0 ashrsi3_15: shar r0 ashrsi3_14: shar r0 ashrsi3_13: shar r0 ashrsi3_12: shar r0 ashrsi3_11: shar r0 ashrsi3_10: shar r0 ashrsi3_9: shar r0 ashrsi3_8: shar r0 ashrsi3_7: shar r0 ashrsi3_6: shar r0 ashrsi3_5: shar r0 ashrsi3_4: shar r0 ashrsi3_3: shar r0 ashrsi3_2: shar r0 ashrsi3_1: rts shar r0 ashrsi3_0: rts nop
aixcc-public/challenge-001-exemplar-source
2,867
arch/sh/lib/ashlsi3.S
/* SPDX-License-Identifier: GPL-2.0+ WITH GCC-exception-2.0 Copyright (C) 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc. */ !! libgcc routines for the Renesas / SuperH SH CPUs. !! Contributed by Steve Chamberlain. !! sac@cygnus.com !! ashiftrt_r4_x, ___ashrsi3, ___ashlsi3, ___lshrsi3 routines !! recoded in assembly by Toshiyasu Morita !! tm@netcom.com /* SH2 optimizations for ___ashrsi3, ___ashlsi3, ___lshrsi3 and ELF local label prefixes by J"orn Rennecke amylaar@cygnus.com */ ! ! __ashlsi3 ! ! Entry: ! ! r4: Value to shift ! r5: Shifts ! ! Exit: ! ! r0: Result ! ! Destroys: ! ! (none) ! ! __ashlsi3_r0 ! ! Entry: ! ! r4: Value to shift ! r0: Shifts ! ! Exit: ! ! r0: Result ! ! Destroys: ! ! (none) .global __ashlsi3 .global __ashlsi3_r0 .align 2 __ashlsi3: mov r5,r0 .align 2 __ashlsi3_r0: and #31,r0 mov.l r4,@-r15 mov r0,r4 mova ashlsi3_table,r0 mov.b @(r0,r4),r4 add r4,r0 jmp @r0 mov.l @r15+,r0 .align 2 ashlsi3_table: .byte ashlsi3_0-ashlsi3_table .byte ashlsi3_1-ashlsi3_table .byte ashlsi3_2-ashlsi3_table .byte ashlsi3_3-ashlsi3_table .byte ashlsi3_4-ashlsi3_table .byte ashlsi3_5-ashlsi3_table .byte ashlsi3_6-ashlsi3_table .byte ashlsi3_7-ashlsi3_table .byte ashlsi3_8-ashlsi3_table .byte ashlsi3_9-ashlsi3_table .byte ashlsi3_10-ashlsi3_table .byte ashlsi3_11-ashlsi3_table .byte ashlsi3_12-ashlsi3_table .byte ashlsi3_13-ashlsi3_table .byte ashlsi3_14-ashlsi3_table .byte ashlsi3_15-ashlsi3_table .byte ashlsi3_16-ashlsi3_table .byte ashlsi3_17-ashlsi3_table .byte ashlsi3_18-ashlsi3_table .byte ashlsi3_19-ashlsi3_table .byte ashlsi3_20-ashlsi3_table .byte ashlsi3_21-ashlsi3_table .byte ashlsi3_22-ashlsi3_table .byte ashlsi3_23-ashlsi3_table .byte ashlsi3_24-ashlsi3_table .byte ashlsi3_25-ashlsi3_table .byte ashlsi3_26-ashlsi3_table .byte ashlsi3_27-ashlsi3_table .byte ashlsi3_28-ashlsi3_table .byte ashlsi3_29-ashlsi3_table .byte ashlsi3_30-ashlsi3_table .byte ashlsi3_31-ashlsi3_table ashlsi3_6: shll2 r0 
ashlsi3_4: shll2 r0 ashlsi3_2: rts shll2 r0 ashlsi3_7: shll2 r0 ashlsi3_5: shll2 r0 ashlsi3_3: shll2 r0 ashlsi3_1: rts shll r0 ashlsi3_14: shll2 r0 ashlsi3_12: shll2 r0 ashlsi3_10: shll2 r0 ashlsi3_8: rts shll8 r0 ashlsi3_15: shll2 r0 ashlsi3_13: shll2 r0 ashlsi3_11: shll2 r0 ashlsi3_9: shll8 r0 rts shll r0 ashlsi3_22: shll2 r0 ashlsi3_20: shll2 r0 ashlsi3_18: shll2 r0 ashlsi3_16: rts shll16 r0 ashlsi3_23: shll2 r0 ashlsi3_21: shll2 r0 ashlsi3_19: shll2 r0 ashlsi3_17: shll16 r0 rts shll r0 ashlsi3_30: shll2 r0 ashlsi3_28: shll2 r0 ashlsi3_26: shll2 r0 ashlsi3_24: shll16 r0 rts shll8 r0 ashlsi3_31: shll2 r0 ashlsi3_29: shll2 r0 ashlsi3_27: shll2 r0 ashlsi3_25: shll16 r0 shll8 r0 rts shll r0 ashlsi3_0: rts nop
aixcc-public/challenge-001-exemplar-source
3,689
arch/sh/lib/memcpy.S
/* SPDX-License-Identifier: GPL-2.0 */ /* $Id: memcpy.S,v 1.3 2001/07/27 11:50:52 gniibe Exp $ * * "memcpy" implementation of SuperH * * Copyright (C) 1999 Niibe Yutaka * */ /* * void *memcpy(void *dst, const void *src, size_t n); * No overlap between the memory of DST and of SRC are assumed. */ #include <linux/linkage.h> ENTRY(memcpy) tst r6,r6 bt/s 9f ! if n=0, do nothing mov r4,r0 sub r4,r5 ! From here, r5 has the distance to r0 add r6,r0 ! From here, r0 points the end of copying point mov #12,r1 cmp/gt r6,r1 bt/s 7f ! if it's too small, copy a byte at once add #-1,r5 add #1,r5 ! From here, r6 is free ! ! r4 --> [ ... ] DST [ ... ] SRC ! [ ... ] [ ... ] ! : : ! r0 --> [ ... ] r0+r5 --> [ ... ] ! ! mov r5,r1 mov #3,r2 and r2,r1 shll2 r1 mov r0,r3 ! Save the value on R0 to R3 mova jmptable,r0 add r1,r0 mov.l @r0,r1 jmp @r1 mov r3,r0 ! and back to R0 .balign 4 jmptable: .long case0 .long case1 .long case2 .long case3 ! copy a byte at once 7: mov r4,r2 add #1,r2 8: cmp/hi r2,r0 mov.b @(r0,r5),r1 bt/s 8b ! while (r0>r2) mov.b r1,@-r0 9: rts nop case0: ! ! GHIJ KLMN OPQR --> GHIJ KLMN OPQR ! ! First, align to long word boundary mov r0,r3 and r2,r3 tst r3,r3 bt/s 2f add #-4,r5 add #3,r5 1: dt r3 mov.b @(r0,r5),r1 bf/s 1b mov.b r1,@-r0 ! add #-3,r5 2: ! Second, copy a long word at once mov r4,r2 add #7,r2 3: mov.l @(r0,r5),r1 cmp/hi r2,r0 bt/s 3b mov.l r1,@-r0 ! ! Third, copy a byte at once, if necessary cmp/eq r4,r0 bt/s 9b add #3,r5 bra 8b add #-6,r2 case1: ! ! GHIJ KLMN OPQR --> ...G HIJK LMNO PQR. ! ! First, align to long word boundary mov r0,r3 and r2,r3 tst r3,r3 bt/s 2f add #-1,r5 1: dt r3 mov.b @(r0,r5),r1 bf/s 1b mov.b r1,@-r0 ! 2: ! Second, read a long word and write a long word at once mov.l @(r0,r5),r1 add #-4,r5 mov r4,r2 add #7,r2 ! #ifdef __LITTLE_ENDIAN__ 3: mov r1,r3 ! RQPO shll16 r3 shll8 r3 ! Oxxx mov.l @(r0,r5),r1 ! NMLK mov r1,r6 shlr8 r6 ! xNML or r6,r3 ! ONML cmp/hi r2,r0 bt/s 3b mov.l r3,@-r0 #else 3: mov r1,r3 ! OPQR shlr16 r3 shlr8 r3 ! 
xxxO mov.l @(r0,r5),r1 ! KLMN mov r1,r6 shll8 r6 ! LMNx or r6,r3 ! LMNO cmp/hi r2,r0 bt/s 3b mov.l r3,@-r0 #endif ! ! Third, copy a byte at once, if necessary cmp/eq r4,r0 bt/s 9b add #4,r5 bra 8b add #-6,r2 case2: ! ! GHIJ KLMN OPQR --> ..GH IJKL MNOP QR.. ! ! First, align to word boundary tst #1,r0 bt/s 2f add #-1,r5 mov.b @(r0,r5),r1 mov.b r1,@-r0 ! 2: ! Second, read a word and write a word at once add #-1,r5 mov r4,r2 add #3,r2 ! 3: mov.w @(r0,r5),r1 cmp/hi r2,r0 bt/s 3b mov.w r1,@-r0 ! ! Third, copy a byte at once, if necessary cmp/eq r4,r0 bt/s 9b add #1,r5 mov.b @(r0,r5),r1 rts mov.b r1,@-r0 case3: ! ! GHIJ KLMN OPQR --> .GHI JKLM NOPQ R... ! ! First, align to long word boundary mov r0,r3 and r2,r3 tst r3,r3 bt/s 2f add #-1,r5 1: dt r3 mov.b @(r0,r5),r1 bf/s 1b mov.b r1,@-r0 ! 2: ! Second, read a long word and write a long word at once add #-2,r5 mov.l @(r0,r5),r1 add #-4,r5 mov r4,r2 add #7,r2 ! #ifdef __LITTLE_ENDIAN__ 3: mov r1,r3 ! RQPO shll8 r3 ! QPOx mov.l @(r0,r5),r1 ! NMLK mov r1,r6 shlr16 r6 shlr8 r6 ! xxxN or r6,r3 ! QPON cmp/hi r2,r0 bt/s 3b mov.l r3,@-r0 #else 3: mov r1,r3 ! OPQR shlr8 r3 ! xOPQ mov.l @(r0,r5),r1 ! KLMN mov r1,r6 shll16 r6 shll8 r6 ! Nxxx or r6,r3 ! NOPQ cmp/hi r2,r0 bt/s 3b mov.l r3,@-r0 #endif ! ! Third, copy a byte at once, if necessary cmp/eq r4,r0 bt/s 9b add #6,r5 bra 8b add #-6,r2
aixcc-public/challenge-001-exemplar-source
6,298
arch/sh/lib/checksum.S
/* SPDX-License-Identifier: GPL-2.0+ * * $Id: checksum.S,v 1.10 2001/07/06 13:11:32 gniibe Exp $ * * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * IP/TCP/UDP checksumming routines * * Authors: Jorge Cwik, <jorge@laser.satlink.net> * Arnt Gulbrandsen, <agulbra@nvg.unit.no> * Tom May, <ftom@netcom.com> * Pentium Pro/II routines: * Alexander Kjeldaas <astor@guardian.no> * Finn Arne Gangstad <finnag@guardian.no> * Lots of code moved from tcp.c and ip.c; see those files * for more names. * * Changes: Ingo Molnar, converted csum_partial_copy() to 2.1 exception * handling. * Andi Kleen, add zeroing on error * converted to pure assembler * * SuperH version: Copyright (C) 1999 Niibe Yutaka */ #include <asm/errno.h> #include <linux/linkage.h> /* * computes a partial checksum, e.g. for TCP/UDP fragments */ /* * asmlinkage __wsum csum_partial(const void *buf, int len, __wsum sum); */ .text ENTRY(csum_partial) /* * Experiments with Ethernet and SLIP connections show that buff * is aligned on either a 2-byte or 4-byte boundary. We get at * least a twofold speedup on 486 and Pentium if it is 4-byte aligned. * Fortunately, it is easy to convert 2-byte alignment to 4-byte * alignment for the unrolled loop. */ mov r4, r0 tst #3, r0 ! Check alignment. bt/s 2f ! Jump if alignment is ok. mov r4, r7 ! Keep a copy to check for alignment ! tst #1, r0 ! Check alignment. bt 21f ! Jump if alignment is boundary of 2bytes. ! buf is odd tst r5, r5 add #-1, r5 bt 9f mov.b @r4+, r0 extu.b r0, r0 addc r0, r6 ! t=0 from previous tst mov r6, r0 shll8 r6 shlr16 r0 shlr8 r0 or r0, r6 mov r4, r0 tst #2, r0 bt 2f 21: ! buf is 2 byte aligned (len could be 0) add #-2, r5 ! Alignment uses up two bytes. cmp/pz r5 ! bt/s 1f ! Jump if we had at least two bytes. clrt bra 6f add #2, r5 ! r5 was < 2. Deal with it. 
1: mov.w @r4+, r0 extu.w r0, r0 addc r0, r6 bf 2f add #1, r6 2: ! buf is 4 byte aligned (len could be 0) mov r5, r1 mov #-5, r0 shld r0, r1 tst r1, r1 bt/s 4f ! if it's =0, go to 4f clrt .align 2 3: mov.l @r4+, r0 mov.l @r4+, r2 mov.l @r4+, r3 addc r0, r6 mov.l @r4+, r0 addc r2, r6 mov.l @r4+, r2 addc r3, r6 mov.l @r4+, r3 addc r0, r6 mov.l @r4+, r0 addc r2, r6 mov.l @r4+, r2 addc r3, r6 addc r0, r6 addc r2, r6 movt r0 dt r1 bf/s 3b cmp/eq #1, r0 ! here, we know r1==0 addc r1, r6 ! add carry to r6 4: mov r5, r0 and #0x1c, r0 tst r0, r0 bt 6f ! 4 bytes or more remaining mov r0, r1 shlr2 r1 mov #0, r2 5: addc r2, r6 mov.l @r4+, r2 movt r0 dt r1 bf/s 5b cmp/eq #1, r0 addc r2, r6 addc r1, r6 ! r1==0 here, so it means add carry-bit 6: ! 3 bytes or less remaining mov #3, r0 and r0, r5 tst r5, r5 bt 9f ! if it's =0 go to 9f mov #2, r1 cmp/hs r1, r5 bf 7f mov.w @r4+, r0 extu.w r0, r0 cmp/eq r1, r5 bt/s 8f clrt shll16 r0 addc r0, r6 7: mov.b @r4+, r0 extu.b r0, r0 #ifndef __LITTLE_ENDIAN__ shll8 r0 #endif 8: addc r0, r6 mov #0, r0 addc r0, r6 9: ! Check if the buffer was misaligned, if so realign sum mov r7, r0 tst #1, r0 bt 10f mov r6, r0 shll8 r6 shlr16 r0 shlr8 r0 or r0, r6 10: rts mov r6, r0 /* unsigned int csum_partial_copy_generic (const char *src, char *dst, int len) */ /* * Copy from ds while checksumming, otherwise like csum_partial with initial * sum being ~0U */ #define EXC(...) \ 9999: __VA_ARGS__ ; \ .section __ex_table, "a"; \ .long 9999b, 6001f ; \ .previous ! ! r4: const char *SRC ! r5: char *DST ! r6: int LEN ! ENTRY(csum_partial_copy_generic) mov #-1,r7 mov #3,r0 ! Check src and dest are equally aligned mov r4,r1 and r0,r1 and r5,r0 cmp/eq r1,r0 bf 3f ! Different alignments, use slow version tst #1,r0 ! Check dest word aligned bf 3f ! If not, do it the slow way mov #2,r0 tst r0,r5 ! Check dest alignment. bt 2f ! Jump if alignment is ok. add #-2,r6 ! Alignment uses up two bytes. cmp/pz r6 ! Jump if we had at least two bytes. bt/s 1f clrt add #2,r6 ! 
r6 was < 2. Deal with it. bra 4f mov r6,r2 3: ! Handle different src and dest alignments. ! This is not common, so simple byte by byte copy will do. mov r6,r2 shlr r6 tst r6,r6 bt 4f clrt .align 2 5: EXC( mov.b @r4+,r1 ) EXC( mov.b @r4+,r0 ) extu.b r1,r1 EXC( mov.b r1,@r5 ) EXC( mov.b r0,@(1,r5) ) extu.b r0,r0 add #2,r5 #ifdef __LITTLE_ENDIAN__ shll8 r0 #else shll8 r1 #endif or r1,r0 addc r0,r7 movt r0 dt r6 bf/s 5b cmp/eq #1,r0 mov #0,r0 addc r0, r7 mov r2, r0 tst #1, r0 bt 7f bra 5f clrt ! src and dest equally aligned, but to a two byte boundary. ! Handle first two bytes as a special case .align 2 1: EXC( mov.w @r4+,r0 ) EXC( mov.w r0,@r5 ) add #2,r5 extu.w r0,r0 addc r0,r7 mov #0,r0 addc r0,r7 2: mov r6,r2 mov #-5,r0 shld r0,r6 tst r6,r6 bt/s 2f clrt .align 2 1: EXC( mov.l @r4+,r0 ) EXC( mov.l @r4+,r1 ) addc r0,r7 EXC( mov.l r0,@r5 ) EXC( mov.l r1,@(4,r5) ) addc r1,r7 EXC( mov.l @r4+,r0 ) EXC( mov.l @r4+,r1 ) addc r0,r7 EXC( mov.l r0,@(8,r5) ) EXC( mov.l r1,@(12,r5) ) addc r1,r7 EXC( mov.l @r4+,r0 ) EXC( mov.l @r4+,r1 ) addc r0,r7 EXC( mov.l r0,@(16,r5) ) EXC( mov.l r1,@(20,r5) ) addc r1,r7 EXC( mov.l @r4+,r0 ) EXC( mov.l @r4+,r1 ) addc r0,r7 EXC( mov.l r0,@(24,r5) ) EXC( mov.l r1,@(28,r5) ) addc r1,r7 add #32,r5 movt r0 dt r6 bf/s 1b cmp/eq #1,r0 mov #0,r0 addc r0,r7 2: mov r2,r6 mov #0x1c,r0 and r0,r6 cmp/pl r6 bf/s 4f clrt shlr2 r6 3: EXC( mov.l @r4+,r0 ) addc r0,r7 EXC( mov.l r0,@r5 ) add #4,r5 movt r0 dt r6 bf/s 3b cmp/eq #1,r0 mov #0,r0 addc r0,r7 4: mov r2,r6 mov #3,r0 and r0,r6 cmp/pl r6 bf 7f mov #2,r1 cmp/hs r1,r6 bf 5f EXC( mov.w @r4+,r0 ) EXC( mov.w r0,@r5 ) extu.w r0,r0 add #2,r5 cmp/eq r1,r6 bt/s 6f clrt shll16 r0 addc r0,r7 5: EXC( mov.b @r4+,r0 ) EXC( mov.b r0,@r5 ) extu.b r0,r0 #ifndef __LITTLE_ENDIAN__ shll8 r0 #endif 6: addc r0,r7 mov #0,r0 addc r0,r7 7: # Exception handler: .section .fixup, "ax" 6001: rts mov #0,r0 .previous rts mov r7,r0
aixcc-public/challenge-001-exemplar-source
2,867
arch/sh/lib/lshrsi3.S
/* SPDX-License-Identifier: GPL-2.0+ WITH GCC-exception-2.0 Copyright (C) 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc. */ !! libgcc routines for the Renesas / SuperH SH CPUs. !! Contributed by Steve Chamberlain. !! sac@cygnus.com !! ashiftrt_r4_x, ___ashrsi3, ___ashlsi3, ___lshrsi3 routines !! recoded in assembly by Toshiyasu Morita !! tm@netcom.com /* SH2 optimizations for ___ashrsi3, ___ashlsi3, ___lshrsi3 and ELF local label prefixes by J"orn Rennecke amylaar@cygnus.com */ ! ! __lshrsi3 ! ! Entry: ! ! r4: Value to shift ! r5: Shifts ! ! Exit: ! ! r0: Result ! ! Destroys: ! ! (none) ! ! __lshrsi3_r0 ! ! Entry: ! ! r0: Value to shift ! r5: Shifts ! ! Exit: ! ! r0: Result ! ! Destroys: ! ! (none) ! .global __lshrsi3 .global __lshrsi3_r0 .align 2 __lshrsi3: mov r5,r0 .align 2 __lshrsi3_r0: and #31,r0 mov.l r4,@-r15 mov r0,r4 mova lshrsi3_table,r0 mov.b @(r0,r4),r4 add r4,r0 jmp @r0 mov.l @r15+,r0 .align 2 lshrsi3_table: .byte lshrsi3_0-lshrsi3_table .byte lshrsi3_1-lshrsi3_table .byte lshrsi3_2-lshrsi3_table .byte lshrsi3_3-lshrsi3_table .byte lshrsi3_4-lshrsi3_table .byte lshrsi3_5-lshrsi3_table .byte lshrsi3_6-lshrsi3_table .byte lshrsi3_7-lshrsi3_table .byte lshrsi3_8-lshrsi3_table .byte lshrsi3_9-lshrsi3_table .byte lshrsi3_10-lshrsi3_table .byte lshrsi3_11-lshrsi3_table .byte lshrsi3_12-lshrsi3_table .byte lshrsi3_13-lshrsi3_table .byte lshrsi3_14-lshrsi3_table .byte lshrsi3_15-lshrsi3_table .byte lshrsi3_16-lshrsi3_table .byte lshrsi3_17-lshrsi3_table .byte lshrsi3_18-lshrsi3_table .byte lshrsi3_19-lshrsi3_table .byte lshrsi3_20-lshrsi3_table .byte lshrsi3_21-lshrsi3_table .byte lshrsi3_22-lshrsi3_table .byte lshrsi3_23-lshrsi3_table .byte lshrsi3_24-lshrsi3_table .byte lshrsi3_25-lshrsi3_table .byte lshrsi3_26-lshrsi3_table .byte lshrsi3_27-lshrsi3_table .byte lshrsi3_28-lshrsi3_table .byte lshrsi3_29-lshrsi3_table .byte lshrsi3_30-lshrsi3_table .byte lshrsi3_31-lshrsi3_table lshrsi3_6: shlr2 r0 
lshrsi3_4: shlr2 r0 lshrsi3_2: rts shlr2 r0 lshrsi3_7: shlr2 r0 lshrsi3_5: shlr2 r0 lshrsi3_3: shlr2 r0 lshrsi3_1: rts shlr r0 lshrsi3_14: shlr2 r0 lshrsi3_12: shlr2 r0 lshrsi3_10: shlr2 r0 lshrsi3_8: rts shlr8 r0 lshrsi3_15: shlr2 r0 lshrsi3_13: shlr2 r0 lshrsi3_11: shlr2 r0 lshrsi3_9: shlr8 r0 rts shlr r0 lshrsi3_22: shlr2 r0 lshrsi3_20: shlr2 r0 lshrsi3_18: shlr2 r0 lshrsi3_16: rts shlr16 r0 lshrsi3_23: shlr2 r0 lshrsi3_21: shlr2 r0 lshrsi3_19: shlr2 r0 lshrsi3_17: shlr16 r0 rts shlr r0 lshrsi3_30: shlr2 r0 lshrsi3_28: shlr2 r0 lshrsi3_26: shlr2 r0 lshrsi3_24: shlr16 r0 rts shlr8 r0 lshrsi3_31: shlr2 r0 lshrsi3_29: shlr2 r0 lshrsi3_27: shlr2 r0 lshrsi3_25: shlr16 r0 shlr8 r0 rts shlr r0 lshrsi3_0: rts nop