repo_id
stringlengths
5
115
size
int64
590
5.01M
file_path
stringlengths
4
212
content
stringlengths
590
5.01M
AirFortressIlikara/LS2K0300-linux-4.19
9,057
arch/x86/crypto/sha512-mb/sha512_mb_mgr_datastruct.S
/* * Header file for multi buffer SHA256 algorithm data structure * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * Contact Information: * Megha Dey <megha.dey@linux.intel.com> * * BSD LICENSE * * Copyright(c) 2016 Intel Corporation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ # Macros for defining data structures # Usage example #START_FIELDS # JOB_AES ### name size align #FIELD _plaintext, 8, 8 # pointer to plaintext #FIELD _ciphertext, 8, 8 # pointer to ciphertext #FIELD _IV, 16, 8 # IV #FIELD _keys, 8, 8 # pointer to keys #FIELD _len, 4, 4 # length in bytes #FIELD _status, 4, 4 # status enumeration #FIELD _user_data, 8, 8 # pointer to user data #UNION _union, size1, align1, \ # size2, align2, \ # size3, align3, \ # ... #END_FIELDS #%assign _JOB_AES_size _FIELD_OFFSET #%assign _JOB_AES_align _STRUCT_ALIGN ######################################################################### # Alternate "struc-like" syntax: # STRUCT job_aes2 # RES_Q .plaintext, 1 # RES_Q .ciphertext, 1 # RES_DQ .IV, 1 # RES_B .nested, _JOB_AES_SIZE, _JOB_AES_ALIGN # RES_U .union, size1, align1, \ # size2, align2, \ # ... # ENDSTRUCT # # Following only needed if nesting # %assign job_aes2_size _FIELD_OFFSET # %assign job_aes2_align _STRUCT_ALIGN # # RES_* macros take a name, a count and an optional alignment. # The count in in terms of the base size of the macro, and the # default alignment is the base size. # The macros are: # Macro Base size # RES_B 1 # RES_W 2 # RES_D 4 # RES_Q 8 # RES_DQ 16 # RES_Y 32 # RES_Z 64 # # RES_U defines a union. It's arguments are a name and two or more # pairs of "size, alignment" # # The two assigns are only needed if this structure is being nested # within another. 
Even if the assigns are not done, one can still use # STRUCT_NAME_size as the size of the structure. # # Note that for nesting, you still need to assign to STRUCT_NAME_size. # # The differences between this and using "struc" directly are that each # type is implicitly aligned to its natural length (although this can be # over-ridden with an explicit third parameter), and that the structure # is padded at the end to its overall alignment. # ######################################################################### #ifndef _DATASTRUCT_ASM_ #define _DATASTRUCT_ASM_ #define PTR_SZ 8 #define SHA512_DIGEST_WORD_SIZE 8 #define SHA512_MB_MGR_NUM_LANES_AVX2 4 #define NUM_SHA512_DIGEST_WORDS 8 #define SZ4 4*SHA512_DIGEST_WORD_SIZE #define ROUNDS 80*SZ4 #define SHA512_DIGEST_ROW_SIZE (SHA512_MB_MGR_NUM_LANES_AVX2 * 8) # START_FIELDS .macro START_FIELDS _FIELD_OFFSET = 0 _STRUCT_ALIGN = 0 .endm # FIELD name size align .macro FIELD name size align _FIELD_OFFSET = (_FIELD_OFFSET + (\align) - 1) & (~ ((\align)-1)) \name = _FIELD_OFFSET _FIELD_OFFSET = _FIELD_OFFSET + (\size) .if (\align > _STRUCT_ALIGN) _STRUCT_ALIGN = \align .endif .endm # END_FIELDS .macro END_FIELDS _FIELD_OFFSET = (_FIELD_OFFSET + _STRUCT_ALIGN-1) & (~ (_STRUCT_ALIGN-1)) .endm .macro STRUCT p1 START_FIELDS .struc \p1 .endm .macro ENDSTRUCT tmp = _FIELD_OFFSET END_FIELDS tmp = (_FIELD_OFFSET - ##tmp) .if (tmp > 0) .lcomm tmp .endm ## RES_int name size align .macro RES_int p1 p2 p3 name = \p1 size = \p2 align = .\p3 _FIELD_OFFSET = (_FIELD_OFFSET + (align) - 1) & (~ ((align)-1)) .align align .lcomm name size _FIELD_OFFSET = _FIELD_OFFSET + (size) .if (align > _STRUCT_ALIGN) _STRUCT_ALIGN = align .endif .endm # macro RES_B name, size [, align] .macro RES_B _name, _size, _align=1 RES_int _name _size _align .endm # macro RES_W name, size [, align] .macro RES_W _name, _size, _align=2 RES_int _name 2*(_size) _align .endm # macro RES_D name, size [, align] .macro RES_D _name, _size, _align=4 RES_int _name 4*(_size) 
_align .endm # macro RES_Q name, size [, align] .macro RES_Q _name, _size, _align=8 RES_int _name 8*(_size) _align .endm # macro RES_DQ name, size [, align] .macro RES_DQ _name, _size, _align=16 RES_int _name 16*(_size) _align .endm # macro RES_Y name, size [, align] .macro RES_Y _name, _size, _align=32 RES_int _name 32*(_size) _align .endm # macro RES_Z name, size [, align] .macro RES_Z _name, _size, _align=64 RES_int _name 64*(_size) _align .endm #endif ################################################################### ### Define SHA512 Out Of Order Data Structures ################################################################### START_FIELDS # LANE_DATA ### name size align FIELD _job_in_lane, 8, 8 # pointer to job object END_FIELDS _LANE_DATA_size = _FIELD_OFFSET _LANE_DATA_align = _STRUCT_ALIGN #################################################################### START_FIELDS # SHA512_ARGS_X4 ### name size align FIELD _digest, 8*8*4, 4 # transposed digest FIELD _data_ptr, 8*4, 8 # array of pointers to data END_FIELDS _SHA512_ARGS_X4_size = _FIELD_OFFSET _SHA512_ARGS_X4_align = _STRUCT_ALIGN ##################################################################### START_FIELDS # MB_MGR ### name size align FIELD _args, _SHA512_ARGS_X4_size, _SHA512_ARGS_X4_align FIELD _lens, 8*4, 8 FIELD _unused_lanes, 8, 8 FIELD _ldata, _LANE_DATA_size*4, _LANE_DATA_align END_FIELDS _MB_MGR_size = _FIELD_OFFSET _MB_MGR_align = _STRUCT_ALIGN _args_digest = _args + _digest _args_data_ptr = _args + _data_ptr ####################################################################### ####################################################################### #### Define constants ####################################################################### #define STS_UNKNOWN 0 #define STS_BEING_PROCESSED 1 #define STS_COMPLETED 2 ####################################################################### #### Define JOB_SHA512 structure 
####################################################################### START_FIELDS # JOB_SHA512 ### name size align FIELD _buffer, 8, 8 # pointer to buffer FIELD _len, 8, 8 # length in bytes FIELD _result_digest, 8*8, 32 # Digest (output) FIELD _status, 4, 4 FIELD _user_data, 8, 8 END_FIELDS _JOB_SHA512_size = _FIELD_OFFSET _JOB_SHA512_align = _STRUCT_ALIGN
AirFortressIlikara/LS2K0300-linux-4.19
8,848
arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S
/* * Flush routine for SHA512 multibuffer * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * Contact Information: * Megha Dey <megha.dey@linux.intel.com> * * BSD LICENSE * * Copyright(c) 2016 Intel Corporation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <linux/linkage.h> #include <asm/frame.h> #include "sha512_mb_mgr_datastruct.S" .extern sha512_x4_avx2 # LINUX register definitions #define arg1 %rdi #define arg2 %rsi # idx needs to be other than arg1, arg2, rbx, r12 #define idx %rdx # Common definitions #define state arg1 #define job arg2 #define len2 arg2 #define unused_lanes %rbx #define lane_data %rbx #define tmp2 %rbx #define job_rax %rax #define tmp1 %rax #define size_offset %rax #define tmp %rax #define start_offset %rax #define tmp3 arg1 #define extra_blocks arg2 #define p arg2 #define tmp4 %r8 #define lens0 %r8 #define lens1 %r9 #define lens2 %r10 #define lens3 %r11 .macro LABEL prefix n \prefix\n\(): .endm .macro JNE_SKIP i jne skip_\i .endm .altmacro .macro SET_OFFSET _offset offset = \_offset .endm .noaltmacro # JOB* sha512_mb_mgr_flush_avx2(MB_MGR *state) # arg 1 : rcx : state ENTRY(sha512_mb_mgr_flush_avx2) FRAME_BEGIN push %rbx # If bit (32+3) is set, then all lanes are empty mov _unused_lanes(state), unused_lanes bt $32+7, unused_lanes jc return_null # find a lane with a non-null job xor idx, idx offset = (_ldata + 1*_LANE_DATA_size + _job_in_lane) cmpq $0, offset(state) cmovne one(%rip), idx offset = (_ldata + 2*_LANE_DATA_size + _job_in_lane) cmpq $0, offset(state) cmovne two(%rip), idx offset = (_ldata + 3*_LANE_DATA_size + _job_in_lane) cmpq $0, offset(state) cmovne three(%rip), idx # copy idx to empty lanes copy_lane_data: offset = (_args + _data_ptr) mov 
offset(state,idx,8), tmp I = 0 .rep 4 offset = (_ldata + I * _LANE_DATA_size + _job_in_lane) cmpq $0, offset(state) .altmacro JNE_SKIP %I offset = (_args + _data_ptr + 8*I) mov tmp, offset(state) offset = (_lens + 8*I +4) movl $0xFFFFFFFF, offset(state) LABEL skip_ %I I = (I+1) .noaltmacro .endr # Find min length mov _lens + 0*8(state),lens0 mov lens0,idx mov _lens + 1*8(state),lens1 cmp idx,lens1 cmovb lens1,idx mov _lens + 2*8(state),lens2 cmp idx,lens2 cmovb lens2,idx mov _lens + 3*8(state),lens3 cmp idx,lens3 cmovb lens3,idx mov idx,len2 and $0xF,idx and $~0xFF,len2 jz len_is_0 sub len2, lens0 sub len2, lens1 sub len2, lens2 sub len2, lens3 shr $32,len2 mov lens0, _lens + 0*8(state) mov lens1, _lens + 1*8(state) mov lens2, _lens + 2*8(state) mov lens3, _lens + 3*8(state) # "state" and "args" are the same address, arg1 # len is arg2 call sha512_x4_avx2 # state and idx are intact len_is_0: # process completed job "idx" imul $_LANE_DATA_size, idx, lane_data lea _ldata(state, lane_data), lane_data mov _job_in_lane(lane_data), job_rax movq $0, _job_in_lane(lane_data) movl $STS_COMPLETED, _status(job_rax) mov _unused_lanes(state), unused_lanes shl $8, unused_lanes or idx, unused_lanes mov unused_lanes, _unused_lanes(state) movl $0xFFFFFFFF, _lens+4(state, idx, 8) vmovq _args_digest+0*32(state, idx, 8), %xmm0 vpinsrq $1, _args_digest+1*32(state, idx, 8), %xmm0, %xmm0 vmovq _args_digest+2*32(state, idx, 8), %xmm1 vpinsrq $1, _args_digest+3*32(state, idx, 8), %xmm1, %xmm1 vmovq _args_digest+4*32(state, idx, 8), %xmm2 vpinsrq $1, _args_digest+5*32(state, idx, 8), %xmm2, %xmm2 vmovq _args_digest+6*32(state, idx, 8), %xmm3 vpinsrq $1, _args_digest+7*32(state, idx, 8), %xmm3, %xmm3 vmovdqu %xmm0, _result_digest(job_rax) vmovdqu %xmm1, _result_digest+1*16(job_rax) vmovdqu %xmm2, _result_digest+2*16(job_rax) vmovdqu %xmm3, _result_digest+3*16(job_rax) return: pop %rbx FRAME_END ret return_null: xor job_rax, job_rax jmp return ENDPROC(sha512_mb_mgr_flush_avx2) .align 16 
ENTRY(sha512_mb_mgr_get_comp_job_avx2) push %rbx mov _unused_lanes(state), unused_lanes bt $(32+7), unused_lanes jc .return_null # Find min length mov _lens(state),lens0 mov lens0,idx mov _lens+1*8(state),lens1 cmp idx,lens1 cmovb lens1,idx mov _lens+2*8(state),lens2 cmp idx,lens2 cmovb lens2,idx mov _lens+3*8(state),lens3 cmp idx,lens3 cmovb lens3,idx test $~0xF,idx jnz .return_null and $0xF,idx #process completed job "idx" imul $_LANE_DATA_size, idx, lane_data lea _ldata(state, lane_data), lane_data mov _job_in_lane(lane_data), job_rax movq $0, _job_in_lane(lane_data) movl $STS_COMPLETED, _status(job_rax) mov _unused_lanes(state), unused_lanes shl $8, unused_lanes or idx, unused_lanes mov unused_lanes, _unused_lanes(state) movl $0xFFFFFFFF, _lens+4(state, idx, 8) vmovq _args_digest(state, idx, 8), %xmm0 vpinsrq $1, _args_digest+1*32(state, idx, 8), %xmm0, %xmm0 vmovq _args_digest+2*32(state, idx, 8), %xmm1 vpinsrq $1, _args_digest+3*32(state, idx, 8), %xmm1, %xmm1 vmovq _args_digest+4*32(state, idx, 8), %xmm2 vpinsrq $1, _args_digest+5*32(state, idx, 8), %xmm2, %xmm2 vmovq _args_digest+6*32(state, idx, 8), %xmm3 vpinsrq $1, _args_digest+7*32(state, idx, 8), %xmm3, %xmm3 vmovdqu %xmm0, _result_digest+0*16(job_rax) vmovdqu %xmm1, _result_digest+1*16(job_rax) vmovdqu %xmm2, _result_digest+2*16(job_rax) vmovdqu %xmm3, _result_digest+3*16(job_rax) pop %rbx ret .return_null: xor job_rax, job_rax pop %rbx ret ENDPROC(sha512_mb_mgr_get_comp_job_avx2) .section .rodata.cst8.one, "aM", @progbits, 8 .align 8 one: .quad 1 .section .rodata.cst8.two, "aM", @progbits, 8 .align 8 two: .quad 2 .section .rodata.cst8.three, "aM", @progbits, 8 .align 8 three: .quad 3
AirFortressIlikara/LS2K0300-linux-4.19
6,758
arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S
/* * Buffer submit code for multi buffer SHA512 algorithm * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * Contact Information: * Megha Dey <megha.dey@linux.intel.com> * * BSD LICENSE * * Copyright(c) 2016 Intel Corporation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <linux/linkage.h> #include <asm/frame.h> #include "sha512_mb_mgr_datastruct.S" .extern sha512_x4_avx2 #define arg1 %rdi #define arg2 %rsi #define idx %rdx #define last_len %rdx #define size_offset %rcx #define tmp2 %rcx # Common definitions #define state arg1 #define job arg2 #define len2 arg2 #define p2 arg2 #define p %r11 #define start_offset %r11 #define unused_lanes %rbx #define job_rax %rax #define len %rax #define lane %r12 #define tmp3 %r12 #define lens3 %r12 #define extra_blocks %r8 #define lens0 %r8 #define tmp %r9 #define lens1 %r9 #define lane_data %r10 #define lens2 %r10 #define DWORD_len %eax # JOB* sha512_mb_mgr_submit_avx2(MB_MGR *state, JOB *job) # arg 1 : rcx : state # arg 2 : rdx : job ENTRY(sha512_mb_mgr_submit_avx2) FRAME_BEGIN push %rbx push %r12 mov _unused_lanes(state), unused_lanes movzb %bl,lane shr $8, unused_lanes imul $_LANE_DATA_size, lane,lane_data movl $STS_BEING_PROCESSED, _status(job) lea _ldata(state, lane_data), lane_data mov unused_lanes, _unused_lanes(state) movl _len(job), DWORD_len mov job, _job_in_lane(lane_data) movl DWORD_len,_lens+4(state , lane, 8) # Load digest words from result_digest vmovdqu _result_digest+0*16(job), %xmm0 vmovdqu _result_digest+1*16(job), %xmm1 vmovdqu _result_digest+2*16(job), %xmm2 vmovdqu _result_digest+3*16(job), %xmm3 vmovq %xmm0, _args_digest(state, lane, 8) vpextrq $1, %xmm0, _args_digest+1*32(state , lane, 8) vmovq %xmm1, _args_digest+2*32(state , lane, 8) vpextrq 
$1, %xmm1, _args_digest+3*32(state , lane, 8) vmovq %xmm2, _args_digest+4*32(state , lane, 8) vpextrq $1, %xmm2, _args_digest+5*32(state , lane, 8) vmovq %xmm3, _args_digest+6*32(state , lane, 8) vpextrq $1, %xmm3, _args_digest+7*32(state , lane, 8) mov _buffer(job), p mov p, _args_data_ptr(state, lane, 8) cmp $0xFF, unused_lanes jne return_null start_loop: # Find min length mov _lens+0*8(state),lens0 mov lens0,idx mov _lens+1*8(state),lens1 cmp idx,lens1 cmovb lens1, idx mov _lens+2*8(state),lens2 cmp idx,lens2 cmovb lens2,idx mov _lens+3*8(state),lens3 cmp idx,lens3 cmovb lens3,idx mov idx,len2 and $0xF,idx and $~0xFF,len2 jz len_is_0 sub len2,lens0 sub len2,lens1 sub len2,lens2 sub len2,lens3 shr $32,len2 mov lens0, _lens + 0*8(state) mov lens1, _lens + 1*8(state) mov lens2, _lens + 2*8(state) mov lens3, _lens + 3*8(state) # "state" and "args" are the same address, arg1 # len is arg2 call sha512_x4_avx2 # state and idx are intact len_is_0: # process completed job "idx" imul $_LANE_DATA_size, idx, lane_data lea _ldata(state, lane_data), lane_data mov _job_in_lane(lane_data), job_rax mov _unused_lanes(state), unused_lanes movq $0, _job_in_lane(lane_data) movl $STS_COMPLETED, _status(job_rax) shl $8, unused_lanes or idx, unused_lanes mov unused_lanes, _unused_lanes(state) movl $0xFFFFFFFF,_lens+4(state,idx,8) vmovq _args_digest+0*32(state , idx, 8), %xmm0 vpinsrq $1, _args_digest+1*32(state , idx, 8), %xmm0, %xmm0 vmovq _args_digest+2*32(state , idx, 8), %xmm1 vpinsrq $1, _args_digest+3*32(state , idx, 8), %xmm1, %xmm1 vmovq _args_digest+4*32(state , idx, 8), %xmm2 vpinsrq $1, _args_digest+5*32(state , idx, 8), %xmm2, %xmm2 vmovq _args_digest+6*32(state , idx, 8), %xmm3 vpinsrq $1, _args_digest+7*32(state , idx, 8), %xmm3, %xmm3 vmovdqu %xmm0, _result_digest + 0*16(job_rax) vmovdqu %xmm1, _result_digest + 1*16(job_rax) vmovdqu %xmm2, _result_digest + 2*16(job_rax) vmovdqu %xmm3, _result_digest + 3*16(job_rax) return: pop %r12 pop %rbx FRAME_END ret return_null: xor 
job_rax, job_rax jmp return ENDPROC(sha512_mb_mgr_submit_avx2) /* UNUSED? .section .rodata.cst16, "aM", @progbits, 16 .align 16 H0: .int 0x6a09e667 H1: .int 0xbb67ae85 H2: .int 0x3c6ef372 H3: .int 0xa54ff53a H4: .int 0x510e527f H5: .int 0x9b05688c H6: .int 0x1f83d9ab H7: .int 0x5be0cd19 */
AirFortressIlikara/LS2K0300-linux-4.19
2,348
arch/x86/boot/compressed/efi_stub_32.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * EFI call stub for IA32. * * This stub allows us to make EFI calls in physical mode with interrupts * turned off. Note that this implementation is different from the one in * arch/x86/platform/efi/efi_stub_32.S because we're _already_ in physical * mode at this point. */ #include <linux/linkage.h> #include <asm/page_types.h> /* * efi_call_phys(void *, ...) is a function with variable parameters. * All the callers of this function assure that all the parameters are 4-bytes. */ /* * In gcc calling convention, EBX, ESP, EBP, ESI and EDI are all callee save. * So we'd better save all of them at the beginning of this function and restore * at the end no matter how many we use, because we can not assure EFI runtime * service functions will comply with gcc calling convention, too. */ .text ENTRY(efi_call_phys) /* * 0. The function can only be called in Linux kernel. So CS has been * set to 0x0010, DS and SS have been set to 0x0018. In EFI, I found * the values of these registers are the same. And, the corresponding * GDT entries are identical. So I will do nothing about segment reg * and GDT, but change GDT base register in prelog and epilog. */ /* * 1. Because we haven't been relocated by this point we need to * use relative addressing. */ call 1f 1: popl %edx subl $1b, %edx /* * 2. Now on the top of stack is the return * address in the caller of efi_call_phys(), then parameter 1, * parameter 2, ..., param n. To make things easy, we save the return * address of efi_call_phys in a global variable. */ popl %ecx movl %ecx, saved_return_addr(%edx) /* get the function pointer into ECX*/ popl %ecx movl %ecx, efi_rt_function_ptr(%edx) /* * 3. Call the physical function. */ call *%ecx /* * 4. Balance the stack. And because EAX contain the return value, * we'd better not clobber it. We need to calculate our address * again because %ecx and %edx are not preserved across EFI function * calls. 
*/ call 1f 1: popl %edx subl $1b, %edx movl efi_rt_function_ptr(%edx), %ecx pushl %ecx /* * 10. Push the saved return address onto the stack and return. */ movl saved_return_addr(%edx), %ecx pushl %ecx ret ENDPROC(efi_call_phys) .previous .data saved_return_addr: .long 0 efi_rt_function_ptr: .long 0
AirFortressIlikara/LS2K0300-linux-4.19
6,303
arch/x86/boot/compressed/head_32.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/boot/head.S * * Copyright (C) 1991, 1992, 1993 Linus Torvalds */ /* * head.S contains the 32-bit startup code. * * NOTE!!! Startup happens at absolute address 0x00001000, which is also where * the page directory will exist. The startup code will be overwritten by * the page directory. [According to comments etc elsewhere on a compressed * kernel it will end up at 0x1000 + 1Mb I hope so as I assume this. - AC] * * Page 0 is deliberately kept safe, since System Management Mode code in * laptops may need to access the BIOS data stored there. This is also * useful for future device drivers that either access the BIOS via VM86 * mode. */ /* * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996 */ .text #include <linux/init.h> #include <linux/linkage.h> #include <asm/segment.h> #include <asm/page_types.h> #include <asm/boot.h> #include <asm/asm-offsets.h> #include <asm/bootparam.h> /* * The 32-bit x86 assembler in binutils 2.26 will generate R_386_GOT32X * relocation to get the symbol address in PIC. When the compressed x86 * kernel isn't built as PIC, the linker optimizes R_386_GOT32X * relocations to their fixed symbol addresses. However, when the * compressed x86 kernel is loaded at a different address, it leads * to the following load failure: * * Failed to allocate space for phdrs * * during the decompression stage. * * If the compressed x86 kernel is relocatable at run-time, it should be * compiled with -fPIE, instead of -fPIC, if possible and should be built as * Position Independent Executable (PIE) so that linker won't optimize * R_386_GOT32X relocation to its fixed symbol address. Older * linkers generate R_386_32 relocations against locally defined symbols, * _bss, _ebss, _got, _egot and _end, in PIE. It isn't wrong, just less * optimal than R_386_RELATIVE. But the x86 kernel fails to properly handle * R_386_32 relocations when relocating the kernel. 
To generate * R_386_RELATIVE relocations, we mark _bss, _ebss, _got, _egot and _end as * hidden: */ .hidden _bss .hidden _ebss .hidden _got .hidden _egot .hidden _end __HEAD ENTRY(startup_32) cld /* * Test KEEP_SEGMENTS flag to see if the bootloader is asking * us to not reload segments */ testb $KEEP_SEGMENTS, BP_loadflags(%esi) jnz 1f cli movl $__BOOT_DS, %eax movl %eax, %ds movl %eax, %es movl %eax, %fs movl %eax, %gs movl %eax, %ss 1: /* * Calculate the delta between where we were compiled to run * at and where we were actually loaded at. This can only be done * with a short local call on x86. Nothing else will tell us what * address we are running at. The reserved chunk of the real-mode * data at 0x1e4 (defined as a scratch field) are used as the stack * for this calculation. Only 4 bytes are needed. */ leal (BP_scratch+4)(%esi), %esp call 1f 1: popl %ebp subl $1b, %ebp /* * %ebp contains the address we are loaded at by the boot loader and %ebx * contains the address where we should move the kernel image temporarily * for safe in-place decompression. */ #ifdef CONFIG_RELOCATABLE movl %ebp, %ebx movl BP_kernel_alignment(%esi), %eax decl %eax addl %eax, %ebx notl %eax andl %eax, %ebx cmpl $LOAD_PHYSICAL_ADDR, %ebx jae 1f #endif movl $LOAD_PHYSICAL_ADDR, %ebx 1: /* Target address to relocate to for decompression */ movl BP_init_size(%esi), %eax subl $_end, %eax addl %eax, %ebx /* Set up the stack */ leal boot_stack_end(%ebx), %esp /* Zero EFLAGS */ pushl $0 popfl /* * Copy the compressed kernel to the end of our buffer * where decompression in place becomes safe. */ pushl %esi leal (_bss-4)(%ebp), %esi leal (_bss-4)(%ebx), %edi movl $(_bss - startup_32), %ecx shrl $2, %ecx std rep movsl cld popl %esi /* * Jump to the relocated address. */ leal relocated(%ebx), %eax jmp *%eax ENDPROC(startup_32) #ifdef CONFIG_EFI_STUB /* * We don't need the return address, so set up the stack so efi_main() can find * its arguments. 
*/ ENTRY(efi_pe_entry) add $0x4, %esp call 1f 1: popl %esi subl $1b, %esi popl %ecx movl %ecx, efi32_config(%esi) /* Handle */ popl %ecx movl %ecx, efi32_config+8(%esi) /* EFI System table pointer */ /* Relocate efi_config->call() */ leal efi32_config(%esi), %eax add %esi, 40(%eax) pushl %eax call make_boot_params cmpl $0, %eax je fail movl %esi, BP_code32_start(%eax) popl %ecx pushl %eax pushl %ecx jmp 2f /* Skip efi_config initialization */ ENDPROC(efi_pe_entry) ENTRY(efi32_stub_entry) add $0x4, %esp popl %ecx popl %edx call 1f 1: popl %esi subl $1b, %esi movl %ecx, efi32_config(%esi) /* Handle */ movl %edx, efi32_config+8(%esi) /* EFI System table pointer */ /* Relocate efi_config->call() */ leal efi32_config(%esi), %eax add %esi, 40(%eax) pushl %eax 2: call efi_main cmpl $0, %eax movl %eax, %esi jne 2f fail: /* EFI init failed, so hang. */ hlt jmp fail 2: movl BP_code32_start(%esi), %eax leal startup_32(%eax), %eax jmp *%eax ENDPROC(efi32_stub_entry) #endif .text relocated: /* * Clear BSS (stack is currently empty) */ xorl %eax, %eax leal _bss(%ebx), %edi leal _ebss(%ebx), %ecx subl %edi, %ecx shrl $2, %ecx rep stosl /* * Adjust our own GOT */ leal _got(%ebx), %edx leal _egot(%ebx), %ecx 1: cmpl %ecx, %edx jae 2f addl %ebx, (%edx) addl $4, %edx jmp 1b 2: /* * Do the extraction, and jump to the new kernel.. */ /* push arguments for extract_kernel: */ pushl $z_output_len /* decompressed length, end of relocs */ movl BP_init_size(%esi), %eax subl $_end, %eax movl %ebx, %ebp subl %eax, %ebp pushl %ebp /* output address */ pushl $z_input_len /* input_len */ leal input_data(%ebx), %eax pushl %eax /* input_data */ leal boot_heap(%ebx), %eax pushl %eax /* heap area */ pushl %esi /* real mode pointer */ call extract_kernel /* returns kernel location in %eax */ addl $24, %esp /* * Jump to the extracted kernel. 
*/ xorl %ebx, %ebx jmp *%eax #ifdef CONFIG_EFI_STUB .data efi32_config: .fill 5,8,0 .long efi_call_phys .long 0 .byte 0 #endif /* * Stack and heap for uncompression */ .bss .balign 4 boot_heap: .fill BOOT_HEAP_SIZE, 1, 0 boot_stack: .fill BOOT_STACK_SIZE, 1, 0 boot_stack_end:
AirFortressIlikara/LS2K0300-linux-4.19
3,694
arch/x86/boot/compressed/efi_thunk_64.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2014, 2015 Intel Corporation; author Matt Fleming * * Early support for invoking 32-bit EFI services from a 64-bit kernel. * * Because this thunking occurs before ExitBootServices() we have to * restore the firmware's 32-bit GDT before we make EFI serivce calls, * since the firmware's 32-bit IDT is still currently installed and it * needs to be able to service interrupts. * * On the plus side, we don't have to worry about mangling 64-bit * addresses into 32-bits because we're executing with an identify * mapped pagetable and haven't transitioned to 64-bit virtual addresses * yet. */ #include <linux/linkage.h> #include <asm/msr.h> #include <asm/page_types.h> #include <asm/processor-flags.h> #include <asm/segment.h> .code64 .text ENTRY(efi64_thunk) push %rbp push %rbx subq $8, %rsp leaq efi_exit32(%rip), %rax movl %eax, 4(%rsp) leaq efi_gdt64(%rip), %rax movl %eax, (%rsp) movl %eax, 2(%rax) /* Fixup the gdt base address */ movl %ds, %eax push %rax movl %es, %eax push %rax movl %ss, %eax push %rax /* * Convert x86-64 ABI params to i386 ABI */ subq $32, %rsp movl %esi, 0x0(%rsp) movl %edx, 0x4(%rsp) movl %ecx, 0x8(%rsp) movq %r8, %rsi movl %esi, 0xc(%rsp) movq %r9, %rsi movl %esi, 0x10(%rsp) sgdt save_gdt(%rip) leaq 1f(%rip), %rbx movq %rbx, func_rt_ptr(%rip) /* * Switch to gdt with 32-bit segments. This is the firmware GDT * that was installed when the kernel started executing. This * pointer was saved at the EFI stub entry point in head_64.S. */ leaq efi32_boot_gdt(%rip), %rax lgdt (%rax) pushq $__KERNEL_CS leaq efi_enter32(%rip), %rax pushq %rax lretq 1: addq $32, %rsp lgdt save_gdt(%rip) pop %rbx movl %ebx, %ss pop %rbx movl %ebx, %es pop %rbx movl %ebx, %ds /* * Convert 32-bit status code into 64-bit. 
*/ test %rax, %rax jz 1f movl %eax, %ecx andl $0x0fffffff, %ecx andl $0xf0000000, %eax shl $32, %rax or %rcx, %rax 1: addq $8, %rsp pop %rbx pop %rbp ret ENDPROC(efi64_thunk) ENTRY(efi_exit32) movq func_rt_ptr(%rip), %rax push %rax mov %rdi, %rax ret ENDPROC(efi_exit32) .code32 /* * EFI service pointer must be in %edi. * * The stack should represent the 32-bit calling convention. */ ENTRY(efi_enter32) movl $__KERNEL_DS, %eax movl %eax, %ds movl %eax, %es movl %eax, %ss /* Reload pgtables */ movl %cr3, %eax movl %eax, %cr3 /* Disable paging */ movl %cr0, %eax btrl $X86_CR0_PG_BIT, %eax movl %eax, %cr0 /* Disable long mode via EFER */ movl $MSR_EFER, %ecx rdmsr btrl $_EFER_LME, %eax wrmsr call *%edi /* We must preserve return value */ movl %eax, %edi /* * Some firmware will return with interrupts enabled. Be sure to * disable them before we switch GDTs. */ cli movl 56(%esp), %eax movl %eax, 2(%eax) lgdtl (%eax) movl %cr4, %eax btsl $(X86_CR4_PAE_BIT), %eax movl %eax, %cr4 movl %cr3, %eax movl %eax, %cr3 movl $MSR_EFER, %ecx rdmsr btsl $_EFER_LME, %eax wrmsr xorl %eax, %eax lldt %ax movl 60(%esp), %eax pushl $__KERNEL_CS pushl %eax /* Enable paging */ movl %cr0, %eax btsl $X86_CR0_PG_BIT, %eax movl %eax, %cr0 lret ENDPROC(efi_enter32) .data .balign 8 .global efi32_boot_gdt efi32_boot_gdt: .word 0 .quad 0 save_gdt: .word 0 .quad 0 func_rt_ptr: .quad 0 .global efi_gdt64 efi_gdt64: .word efi_gdt64_end - efi_gdt64 .long 0 /* Filled out by user */ .word 0 .quad 0x0000000000000000 /* NULL descriptor */ .quad 0x00af9a000000ffff /* __KERNEL_CS */ .quad 0x00cf92000000ffff /* __KERNEL_DS */ .quad 0x0080890000000000 /* TS descriptor */ .quad 0x0000000000000000 /* TS continued */ efi_gdt64_end:
AirFortressIlikara/LS2K0300-linux-4.19
2,043
arch/x86/boot/compressed/mem_encrypt.S
/* * AMD Memory Encryption Support * * Copyright (C) 2017 Advanced Micro Devices, Inc. * * Author: Tom Lendacky <thomas.lendacky@amd.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/linkage.h> #include <asm/processor-flags.h> #include <asm/msr.h> #include <asm/asm-offsets.h> .text .code32 ENTRY(get_sev_encryption_bit) xor %eax, %eax #ifdef CONFIG_AMD_MEM_ENCRYPT push %ebx push %ecx push %edx /* Check if running under a hypervisor */ movl $1, %eax cpuid bt $31, %ecx /* Check the hypervisor bit */ jnc .Lno_sev movl $0x80000000, %eax /* CPUID to check the highest leaf */ cpuid cmpl $0x8000001f, %eax /* See if 0x8000001f is available */ jb .Lno_sev /* * Check for the SEV feature: * CPUID Fn8000_001F[EAX] - Bit 1 * CPUID Fn8000_001F[EBX] - Bits 5:0 * Pagetable bit position used to indicate encryption */ movl $0x8000001f, %eax cpuid bt $1, %eax /* Check if SEV is available */ jnc .Lno_sev movl $MSR_AMD64_SEV, %ecx /* Read the SEV MSR */ rdmsr bt $MSR_AMD64_SEV_ENABLED_BIT, %eax /* Check if SEV is active */ jnc .Lno_sev movl %ebx, %eax andl $0x3f, %eax /* Return the encryption bit location */ jmp .Lsev_exit .Lno_sev: xor %eax, %eax .Lsev_exit: pop %edx pop %ecx pop %ebx #endif /* CONFIG_AMD_MEM_ENCRYPT */ ret ENDPROC(get_sev_encryption_bit) .code64 ENTRY(set_sev_encryption_mask) #ifdef CONFIG_AMD_MEM_ENCRYPT push %rbp push %rdx movq %rsp, %rbp /* Save current stack pointer */ call get_sev_encryption_bit /* Get the encryption bit position */ testl %eax, %eax jz .Lno_sev_mask bts %rax, sme_me_mask(%rip) /* Create the encryption mask */ .Lno_sev_mask: movq %rbp, %rsp /* Restore original stack pointer */ pop %rdx pop %rbp #endif xor %rax, %rax ret ENDPROC(set_sev_encryption_mask) .data #ifdef CONFIG_AMD_MEM_ENCRYPT .balign 8 GLOBAL(sme_me_mask) .quad 0 #endif
AirFortressIlikara/LS2K0300-linux-4.19
1,216
arch/x86/boot/compressed/vmlinux.lds.S
/* SPDX-License-Identifier: GPL-2.0 */ #include <asm-generic/vmlinux.lds.h> OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT) #undef i386 #include <asm/cache.h> #include <asm/page_types.h> #ifdef CONFIG_X86_64 OUTPUT_ARCH(i386:x86-64) ENTRY(startup_64) #else OUTPUT_ARCH(i386) ENTRY(startup_32) #endif SECTIONS { /* Be careful parts of head_64.S assume startup_32 is at * address 0. */ . = 0; .head.text : { _head = . ; HEAD_TEXT _ehead = . ; } .rodata..compressed : { *(.rodata..compressed) } .text : { _text = .; /* Text */ *(.text) *(.text.*) _etext = . ; } .rodata : { _rodata = . ; *(.rodata) /* read-only data */ *(.rodata.*) _erodata = . ; } .got : { _got = .; KEEP(*(.got.plt)) KEEP(*(.got)) _egot = .; } .data : { _data = . ; *(.data) *(.data.*) _edata = . ; } . = ALIGN(L1_CACHE_BYTES); .bss : { _bss = . ; *(.bss) *(.bss.*) *(COMMON) . = ALIGN(8); /* For convenience during zeroing */ _ebss = .; } #ifdef CONFIG_X86_64 . = ALIGN(PAGE_SIZE); .pgtable : { _pgtable = . ; *(.pgtable) _epgtable = . ; } #endif . = ALIGN(PAGE_SIZE); /* keep ZO size page aligned */ _end = .; }
AirFortressIlikara/LS2K0300-linux-4.19
16,639
arch/x86/boot/compressed/head_64.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/boot/head.S * * Copyright (C) 1991, 1992, 1993 Linus Torvalds */ /* * head.S contains the 32-bit startup code. * * NOTE!!! Startup happens at absolute address 0x00001000, which is also where * the page directory will exist. The startup code will be overwritten by * the page directory. [According to comments etc elsewhere on a compressed * kernel it will end up at 0x1000 + 1Mb I hope so as I assume this. - AC] * * Page 0 is deliberately kept safe, since System Management Mode code in * laptops may need to access the BIOS data stored there. This is also * useful for future device drivers that either access the BIOS via VM86 * mode. */ /* * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996 */ .code32 .text #include <linux/init.h> #include <linux/linkage.h> #include <asm/segment.h> #include <asm/boot.h> #include <asm/msr.h> #include <asm/processor-flags.h> #include <asm/asm-offsets.h> #include <asm/bootparam.h> #include "pgtable.h" /* * Locally defined symbols should be marked hidden: */ .hidden _bss .hidden _ebss .hidden _got .hidden _egot .hidden _end __HEAD .code32 ENTRY(startup_32) /* * 32bit entry is 0 and it is ABI so immutable! * If we come here directly from a bootloader, * kernel(text+data+bss+brk) ramdisk, zero_page, command line * all need to be under the 4G limit. */ cld /* * Test KEEP_SEGMENTS flag to see if the bootloader is asking * us to not reload segments */ testb $KEEP_SEGMENTS, BP_loadflags(%esi) jnz 1f cli movl $(__BOOT_DS), %eax movl %eax, %ds movl %eax, %es movl %eax, %ss 1: /* * Calculate the delta between where we were compiled to run * at and where we were actually loaded at. This can only be done * with a short local call on x86. Nothing else will tell us what * address we are running at. The reserved chunk of the real-mode * data at 0x1e4 (defined as a scratch field) are used as the stack * for this calculation. Only 4 bytes are needed. 
*/ leal (BP_scratch+4)(%esi), %esp call 1f 1: popl %ebp subl $1b, %ebp /* setup a stack and make sure cpu supports long mode. */ movl $boot_stack_end, %eax addl %ebp, %eax movl %eax, %esp call verify_cpu testl %eax, %eax jnz no_longmode /* * Compute the delta between where we were compiled to run at * and where the code will actually run at. * * %ebp contains the address we are loaded at by the boot loader and %ebx * contains the address where we should move the kernel image temporarily * for safe in-place decompression. */ #ifdef CONFIG_RELOCATABLE movl %ebp, %ebx movl BP_kernel_alignment(%esi), %eax decl %eax addl %eax, %ebx notl %eax andl %eax, %ebx cmpl $LOAD_PHYSICAL_ADDR, %ebx jae 1f #endif movl $LOAD_PHYSICAL_ADDR, %ebx 1: /* Target address to relocate to for decompression */ movl BP_init_size(%esi), %eax subl $_end, %eax addl %eax, %ebx /* * Prepare for entering 64 bit mode */ /* Load new GDT with the 64bit segments using 32bit descriptor */ addl %ebp, gdt+2(%ebp) lgdt gdt(%ebp) /* Enable PAE mode */ movl %cr4, %eax orl $X86_CR4_PAE, %eax movl %eax, %cr4 /* * Build early 4G boot pagetable */ /* * If SEV is active then set the encryption mask in the page tables. * This will insure that when the kernel is copied and decompressed * it will be done so encrypted. 
*/ call get_sev_encryption_bit xorl %edx, %edx testl %eax, %eax jz 1f subl $32, %eax /* Encryption bit is always above bit 31 */ bts %eax, %edx /* Set encryption mask for page tables */ 1: /* Initialize Page tables to 0 */ leal pgtable(%ebx), %edi xorl %eax, %eax movl $(BOOT_INIT_PGT_SIZE/4), %ecx rep stosl /* Build Level 4 */ leal pgtable + 0(%ebx), %edi leal 0x1007 (%edi), %eax movl %eax, 0(%edi) addl %edx, 4(%edi) /* Build Level 3 */ leal pgtable + 0x1000(%ebx), %edi leal 0x1007(%edi), %eax movl $4, %ecx 1: movl %eax, 0x00(%edi) addl %edx, 0x04(%edi) addl $0x00001000, %eax addl $8, %edi decl %ecx jnz 1b /* Build Level 2 */ leal pgtable + 0x2000(%ebx), %edi movl $0x00000183, %eax movl $2048, %ecx 1: movl %eax, 0(%edi) addl %edx, 4(%edi) addl $0x00200000, %eax addl $8, %edi decl %ecx jnz 1b /* Enable the boot page tables */ leal pgtable(%ebx), %eax movl %eax, %cr3 /* Enable Long mode in EFER (Extended Feature Enable Register) */ movl $MSR_EFER, %ecx rdmsr btsl $_EFER_LME, %eax wrmsr /* After gdt is loaded */ xorl %eax, %eax lldt %ax movl $__BOOT_TSS, %eax ltr %ax /* * Setup for the jump to 64bit mode * * When the jump is performend we will be in long mode but * in 32bit compatibility mode with EFER.LME = 1, CS.L = 0, CS.D = 1 * (and in turn EFER.LMA = 1). To jump into 64bit mode we use * the new gdt/idt that has __KERNEL_CS with CS.L = 1. * We place all of the values on our mini stack so lret can * used to perform that far jump. */ pushl $__KERNEL_CS leal startup_64(%ebp), %eax #ifdef CONFIG_EFI_MIXED movl efi32_config(%ebp), %ebx cmp $0, %ebx jz 1f leal handover_entry(%ebp), %eax 1: #endif pushl %eax /* Enter paged protected Mode, activating Long Mode */ movl $(X86_CR0_PG | X86_CR0_PE), %eax /* Enable Paging and Protected mode */ movl %eax, %cr0 /* Jump from 32bit compatibility mode into 64bit mode. 
*/ lret ENDPROC(startup_32) #ifdef CONFIG_EFI_MIXED .org 0x190 ENTRY(efi32_stub_entry) add $0x4, %esp /* Discard return address */ popl %ecx popl %edx popl %esi leal (BP_scratch+4)(%esi), %esp call 1f 1: pop %ebp subl $1b, %ebp movl %ecx, efi32_config(%ebp) movl %edx, efi32_config+8(%ebp) sgdtl efi32_boot_gdt(%ebp) leal efi32_config(%ebp), %eax movl %eax, efi_config(%ebp) /* Disable paging */ movl %cr0, %eax btrl $X86_CR0_PG_BIT, %eax movl %eax, %cr0 jmp startup_32 ENDPROC(efi32_stub_entry) #endif .code64 .org 0x200 ENTRY(startup_64) /* * 64bit entry is 0x200 and it is ABI so immutable! * We come here either from startup_32 or directly from a * 64bit bootloader. * If we come here from a bootloader, kernel(text+data+bss+brk), * ramdisk, zero_page, command line could be above 4G. * We depend on an identity mapped page table being provided * that maps our entire kernel(text+data+bss+brk), zero page * and command line. */ /* Setup data segments. */ xorl %eax, %eax movl %eax, %ds movl %eax, %es movl %eax, %ss movl %eax, %fs movl %eax, %gs /* * Compute the decompressed kernel start address. It is where * we were loaded at aligned to a 2M boundary. %rbp contains the * decompressed kernel start address. * * If it is a relocatable kernel then decompress and run the kernel * from load address aligned to 2MB addr, otherwise decompress and * run the kernel from LOAD_PHYSICAL_ADDR * * We cannot rely on the calculation done in 32-bit mode, since we * may have been invoked via the 64-bit entry point. */ /* Start with the delta to where the kernel will run at. 
*/ #ifdef CONFIG_RELOCATABLE leaq startup_32(%rip) /* - $startup_32 */, %rbp movl BP_kernel_alignment(%rsi), %eax decl %eax addq %rax, %rbp notq %rax andq %rax, %rbp cmpq $LOAD_PHYSICAL_ADDR, %rbp jae 1f #endif movq $LOAD_PHYSICAL_ADDR, %rbp 1: /* Target address to relocate to for decompression */ movl BP_init_size(%rsi), %ebx subl $_end, %ebx addq %rbp, %rbx /* Set up the stack */ leaq boot_stack_end(%rbx), %rsp /* * paging_prepare() and cleanup_trampoline() below can have GOT * references. Adjust the table with address we are running at. * * Zero RAX for adjust_got: the GOT was not adjusted before; * there's no adjustment to undo. */ xorq %rax, %rax /* * Calculate the address the binary is loaded at and use it as * a GOT adjustment. */ call 1f 1: popq %rdi subq $1b, %rdi call adjust_got /* * At this point we are in long mode with 4-level paging enabled, * but we might want to enable 5-level paging or vice versa. * * The problem is that we cannot do it directly. Setting or clearing * CR4.LA57 in long mode would trigger #GP. So we need to switch off * long mode and paging first. * * We also need a trampoline in lower memory to switch over from * 4- to 5-level paging for cases when the bootloader puts the kernel * above 4G, but didn't enable 5-level paging for us. * * The same trampoline can be used to switch from 5- to 4-level paging * mode, like when starting 4-level paging kernel via kexec() when * original kernel worked in 5-level paging mode. * * For the trampoline, we need the top page table to reside in lower * memory as we don't have a way to load 64-bit values into CR3 in * 32-bit mode. * * We go though the trampoline even if we don't have to: if we're * already in a desired paging mode. This way the trampoline code gets * tested on every boot. */ /* Make sure we have GDT with 32-bit code segment */ leaq gdt(%rip), %rax movq %rax, gdt64+2(%rip) lgdt gdt64(%rip) /* * paging_prepare() sets up the trampoline and checks if we need to * enable 5-level paging. 
* * Address of the trampoline is returned in RAX. * Non zero RDX on return means we need to enable 5-level paging. * * RSI holds real mode data and needs to be preserved across * this function call. */ pushq %rsi movq %rsi, %rdi /* real mode address */ call paging_prepare popq %rsi /* Save the trampoline address in RCX */ movq %rax, %rcx /* * Load the address of trampoline_return() into RDI. * It will be used by the trampoline to return to the main code. */ leaq trampoline_return(%rip), %rdi /* Switch to compatibility mode (CS.L = 0 CS.D = 1) via far return */ pushq $__KERNEL32_CS leaq TRAMPOLINE_32BIT_CODE_OFFSET(%rax), %rax pushq %rax lretq trampoline_return: /* Restore the stack, the 32-bit trampoline uses its own stack */ leaq boot_stack_end(%rbx), %rsp /* * cleanup_trampoline() would restore trampoline memory. * * RDI is address of the page table to use instead of page table * in trampoline memory (if required). * * RSI holds real mode data and needs to be preserved across * this function call. */ pushq %rsi leaq top_pgtable(%rbx), %rdi call cleanup_trampoline popq %rsi /* Zero EFLAGS */ pushq $0 popfq /* * Previously we've adjusted the GOT with address the binary was * loaded at. Now we need to re-adjust for relocation address. * * Calculate the address the binary is loaded at, so that we can * undo the previous GOT adjustment. */ call 1f 1: popq %rax subq $1b, %rax /* The new adjustment is the relocation address */ movq %rbx, %rdi call adjust_got /* * Copy the compressed kernel to the end of our buffer * where decompression in place becomes safe. */ pushq %rsi leaq (_bss-8)(%rip), %rsi leaq (_bss-8)(%rbx), %rdi movq $_bss /* - $startup_32 */, %rcx shrq $3, %rcx std rep movsq cld popq %rsi /* * Jump to the relocated address. */ leaq relocated(%rbx), %rax jmp *%rax #ifdef CONFIG_EFI_STUB /* The entry point for the PE/COFF executable is efi_pe_entry. 
*/ ENTRY(efi_pe_entry) movq %rcx, efi64_config(%rip) /* Handle */ movq %rdx, efi64_config+8(%rip) /* EFI System table pointer */ leaq efi64_config(%rip), %rax movq %rax, efi_config(%rip) call 1f 1: popq %rbp subq $1b, %rbp /* * Relocate efi_config->call(). */ addq %rbp, efi64_config+40(%rip) movq %rax, %rdi call make_boot_params cmpq $0,%rax je fail mov %rax, %rsi leaq startup_32(%rip), %rax movl %eax, BP_code32_start(%rsi) jmp 2f /* Skip the relocation */ handover_entry: call 1f 1: popq %rbp subq $1b, %rbp /* * Relocate efi_config->call(). */ movq efi_config(%rip), %rax addq %rbp, 40(%rax) 2: movq efi_config(%rip), %rdi call efi_main movq %rax,%rsi cmpq $0,%rax jne 2f fail: /* EFI init failed, so hang. */ hlt jmp fail 2: movl BP_code32_start(%esi), %eax leaq startup_64(%rax), %rax jmp *%rax ENDPROC(efi_pe_entry) .org 0x390 ENTRY(efi64_stub_entry) movq %rdi, efi64_config(%rip) /* Handle */ movq %rsi, efi64_config+8(%rip) /* EFI System table pointer */ leaq efi64_config(%rip), %rax movq %rax, efi_config(%rip) movq %rdx, %rsi jmp handover_entry ENDPROC(efi64_stub_entry) #endif .text relocated: /* * Clear BSS (stack is currently empty) */ xorl %eax, %eax leaq _bss(%rip), %rdi leaq _ebss(%rip), %rcx subq %rdi, %rcx shrq $3, %rcx rep stosq /* * Do the extraction, and jump to the new kernel.. */ pushq %rsi /* Save the real mode argument */ movq %rsi, %rdi /* real mode address */ leaq boot_heap(%rip), %rsi /* malloc area for uncompression */ leaq input_data(%rip), %rdx /* input_data */ movl $z_input_len, %ecx /* input_len */ movq %rbp, %r8 /* output target address */ movq $z_output_len, %r9 /* decompressed length, end of relocs */ call extract_kernel /* returns kernel location in %rax */ popq %rsi /* * Jump to the decompressed kernel. */ jmp *%rax /* * Adjust the global offset table * * RAX is the previous adjustment of the table to undo (use 0 if it's the * first time we touch GOT). * RDI is the new adjustment to apply. 
*/ adjust_got: /* Walk through the GOT adding the address to the entries */ leaq _got(%rip), %rdx leaq _egot(%rip), %rcx 1: cmpq %rcx, %rdx jae 2f subq %rax, (%rdx) /* Undo previous adjustment */ addq %rdi, (%rdx) /* Apply the new adjustment */ addq $8, %rdx jmp 1b 2: ret .code32 /* * This is the 32-bit trampoline that will be copied over to low memory. * * RDI contains the return address (might be above 4G). * ECX contains the base address of the trampoline memory. * Non zero RDX on return means we need to enable 5-level paging. */ ENTRY(trampoline_32bit_src) /* Set up data and stack segments */ movl $__KERNEL_DS, %eax movl %eax, %ds movl %eax, %ss /* Set up new stack */ leal TRAMPOLINE_32BIT_STACK_END(%ecx), %esp /* Disable paging */ movl %cr0, %eax btrl $X86_CR0_PG_BIT, %eax movl %eax, %cr0 /* Check what paging mode we want to be in after the trampoline */ cmpl $0, %edx jz 1f /* We want 5-level paging: don't touch CR3 if it already points to 5-level page tables */ movl %cr4, %eax testl $X86_CR4_LA57, %eax jnz 3f jmp 2f 1: /* We want 4-level paging: don't touch CR3 if it already points to 4-level page tables */ movl %cr4, %eax testl $X86_CR4_LA57, %eax jz 3f 2: /* Point CR3 to the trampoline's new top level page table */ leal TRAMPOLINE_32BIT_PGTABLE_OFFSET(%ecx), %eax movl %eax, %cr3 3: /* Set EFER.LME=1 as a precaution in case hypervsior pulls the rug */ pushl %ecx pushl %edx movl $MSR_EFER, %ecx rdmsr btsl $_EFER_LME, %eax wrmsr popl %edx popl %ecx /* Enable PAE and LA57 (if required) paging modes */ movl $X86_CR4_PAE, %eax cmpl $0, %edx jz 1f orl $X86_CR4_LA57, %eax 1: movl %eax, %cr4 /* Calculate address of paging_enabled() once we are executing in the trampoline */ leal paging_enabled - trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_OFFSET(%ecx), %eax /* Prepare the stack for far return to Long Mode */ pushl $__KERNEL_CS pushl %eax /* Enable paging again */ movl $(X86_CR0_PG | X86_CR0_PE), %eax movl %eax, %cr0 lret .code64 paging_enabled: /* Return from the 
trampoline */ jmp *%rdi /* * The trampoline code has a size limit. * Make sure we fail to compile if the trampoline code grows * beyond TRAMPOLINE_32BIT_CODE_SIZE bytes. */ .org trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_SIZE .code32 no_longmode: /* This isn't an x86-64 CPU, so hang intentionally, we cannot continue */ 1: hlt jmp 1b #include "../../kernel/verify_cpu.S" .data gdt64: .word gdt_end - gdt .long 0 .word 0 .quad 0 gdt: .word gdt_end - gdt .long gdt .word 0 .quad 0x00cf9a000000ffff /* __KERNEL32_CS */ .quad 0x00af9a000000ffff /* __KERNEL_CS */ .quad 0x00cf92000000ffff /* __KERNEL_DS */ .quad 0x0080890000000000 /* TS descriptor */ .quad 0x0000000000000000 /* TS continued */ gdt_end: #ifdef CONFIG_EFI_STUB efi_config: .quad 0 #ifdef CONFIG_EFI_MIXED .global efi32_config efi32_config: .fill 5,8,0 .quad efi64_thunk .byte 0 #endif .global efi64_config efi64_config: .fill 5,8,0 .quad efi_call .byte 1 #endif /* CONFIG_EFI_STUB */ /* * Stack and heap for uncompression */ .bss .balign 4 boot_heap: .fill BOOT_HEAP_SIZE, 1, 0 boot_stack: .fill BOOT_STACK_SIZE, 1, 0 boot_stack_end: /* * Space for page tables (not in .bss so not zeroed) */ .section ".pgtable","a",@nobits .balign 4096 pgtable: .fill BOOT_PGT_SIZE, 1, 0 /* * The page table is going to be used instead of page table in the trampoline * memory. */ top_pgtable: .fill PAGE_SIZE, 1, 0
AirFortressIlikara/LS2K0300-linux-4.19
2,905
arch/x86/entry/vdso/vdso-layout.lds.S
/* SPDX-License-Identifier: GPL-2.0 */ #include <asm/vdso.h> /* * Linker script for vDSO. This is an ELF shared object prelinked to * its virtual address, and with only one read-only segment. * This script controls its layout. */ #if defined(BUILD_VDSO64) # define SHDR_SIZE 64 #elif defined(BUILD_VDSO32) || defined(BUILD_VDSOX32) # define SHDR_SIZE 40 #else # error unknown VDSO target #endif #define NUM_FAKE_SHDRS 13 SECTIONS { /* * User/kernel shared data is before the vDSO. This may be a little * uglier than putting it after the vDSO, but it avoids issues with * non-allocatable things that dangle past the end of the PT_LOAD * segment. */ vvar_start = . - 3 * PAGE_SIZE; vvar_page = vvar_start; /* Place all vvars at the offsets in asm/vvar.h. */ #define EMIT_VVAR(name, offset) vvar_ ## name = vvar_page + offset; #define __VVAR_KERNEL_LDS #include <asm/vvar.h> #undef __VVAR_KERNEL_LDS #undef EMIT_VVAR pvclock_page = vvar_start + PAGE_SIZE; hvclock_page = vvar_start + 2 * PAGE_SIZE; . = SIZEOF_HEADERS; .hash : { *(.hash) } :text .gnu.hash : { *(.gnu.hash) } .dynsym : { *(.dynsym) } .dynstr : { *(.dynstr) } .gnu.version : { *(.gnu.version) } .gnu.version_d : { *(.gnu.version_d) } .gnu.version_r : { *(.gnu.version_r) } .dynamic : { *(.dynamic) } :text :dynamic .rodata : { *(.rodata*) *(.data*) *(.sdata*) *(.got.plt) *(.got) *(.gnu.linkonce.d.*) *(.bss*) *(.dynbss*) *(.gnu.linkonce.b.*) /* * Ideally this would live in a C file, but that won't * work cleanly for x32 until we start building the x32 * C code using an x32 toolchain. */ VDSO_FAKE_SECTION_TABLE_START = .; . = . + NUM_FAKE_SHDRS * SHDR_SIZE; VDSO_FAKE_SECTION_TABLE_END = .; } :text .fake_shstrtab : { *(.fake_shstrtab) } :text .note : { *(.note.*) } :text :note .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr .eh_frame : { KEEP (*(.eh_frame)) } :text /* * Text is well-separated from actual data: there's plenty of * stuff that isn't used at runtime in between. 
*/ .text : { *(.text*) } :text =0x90909090, /* * At the end so that eu-elflint stays happy when vdso2c strips * these. A better implementation would avoid allocating space * for these. */ .altinstructions : { *(.altinstructions) } :text .altinstr_replacement : { *(.altinstr_replacement) } :text /DISCARD/ : { *(.discard) *(.discard.*) *(__bug_table) } } /* * Very old versions of ld do not recognize this name token; use the constant. */ #define PT_GNU_EH_FRAME 0x6474e550 /* * We must supply the ELF program headers explicitly to get just one * PT_LOAD segment, and set the flags explicitly to make segments read-only. */ PHDRS { text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */ dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ note PT_NOTE FLAGS(4); /* PF_R */ eh_frame_hdr PT_GNU_EH_FRAME; }
AirFortressIlikara/LS2K0300-linux-4.19
4,861
arch/x86/entry/vdso/vdso32/sigreturn.S
/* SPDX-License-Identifier: GPL-2.0 */ #include <linux/linkage.h> #include <asm/unistd_32.h> #include <asm/asm-offsets.h> #ifndef SYSCALL_ENTER_KERNEL #define SYSCALL_ENTER_KERNEL int $0x80 #endif .text .globl __kernel_sigreturn .type __kernel_sigreturn,@function nop /* this guy is needed for .LSTARTFDEDLSI1 below (watch for HACK) */ ALIGN __kernel_sigreturn: .LSTART_sigreturn: popl %eax /* XXX does this mean it needs unwind info? */ movl $__NR_sigreturn, %eax SYSCALL_ENTER_KERNEL .LEND_sigreturn: nop .size __kernel_sigreturn,.-.LSTART_sigreturn .globl __kernel_rt_sigreturn .type __kernel_rt_sigreturn,@function ALIGN __kernel_rt_sigreturn: .LSTART_rt_sigreturn: movl $__NR_rt_sigreturn, %eax SYSCALL_ENTER_KERNEL .LEND_rt_sigreturn: nop .size __kernel_rt_sigreturn,.-.LSTART_rt_sigreturn .previous .section .eh_frame,"a",@progbits .LSTARTFRAMEDLSI1: .long .LENDCIEDLSI1-.LSTARTCIEDLSI1 .LSTARTCIEDLSI1: .long 0 /* CIE ID */ .byte 1 /* Version number */ .string "zRS" /* NUL-terminated augmentation string */ .uleb128 1 /* Code alignment factor */ .sleb128 -4 /* Data alignment factor */ .byte 8 /* Return address register column */ .uleb128 1 /* Augmentation value length */ .byte 0x1b /* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */ .byte 0 /* DW_CFA_nop */ .align 4 .LENDCIEDLSI1: .long .LENDFDEDLSI1-.LSTARTFDEDLSI1 /* Length FDE */ .LSTARTFDEDLSI1: .long .LSTARTFDEDLSI1-.LSTARTFRAMEDLSI1 /* CIE pointer */ /* HACK: The dwarf2 unwind routines will subtract 1 from the return address to get an address in the middle of the presumed call instruction. Since we didn't get here via a call, we need to include the nop before the real start to make up for it. */ .long .LSTART_sigreturn-1-. /* PC-relative start address */ .long .LEND_sigreturn-.LSTART_sigreturn+1 .uleb128 0 /* Augmentation */ /* What follows are the instructions for the table generation. We record the locations of each register saved. 
This is complicated by the fact that the "CFA" is always assumed to be the value of the stack pointer in the caller. This means that we must define the CFA of this body of code to be the saved value of the stack pointer in the sigcontext. Which also means that there is no fixed relation to the other saved registers, which means that we must use DW_CFA_expression to compute their addresses. It also means that when we adjust the stack with the popl, we have to do it all over again. */ #define do_cfa_expr(offset) \ .byte 0x0f; /* DW_CFA_def_cfa_expression */ \ .uleb128 1f-0f; /* length */ \ 0: .byte 0x74; /* DW_OP_breg4 */ \ .sleb128 offset; /* offset */ \ .byte 0x06; /* DW_OP_deref */ \ 1: #define do_expr(regno, offset) \ .byte 0x10; /* DW_CFA_expression */ \ .uleb128 regno; /* regno */ \ .uleb128 1f-0f; /* length */ \ 0: .byte 0x74; /* DW_OP_breg4 */ \ .sleb128 offset; /* offset */ \ 1: do_cfa_expr(IA32_SIGCONTEXT_sp+4) do_expr(0, IA32_SIGCONTEXT_ax+4) do_expr(1, IA32_SIGCONTEXT_cx+4) do_expr(2, IA32_SIGCONTEXT_dx+4) do_expr(3, IA32_SIGCONTEXT_bx+4) do_expr(5, IA32_SIGCONTEXT_bp+4) do_expr(6, IA32_SIGCONTEXT_si+4) do_expr(7, IA32_SIGCONTEXT_di+4) do_expr(8, IA32_SIGCONTEXT_ip+4) .byte 0x42 /* DW_CFA_advance_loc 2 -- nop; popl eax. */ do_cfa_expr(IA32_SIGCONTEXT_sp) do_expr(0, IA32_SIGCONTEXT_ax) do_expr(1, IA32_SIGCONTEXT_cx) do_expr(2, IA32_SIGCONTEXT_dx) do_expr(3, IA32_SIGCONTEXT_bx) do_expr(5, IA32_SIGCONTEXT_bp) do_expr(6, IA32_SIGCONTEXT_si) do_expr(7, IA32_SIGCONTEXT_di) do_expr(8, IA32_SIGCONTEXT_ip) .align 4 .LENDFDEDLSI1: .long .LENDFDEDLSI2-.LSTARTFDEDLSI2 /* Length FDE */ .LSTARTFDEDLSI2: .long .LSTARTFDEDLSI2-.LSTARTFRAMEDLSI1 /* CIE pointer */ /* HACK: See above wrt unwind library assumptions. */ .long .LSTART_rt_sigreturn-1-. /* PC-relative start address */ .long .LEND_rt_sigreturn-.LSTART_rt_sigreturn+1 .uleb128 0 /* Augmentation */ /* What follows are the instructions for the table generation. We record the locations of each register saved. 
This is slightly less complicated than the above, since we don't modify the stack pointer in the process. */ do_cfa_expr(IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_sp) do_expr(0, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_ax) do_expr(1, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_cx) do_expr(2, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_dx) do_expr(3, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_bx) do_expr(5, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_bp) do_expr(6, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_si) do_expr(7, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_di) do_expr(8, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_ip) .align 4 .LENDFDEDLSI2: .previous
AirFortressIlikara/LS2K0300-linux-4.19
1,757
arch/x86/entry/vdso/vdso32/note.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text. * Here we can supply some information useful to userland. */ #include <linux/build-salt.h> #include <linux/version.h> #include <linux/elfnote.h> /* Ideally this would use UTS_NAME, but using a quoted string here doesn't work. Remember to change this when changing the kernel's name. */ ELFNOTE_START(Linux, 0, "a") .long LINUX_VERSION_CODE ELFNOTE_END BUILD_SALT #ifdef CONFIG_XEN /* * Add a special note telling glibc's dynamic linker a fake hardware * flavor that it will use to choose the search path for libraries in the * same way it uses real hardware capabilities like "mmx". * We supply "nosegneg" as the fake capability, to indicate that we * do not like negative offsets in instructions using segment overrides, * since we implement those inefficiently. This makes it possible to * install libraries optimized to avoid those access patterns in someplace * like /lib/i686/tls/nosegneg. Note that an /etc/ld.so.conf.d/file * corresponding to the bits here is needed to make ldconfig work right. * It should contain: * hwcap 1 nosegneg * to match the mapping of bit to name that we give here. * * At runtime, the fake hardware feature will be considered to be present * if its bit is set in the mask word. So, we start with the mask 0, and * at boot time we set VDSO_NOTE_NONEGSEG_BIT if running under Xen. */ #include "../../xen/vdso.h" /* Defines VDSO_NOTE_NONEGSEG_BIT. */ ELFNOTE_START(GNU, 2, "a") .long 1 /* ncaps */ VDSO32_NOTE_MASK: /* Symbol used by arch/x86/xen/setup.c */ .long 0 /* mask */ .byte VDSO_NOTE_NONEGSEG_BIT; .asciz "nosegneg" /* bit, name */ ELFNOTE_END #endif
AirFortressIlikara/LS2K0300-linux-4.19
2,410
arch/x86/entry/vdso/vdso32/system_call.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * AT_SYSINFO entry point */ #include <linux/linkage.h> #include <asm/dwarf2.h> #include <asm/cpufeatures.h> #include <asm/alternative-asm.h> .text .globl __kernel_vsyscall .type __kernel_vsyscall,@function ALIGN __kernel_vsyscall: CFI_STARTPROC /* * Reshuffle regs so that all of any of the entry instructions * will preserve enough state. * * A really nice entry sequence would be: * pushl %edx * pushl %ecx * movl %esp, %ecx * * Unfortunately, naughty Android versions between July and December * 2015 actually hardcode the traditional Linux SYSENTER entry * sequence. That is severely broken for a number of reasons (ask * anyone with an AMD CPU, for example). Nonetheless, we try to keep * it working approximately as well as it ever worked. * * This link may eludicate some of the history: * https://android-review.googlesource.com/#/q/Iac3295376d61ef83e713ac9b528f3b50aa780cd7 * personally, I find it hard to understand what's going on there. * * Note to future user developers: DO NOT USE SYSENTER IN YOUR CODE. * Execute an indirect call to the address in the AT_SYSINFO auxv * entry. That is the ONLY correct way to make a fast 32-bit system * call on Linux. (Open-coding int $0x80 is also fine, but it's * slow.) */ pushl %ecx CFI_ADJUST_CFA_OFFSET 4 CFI_REL_OFFSET ecx, 0 pushl %edx CFI_ADJUST_CFA_OFFSET 4 CFI_REL_OFFSET edx, 0 pushl %ebp CFI_ADJUST_CFA_OFFSET 4 CFI_REL_OFFSET ebp, 0 #define SYSENTER_SEQUENCE "movl %esp, %ebp; sysenter" #define SYSCALL_SEQUENCE "movl %ecx, %ebp; syscall" #ifdef CONFIG_X86_64 /* If SYSENTER (Intel) or SYSCALL32 (AMD) is available, use it. */ ALTERNATIVE_2 "", SYSENTER_SEQUENCE, X86_FEATURE_SYSENTER32, \ SYSCALL_SEQUENCE, X86_FEATURE_SYSCALL32 #else ALTERNATIVE "", SYSENTER_SEQUENCE, X86_FEATURE_SEP #endif /* Enter using int $0x80 */ int $0x80 GLOBAL(int80_landing_pad) /* * Restore EDX and ECX in case they were clobbered. 
EBP is not * clobbered (the kernel restores it), but it's cleaner and * probably faster to pop it than to adjust ESP using addl. */ popl %ebp CFI_RESTORE ebp CFI_ADJUST_CFA_OFFSET -4 popl %edx CFI_RESTORE edx CFI_ADJUST_CFA_OFFSET -4 popl %ecx CFI_RESTORE ecx CFI_ADJUST_CFA_OFFSET -4 ret CFI_ENDPROC .size __kernel_vsyscall,.-__kernel_vsyscall .previous
AirFortressIlikara/LS2K0300-linux-4.19
2,175
arch/alpha/kernel/head.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/kernel/head.S * * initial boot stuff.. At this point, the bootloader has already * switched into OSF/1 PAL-code, and loaded us at the correct address * (START_ADDR). So there isn't much left for us to do: just set up * the kernel global pointer and jump to the kernel entry-point. */ #include <linux/init.h> #include <asm/asm-offsets.h> #include <asm/pal.h> #include <asm/setup.h> __HEAD .globl _stext .set noreorder .globl __start .ent __start _stext: __start: .prologue 0 br $27,1f 1: ldgp $29,0($27) /* We need to get current_task_info loaded up... */ lda $8,init_thread_union /* ... and find our stack ... */ lda $30,0x4000 - SIZEOF_PT_REGS($8) /* ... and then we can start the kernel. */ jsr $26,start_kernel call_pal PAL_halt .end __start #ifdef CONFIG_SMP .align 3 .globl __smp_callin .ent __smp_callin /* On entry here from SRM console, the HWPCB of the per-cpu slot for this processor has been loaded. We've arranged for the UNIQUE value for this process to contain the PCBB of the target idle task. */ __smp_callin: .prologue 1 ldgp $29,0($27) # First order of business, load the GP. call_pal PAL_rduniq # Grab the target PCBB. mov $0,$16 # Install it. call_pal PAL_swpctx lda $8,0x3fff # Find "current". bic $30,$8,$8 jsr $26,smp_callin call_pal PAL_halt .end __smp_callin #endif /* CONFIG_SMP */ # # The following two functions are needed for supporting SRM PALcode # on the PC164 (at least), since that PALcode manages the interrupt # masking, and we cannot duplicate the effort without causing problems # .align 3 .globl cserve_ena .ent cserve_ena cserve_ena: .prologue 0 bis $16,$16,$17 lda $16,52($31) call_pal PAL_cserve ret ($26) .end cserve_ena .align 3 .globl cserve_dis .ent cserve_dis cserve_dis: .prologue 0 bis $16,$16,$17 lda $16,53($31) call_pal PAL_cserve ret ($26) .end cserve_dis # # It is handy, on occasion, to make halt actually just loop. # Putting it here means we dont have to recompile the whole # kernel. 
# .align 3 .globl halt .ent halt halt: .prologue 0 call_pal PAL_halt .end halt
AirFortressIlikara/LS2K0300-linux-4.19
17,965
arch/alpha/kernel/entry.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/kernel/entry.S * * Kernel entry-points. */ #include <asm/asm-offsets.h> #include <asm/thread_info.h> #include <asm/pal.h> #include <asm/errno.h> #include <asm/unistd.h> .text .set noat .cfi_sections .debug_frame /* Stack offsets. */ #define SP_OFF 184 #define SWITCH_STACK_SIZE 320 .macro CFI_START_OSF_FRAME func .align 4 .globl \func .type \func,@function \func: .cfi_startproc simple .cfi_return_column 64 .cfi_def_cfa $sp, 48 .cfi_rel_offset 64, 8 .cfi_rel_offset $gp, 16 .cfi_rel_offset $16, 24 .cfi_rel_offset $17, 32 .cfi_rel_offset $18, 40 .endm .macro CFI_END_OSF_FRAME func .cfi_endproc .size \func, . - \func .endm /* * This defines the normal kernel pt-regs layout. * * regs 9-15 preserved by C code * regs 16-18 saved by PAL-code * regs 29-30 saved and set up by PAL-code * JRP - Save regs 16-18 in a special area of the stack, so that * the palcode-provided values are available to the signal handler. */ .macro SAVE_ALL subq $sp, SP_OFF, $sp .cfi_adjust_cfa_offset SP_OFF stq $0, 0($sp) stq $1, 8($sp) stq $2, 16($sp) stq $3, 24($sp) stq $4, 32($sp) stq $28, 144($sp) .cfi_rel_offset $0, 0 .cfi_rel_offset $1, 8 .cfi_rel_offset $2, 16 .cfi_rel_offset $3, 24 .cfi_rel_offset $4, 32 .cfi_rel_offset $28, 144 lda $2, alpha_mv stq $5, 40($sp) stq $6, 48($sp) stq $7, 56($sp) stq $8, 64($sp) stq $19, 72($sp) stq $20, 80($sp) stq $21, 88($sp) ldq $2, HAE_CACHE($2) stq $22, 96($sp) stq $23, 104($sp) stq $24, 112($sp) stq $25, 120($sp) stq $26, 128($sp) stq $27, 136($sp) stq $2, 152($sp) stq $16, 160($sp) stq $17, 168($sp) stq $18, 176($sp) .cfi_rel_offset $5, 40 .cfi_rel_offset $6, 48 .cfi_rel_offset $7, 56 .cfi_rel_offset $8, 64 .cfi_rel_offset $19, 72 .cfi_rel_offset $20, 80 .cfi_rel_offset $21, 88 .cfi_rel_offset $22, 96 .cfi_rel_offset $23, 104 .cfi_rel_offset $24, 112 .cfi_rel_offset $25, 120 .cfi_rel_offset $26, 128 .cfi_rel_offset $27, 136 .endm .macro RESTORE_ALL lda $19, alpha_mv ldq $0, 0($sp) ldq $1, 8($sp) ldq 
$2, 16($sp) ldq $3, 24($sp) ldq $21, 152($sp) ldq $20, HAE_CACHE($19) ldq $4, 32($sp) ldq $5, 40($sp) ldq $6, 48($sp) ldq $7, 56($sp) subq $20, $21, $20 ldq $8, 64($sp) beq $20, 99f ldq $20, HAE_REG($19) stq $21, HAE_CACHE($19) stq $21, 0($20) 99: ldq $19, 72($sp) ldq $20, 80($sp) ldq $21, 88($sp) ldq $22, 96($sp) ldq $23, 104($sp) ldq $24, 112($sp) ldq $25, 120($sp) ldq $26, 128($sp) ldq $27, 136($sp) ldq $28, 144($sp) addq $sp, SP_OFF, $sp .cfi_restore $0 .cfi_restore $1 .cfi_restore $2 .cfi_restore $3 .cfi_restore $4 .cfi_restore $5 .cfi_restore $6 .cfi_restore $7 .cfi_restore $8 .cfi_restore $19 .cfi_restore $20 .cfi_restore $21 .cfi_restore $22 .cfi_restore $23 .cfi_restore $24 .cfi_restore $25 .cfi_restore $26 .cfi_restore $27 .cfi_restore $28 .cfi_adjust_cfa_offset -SP_OFF .endm .macro DO_SWITCH_STACK bsr $1, do_switch_stack .cfi_adjust_cfa_offset SWITCH_STACK_SIZE .cfi_rel_offset $9, 0 .cfi_rel_offset $10, 8 .cfi_rel_offset $11, 16 .cfi_rel_offset $12, 24 .cfi_rel_offset $13, 32 .cfi_rel_offset $14, 40 .cfi_rel_offset $15, 48 /* We don't really care about the FP registers for debugging. */ .endm .macro UNDO_SWITCH_STACK bsr $1, undo_switch_stack .cfi_restore $9 .cfi_restore $10 .cfi_restore $11 .cfi_restore $12 .cfi_restore $13 .cfi_restore $14 .cfi_restore $15 .cfi_adjust_cfa_offset -SWITCH_STACK_SIZE .endm /* * Non-syscall kernel entry points. */ CFI_START_OSF_FRAME entInt SAVE_ALL lda $8, 0x3fff lda $26, ret_from_sys_call bic $sp, $8, $8 mov $sp, $19 jsr $31, do_entInt CFI_END_OSF_FRAME entInt CFI_START_OSF_FRAME entArith SAVE_ALL lda $8, 0x3fff lda $26, ret_from_sys_call bic $sp, $8, $8 mov $sp, $18 jsr $31, do_entArith CFI_END_OSF_FRAME entArith CFI_START_OSF_FRAME entMM SAVE_ALL /* save $9 - $15 so the inline exception code can manipulate them. 
*/ subq $sp, 56, $sp .cfi_adjust_cfa_offset 56 stq $9, 0($sp) stq $10, 8($sp) stq $11, 16($sp) stq $12, 24($sp) stq $13, 32($sp) stq $14, 40($sp) stq $15, 48($sp) .cfi_rel_offset $9, 0 .cfi_rel_offset $10, 8 .cfi_rel_offset $11, 16 .cfi_rel_offset $12, 24 .cfi_rel_offset $13, 32 .cfi_rel_offset $14, 40 .cfi_rel_offset $15, 48 addq $sp, 56, $19 /* handle the fault */ lda $8, 0x3fff bic $sp, $8, $8 jsr $26, do_page_fault /* reload the registers after the exception code played. */ ldq $9, 0($sp) ldq $10, 8($sp) ldq $11, 16($sp) ldq $12, 24($sp) ldq $13, 32($sp) ldq $14, 40($sp) ldq $15, 48($sp) addq $sp, 56, $sp .cfi_restore $9 .cfi_restore $10 .cfi_restore $11 .cfi_restore $12 .cfi_restore $13 .cfi_restore $14 .cfi_restore $15 .cfi_adjust_cfa_offset -56 /* finish up the syscall as normal. */ br ret_from_sys_call CFI_END_OSF_FRAME entMM CFI_START_OSF_FRAME entIF SAVE_ALL lda $8, 0x3fff lda $26, ret_from_sys_call bic $sp, $8, $8 mov $sp, $17 jsr $31, do_entIF CFI_END_OSF_FRAME entIF CFI_START_OSF_FRAME entUna lda $sp, -256($sp) .cfi_adjust_cfa_offset 256 stq $0, 0($sp) .cfi_rel_offset $0, 0 .cfi_remember_state ldq $0, 256($sp) /* get PS */ stq $1, 8($sp) stq $2, 16($sp) stq $3, 24($sp) and $0, 8, $0 /* user mode? 
*/ stq $4, 32($sp) bne $0, entUnaUser /* yup -> do user-level unaligned fault */ stq $5, 40($sp) stq $6, 48($sp) stq $7, 56($sp) stq $8, 64($sp) stq $9, 72($sp) stq $10, 80($sp) stq $11, 88($sp) stq $12, 96($sp) stq $13, 104($sp) stq $14, 112($sp) stq $15, 120($sp) /* 16-18 PAL-saved */ stq $19, 152($sp) stq $20, 160($sp) stq $21, 168($sp) stq $22, 176($sp) stq $23, 184($sp) stq $24, 192($sp) stq $25, 200($sp) stq $26, 208($sp) stq $27, 216($sp) stq $28, 224($sp) mov $sp, $19 stq $gp, 232($sp) .cfi_rel_offset $1, 1*8 .cfi_rel_offset $2, 2*8 .cfi_rel_offset $3, 3*8 .cfi_rel_offset $4, 4*8 .cfi_rel_offset $5, 5*8 .cfi_rel_offset $6, 6*8 .cfi_rel_offset $7, 7*8 .cfi_rel_offset $8, 8*8 .cfi_rel_offset $9, 9*8 .cfi_rel_offset $10, 10*8 .cfi_rel_offset $11, 11*8 .cfi_rel_offset $12, 12*8 .cfi_rel_offset $13, 13*8 .cfi_rel_offset $14, 14*8 .cfi_rel_offset $15, 15*8 .cfi_rel_offset $19, 19*8 .cfi_rel_offset $20, 20*8 .cfi_rel_offset $21, 21*8 .cfi_rel_offset $22, 22*8 .cfi_rel_offset $23, 23*8 .cfi_rel_offset $24, 24*8 .cfi_rel_offset $25, 25*8 .cfi_rel_offset $26, 26*8 .cfi_rel_offset $27, 27*8 .cfi_rel_offset $28, 28*8 .cfi_rel_offset $29, 29*8 lda $8, 0x3fff stq $31, 248($sp) bic $sp, $8, $8 jsr $26, do_entUna ldq $0, 0($sp) ldq $1, 8($sp) ldq $2, 16($sp) ldq $3, 24($sp) ldq $4, 32($sp) ldq $5, 40($sp) ldq $6, 48($sp) ldq $7, 56($sp) ldq $8, 64($sp) ldq $9, 72($sp) ldq $10, 80($sp) ldq $11, 88($sp) ldq $12, 96($sp) ldq $13, 104($sp) ldq $14, 112($sp) ldq $15, 120($sp) /* 16-18 PAL-saved */ ldq $19, 152($sp) ldq $20, 160($sp) ldq $21, 168($sp) ldq $22, 176($sp) ldq $23, 184($sp) ldq $24, 192($sp) ldq $25, 200($sp) ldq $26, 208($sp) ldq $27, 216($sp) ldq $28, 224($sp) ldq $gp, 232($sp) lda $sp, 256($sp) .cfi_restore $1 .cfi_restore $2 .cfi_restore $3 .cfi_restore $4 .cfi_restore $5 .cfi_restore $6 .cfi_restore $7 .cfi_restore $8 .cfi_restore $9 .cfi_restore $10 .cfi_restore $11 .cfi_restore $12 .cfi_restore $13 .cfi_restore $14 .cfi_restore $15 .cfi_restore $19 
.cfi_restore $20 .cfi_restore $21 .cfi_restore $22 .cfi_restore $23 .cfi_restore $24 .cfi_restore $25 .cfi_restore $26 .cfi_restore $27 .cfi_restore $28 .cfi_restore $29 .cfi_adjust_cfa_offset -256 call_pal PAL_rti .align 4 entUnaUser: .cfi_restore_state ldq $0, 0($sp) /* restore original $0 */ lda $sp, 256($sp) /* pop entUna's stack frame */ .cfi_restore $0 .cfi_adjust_cfa_offset -256 SAVE_ALL /* setup normal kernel stack */ lda $sp, -56($sp) .cfi_adjust_cfa_offset 56 stq $9, 0($sp) stq $10, 8($sp) stq $11, 16($sp) stq $12, 24($sp) stq $13, 32($sp) stq $14, 40($sp) stq $15, 48($sp) .cfi_rel_offset $9, 0 .cfi_rel_offset $10, 8 .cfi_rel_offset $11, 16 .cfi_rel_offset $12, 24 .cfi_rel_offset $13, 32 .cfi_rel_offset $14, 40 .cfi_rel_offset $15, 48 lda $8, 0x3fff addq $sp, 56, $19 bic $sp, $8, $8 jsr $26, do_entUnaUser ldq $9, 0($sp) ldq $10, 8($sp) ldq $11, 16($sp) ldq $12, 24($sp) ldq $13, 32($sp) ldq $14, 40($sp) ldq $15, 48($sp) lda $sp, 56($sp) .cfi_restore $9 .cfi_restore $10 .cfi_restore $11 .cfi_restore $12 .cfi_restore $13 .cfi_restore $14 .cfi_restore $15 .cfi_adjust_cfa_offset -56 br ret_from_sys_call CFI_END_OSF_FRAME entUna CFI_START_OSF_FRAME entDbg SAVE_ALL lda $8, 0x3fff lda $26, ret_from_sys_call bic $sp, $8, $8 mov $sp, $16 jsr $31, do_entDbg CFI_END_OSF_FRAME entDbg /* * The system call entry point is special. Most importantly, it looks * like a function call to userspace as far as clobbered registers. We * do preserve the argument registers (for syscall restarts) and $26 * (for leaf syscall functions). * * So much for theory. We don't take advantage of this yet. * * Note that a0-a2 are not saved by PALcode as with the other entry points. 
*/ .align 4 .globl entSys .type entSys, @function .cfi_startproc simple .cfi_return_column 64 .cfi_def_cfa $sp, 48 .cfi_rel_offset 64, 8 .cfi_rel_offset $gp, 16 entSys: SAVE_ALL lda $8, 0x3fff bic $sp, $8, $8 lda $4, NR_SYSCALLS($31) stq $16, SP_OFF+24($sp) lda $5, sys_call_table lda $27, sys_ni_syscall cmpult $0, $4, $4 ldl $3, TI_FLAGS($8) stq $17, SP_OFF+32($sp) s8addq $0, $5, $5 stq $18, SP_OFF+40($sp) .cfi_rel_offset $16, SP_OFF+24 .cfi_rel_offset $17, SP_OFF+32 .cfi_rel_offset $18, SP_OFF+40 #ifdef CONFIG_AUDITSYSCALL lda $6, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT and $3, $6, $3 #endif bne $3, strace beq $4, 1f ldq $27, 0($5) 1: jsr $26, ($27), alpha_ni_syscall ldgp $gp, 0($26) blt $0, $syscall_error /* the call failed */ stq $0, 0($sp) stq $31, 72($sp) /* a3=0 => no error */ .align 4 .globl ret_from_sys_call ret_from_sys_call: cmovne $26, 0, $18 /* $18 = 0 => non-restartable */ ldq $0, SP_OFF($sp) and $0, 8, $0 beq $0, ret_to_kernel ret_to_user: /* Make sure need_resched and sigpending don't change between sampling and the rti. */ lda $16, 7 call_pal PAL_swpipl ldl $17, TI_FLAGS($8) and $17, _TIF_WORK_MASK, $2 bne $2, work_pending restore_all: .cfi_remember_state RESTORE_ALL call_pal PAL_rti ret_to_kernel: .cfi_restore_state lda $16, 7 call_pal PAL_swpipl br restore_all .align 3 $syscall_error: /* * Some system calls (e.g., ptrace) can return arbitrary * values which might normally be mistaken as error numbers. * Those functions must zero $0 (v0) directly in the stack * frame to indicate that a negative return value wasn't an * error number.. */ ldq $18, 0($sp) /* old syscall nr (zero if success) */ beq $18, $ret_success ldq $19, 72($sp) /* .. 
and this a3 */ subq $31, $0, $0 /* with error in v0 */ addq $31, 1, $1 /* set a3 for errno return */ stq $0, 0($sp) mov $31, $26 /* tell "ret_from_sys_call" we can restart */ stq $1, 72($sp) /* a3 for return */ br ret_from_sys_call $ret_success: stq $0, 0($sp) stq $31, 72($sp) /* a3=0 => no error */ br ret_from_sys_call /* * Do all cleanup when returning from all interrupts and system calls. * * Arguments: * $8: current. * $17: TI_FLAGS. * $18: The old syscall number, or zero if this is not a return * from a syscall that errored and is possibly restartable. * $19: The old a3 value */ .align 4 .type work_pending, @function work_pending: and $17, _TIF_NOTIFY_RESUME | _TIF_SIGPENDING, $2 bne $2, $work_notifysig $work_resched: /* * We can get here only if we returned from syscall without SIGPENDING * or got through work_notifysig already. Either case means no syscall * restarts for us, so let $18 and $19 burn. */ jsr $26, schedule mov 0, $18 br ret_to_user $work_notifysig: mov $sp, $16 DO_SWITCH_STACK jsr $26, do_work_pending UNDO_SWITCH_STACK br restore_all /* * PTRACE syscall handler */ .align 4 .type strace, @function strace: /* set up signal stack, call syscall_trace */ DO_SWITCH_STACK jsr $26, syscall_trace_enter /* returns the syscall number */ UNDO_SWITCH_STACK /* get the arguments back.. */ ldq $16, SP_OFF+24($sp) ldq $17, SP_OFF+32($sp) ldq $18, SP_OFF+40($sp) ldq $19, 72($sp) ldq $20, 80($sp) ldq $21, 88($sp) /* get the system call pointer.. */ lda $1, NR_SYSCALLS($31) lda $2, sys_call_table lda $27, alpha_ni_syscall cmpult $0, $1, $1 s8addq $0, $2, $2 beq $1, 1f ldq $27, 0($2) 1: jsr $26, ($27), sys_gettimeofday ret_from_straced: ldgp $gp, 0($26) /* check return.. 
*/ blt $0, $strace_error /* the call failed */ stq $31, 72($sp) /* a3=0 => no error */ $strace_success: stq $0, 0($sp) /* save return value */ DO_SWITCH_STACK jsr $26, syscall_trace_leave UNDO_SWITCH_STACK br $31, ret_from_sys_call .align 3 $strace_error: ldq $18, 0($sp) /* old syscall nr (zero if success) */ beq $18, $strace_success ldq $19, 72($sp) /* .. and this a3 */ subq $31, $0, $0 /* with error in v0 */ addq $31, 1, $1 /* set a3 for errno return */ stq $0, 0($sp) stq $1, 72($sp) /* a3 for return */ DO_SWITCH_STACK mov $18, $9 /* save old syscall number */ mov $19, $10 /* save old a3 */ jsr $26, syscall_trace_leave mov $9, $18 mov $10, $19 UNDO_SWITCH_STACK mov $31, $26 /* tell "ret_from_sys_call" we can restart */ br ret_from_sys_call CFI_END_OSF_FRAME entSys /* * Save and restore the switch stack -- aka the balance of the user context. */ .align 4 .type do_switch_stack, @function .cfi_startproc simple .cfi_return_column 64 .cfi_def_cfa $sp, 0 .cfi_register 64, $1 do_switch_stack: lda $sp, -SWITCH_STACK_SIZE($sp) .cfi_adjust_cfa_offset SWITCH_STACK_SIZE stq $9, 0($sp) stq $10, 8($sp) stq $11, 16($sp) stq $12, 24($sp) stq $13, 32($sp) stq $14, 40($sp) stq $15, 48($sp) stq $26, 56($sp) stt $f0, 64($sp) stt $f1, 72($sp) stt $f2, 80($sp) stt $f3, 88($sp) stt $f4, 96($sp) stt $f5, 104($sp) stt $f6, 112($sp) stt $f7, 120($sp) stt $f8, 128($sp) stt $f9, 136($sp) stt $f10, 144($sp) stt $f11, 152($sp) stt $f12, 160($sp) stt $f13, 168($sp) stt $f14, 176($sp) stt $f15, 184($sp) stt $f16, 192($sp) stt $f17, 200($sp) stt $f18, 208($sp) stt $f19, 216($sp) stt $f20, 224($sp) stt $f21, 232($sp) stt $f22, 240($sp) stt $f23, 248($sp) stt $f24, 256($sp) stt $f25, 264($sp) stt $f26, 272($sp) stt $f27, 280($sp) mf_fpcr $f0 # get fpcr stt $f28, 288($sp) stt $f29, 296($sp) stt $f30, 304($sp) stt $f0, 312($sp) # save fpcr in slot of $f31 ldt $f0, 64($sp) # dont let "do_switch_stack" change fp state. 
ret $31, ($1), 1 .cfi_endproc .size do_switch_stack, .-do_switch_stack .align 4 .type undo_switch_stack, @function .cfi_startproc simple .cfi_def_cfa $sp, 0 .cfi_register 64, $1 undo_switch_stack: ldq $9, 0($sp) ldq $10, 8($sp) ldq $11, 16($sp) ldq $12, 24($sp) ldq $13, 32($sp) ldq $14, 40($sp) ldq $15, 48($sp) ldq $26, 56($sp) ldt $f30, 312($sp) # get saved fpcr ldt $f0, 64($sp) ldt $f1, 72($sp) ldt $f2, 80($sp) ldt $f3, 88($sp) mt_fpcr $f30 # install saved fpcr ldt $f4, 96($sp) ldt $f5, 104($sp) ldt $f6, 112($sp) ldt $f7, 120($sp) ldt $f8, 128($sp) ldt $f9, 136($sp) ldt $f10, 144($sp) ldt $f11, 152($sp) ldt $f12, 160($sp) ldt $f13, 168($sp) ldt $f14, 176($sp) ldt $f15, 184($sp) ldt $f16, 192($sp) ldt $f17, 200($sp) ldt $f18, 208($sp) ldt $f19, 216($sp) ldt $f20, 224($sp) ldt $f21, 232($sp) ldt $f22, 240($sp) ldt $f23, 248($sp) ldt $f24, 256($sp) ldt $f25, 264($sp) ldt $f26, 272($sp) ldt $f27, 280($sp) ldt $f28, 288($sp) ldt $f29, 296($sp) ldt $f30, 304($sp) lda $sp, SWITCH_STACK_SIZE($sp) ret $31, ($1), 1 .cfi_endproc .size undo_switch_stack, .-undo_switch_stack /* * The meat of the context switch code. */ .align 4 .globl alpha_switch_to .type alpha_switch_to, @function .cfi_startproc alpha_switch_to: DO_SWITCH_STACK call_pal PAL_swpctx lda $8, 0x3fff UNDO_SWITCH_STACK bic $sp, $8, $8 mov $17, $0 ret .cfi_endproc .size alpha_switch_to, .-alpha_switch_to /* * New processes begin life here. */ .globl ret_from_fork .align 4 .ent ret_from_fork ret_from_fork: lda $26, ret_from_sys_call mov $17, $16 jmp $31, schedule_tail .end ret_from_fork /* * ... and new kernel threads - here */ .align 4 .globl ret_from_kernel_thread .ent ret_from_kernel_thread ret_from_kernel_thread: mov $17, $16 jsr $26, schedule_tail mov $9, $27 mov $10, $16 jsr $26, ($9) br $31, ret_to_user .end ret_from_kernel_thread /* * Special system calls. Most of these are special in that they either * have to play switch_stack games or in some way use the pt_regs struct. 
*/ .macro fork_like name .align 4 .globl alpha_\name .ent alpha_\name alpha_\name: .prologue 0 bsr $1, do_switch_stack jsr $26, sys_\name ldq $26, 56($sp) lda $sp, SWITCH_STACK_SIZE($sp) ret .end alpha_\name .endm fork_like fork fork_like vfork fork_like clone .align 4 .globl sys_sigreturn .ent sys_sigreturn sys_sigreturn: .prologue 0 lda $9, ret_from_straced cmpult $26, $9, $9 lda $sp, -SWITCH_STACK_SIZE($sp) jsr $26, do_sigreturn bne $9, 1f jsr $26, syscall_trace_leave 1: br $1, undo_switch_stack br ret_from_sys_call .end sys_sigreturn .align 4 .globl sys_rt_sigreturn .ent sys_rt_sigreturn sys_rt_sigreturn: .prologue 0 lda $9, ret_from_straced cmpult $26, $9, $9 lda $sp, -SWITCH_STACK_SIZE($sp) jsr $26, do_rt_sigreturn bne $9, 1f jsr $26, syscall_trace_leave 1: br $1, undo_switch_stack br ret_from_sys_call .end sys_rt_sigreturn .align 4 .globl alpha_ni_syscall .ent alpha_ni_syscall alpha_ni_syscall: .prologue 0 /* Special because it also implements overflow handling via syscall number 0. And if you recall, zero is a special trigger for "not an error". Store large non-zero there. */ lda $0, -ENOSYS unop stq $0, 0($sp) ret .end alpha_ni_syscall
AirFortressIlikara/LS2K0300-linux-4.19
13,756
arch/alpha/kernel/systbls.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/kernel/systbls.S * * The system call table. */ #include <asm/unistd.h> .data .align 3 .globl sys_call_table sys_call_table: .quad alpha_ni_syscall /* 0 */ .quad sys_exit .quad alpha_fork .quad sys_read .quad sys_write .quad alpha_ni_syscall /* 5 */ .quad sys_close .quad sys_osf_wait4 .quad alpha_ni_syscall .quad sys_link .quad sys_unlink /* 10 */ .quad alpha_ni_syscall .quad sys_chdir .quad sys_fchdir .quad sys_mknod .quad sys_chmod /* 15 */ .quad sys_chown .quad sys_osf_brk .quad alpha_ni_syscall .quad sys_lseek .quad sys_getxpid /* 20 */ .quad sys_osf_mount .quad sys_umount .quad sys_setuid .quad sys_getxuid .quad alpha_ni_syscall /* 25 */ .quad sys_ptrace .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall /* 30 */ .quad alpha_ni_syscall .quad alpha_ni_syscall .quad sys_access .quad alpha_ni_syscall .quad alpha_ni_syscall /* 35 */ .quad sys_sync .quad sys_kill .quad alpha_ni_syscall .quad sys_setpgid .quad alpha_ni_syscall /* 40 */ .quad sys_dup .quad sys_alpha_pipe .quad sys_osf_set_program_attributes .quad alpha_ni_syscall .quad sys_open /* 45 */ .quad alpha_ni_syscall .quad sys_getxgid .quad sys_osf_sigprocmask .quad alpha_ni_syscall .quad alpha_ni_syscall /* 50 */ .quad sys_acct .quad sys_sigpending .quad alpha_ni_syscall .quad sys_ioctl .quad alpha_ni_syscall /* 55 */ .quad alpha_ni_syscall .quad sys_symlink .quad sys_readlink .quad sys_execve .quad sys_umask /* 60 */ .quad sys_chroot .quad alpha_ni_syscall .quad sys_getpgrp .quad sys_getpagesize .quad alpha_ni_syscall /* 65 */ .quad alpha_vfork .quad sys_newstat .quad sys_newlstat .quad alpha_ni_syscall .quad alpha_ni_syscall /* 70 */ .quad sys_osf_mmap .quad alpha_ni_syscall .quad sys_munmap .quad sys_mprotect .quad sys_madvise /* 75 */ .quad sys_vhangup .quad alpha_ni_syscall .quad alpha_ni_syscall .quad sys_getgroups /* map BSD's setpgrp to sys_setpgid for binary compatibility: */ .quad sys_setgroups /* 80 */ 
.quad alpha_ni_syscall .quad sys_setpgid .quad sys_osf_setitimer .quad alpha_ni_syscall .quad alpha_ni_syscall /* 85 */ .quad sys_osf_getitimer .quad sys_gethostname .quad sys_sethostname .quad sys_getdtablesize .quad sys_dup2 /* 90 */ .quad sys_newfstat .quad sys_fcntl .quad sys_osf_select .quad sys_poll .quad sys_fsync /* 95 */ .quad sys_setpriority .quad sys_socket .quad sys_connect .quad sys_accept .quad sys_osf_getpriority /* 100 */ .quad sys_send .quad sys_recv .quad sys_sigreturn .quad sys_bind .quad sys_setsockopt /* 105 */ .quad sys_listen .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall /* 110 */ .quad sys_sigsuspend .quad sys_osf_sigstack .quad sys_recvmsg .quad sys_sendmsg .quad alpha_ni_syscall /* 115 */ .quad sys_osf_gettimeofday .quad sys_osf_getrusage .quad sys_getsockopt .quad alpha_ni_syscall #ifdef CONFIG_OSF4_COMPAT .quad sys_osf_readv /* 120 */ .quad sys_osf_writev #else .quad sys_readv /* 120 */ .quad sys_writev #endif .quad sys_osf_settimeofday .quad sys_fchown .quad sys_fchmod .quad sys_recvfrom /* 125 */ .quad sys_setreuid .quad sys_setregid .quad sys_rename .quad sys_truncate .quad sys_ftruncate /* 130 */ .quad sys_flock .quad sys_setgid .quad sys_sendto .quad sys_shutdown .quad sys_socketpair /* 135 */ .quad sys_mkdir .quad sys_rmdir .quad sys_osf_utimes .quad alpha_ni_syscall .quad alpha_ni_syscall /* 140 */ .quad sys_getpeername .quad alpha_ni_syscall .quad alpha_ni_syscall .quad sys_getrlimit .quad sys_setrlimit /* 145 */ .quad alpha_ni_syscall .quad sys_setsid .quad sys_quotactl .quad alpha_ni_syscall .quad sys_getsockname /* 150 */ .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall /* 155 */ .quad sys_osf_sigaction .quad alpha_ni_syscall .quad alpha_ni_syscall .quad sys_osf_getdirentries .quad sys_osf_statfs /* 160 */ .quad sys_osf_fstatfs .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall .quad 
sys_osf_getdomainname /* 165 */ .quad sys_setdomainname .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall /* 170 */ .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall /* 175 */ .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall /* 180 */ .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall /* 185 */ .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall /* 190 */ .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall /* 195 */ .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall /* The OSF swapon has two extra arguments, but we ignore them. */ .quad sys_swapon .quad sys_msgctl /* 200 */ .quad sys_msgget .quad sys_msgrcv .quad sys_msgsnd .quad sys_semctl .quad sys_semget /* 205 */ .quad sys_semop .quad sys_osf_utsname .quad sys_lchown .quad sys_shmat .quad sys_shmctl /* 210 */ .quad sys_shmdt .quad sys_shmget .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall /* 215 */ .quad alpha_ni_syscall .quad sys_msync .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall /* 220 */ .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall .quad sys_osf_stat .quad sys_osf_lstat /* 225 */ .quad sys_osf_fstat .quad sys_osf_statfs64 .quad sys_osf_fstatfs64 .quad alpha_ni_syscall .quad alpha_ni_syscall /* 230 */ .quad alpha_ni_syscall .quad alpha_ni_syscall .quad sys_getpgid .quad sys_getsid .quad sys_sigaltstack /* 235 */ .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall /* 240 */ .quad sys_osf_sysinfo .quad alpha_ni_syscall .quad alpha_ni_syscall .quad sys_osf_proplist_syscall .quad alpha_ni_syscall /* 245 */ 
.quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall /* 250 */ .quad sys_osf_usleep_thread .quad alpha_ni_syscall .quad alpha_ni_syscall .quad sys_sysfs .quad alpha_ni_syscall /* 255 */ .quad sys_osf_getsysinfo .quad sys_osf_setsysinfo .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall /* 260 */ .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall /* 265 */ .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall /* 270 */ .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall /* 275 */ .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall /* 280 */ .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall /* 285 */ .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall /* 290 */ .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall /* 295 */ .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall .quad alpha_ni_syscall /* linux-specific system calls start at 300 */ .quad sys_bdflush /* 300 */ .quad sys_sethae .quad sys_mount .quad sys_old_adjtimex .quad sys_swapoff .quad sys_getdents /* 305 */ .quad sys_ni_syscall /* 306: old create_module */ .quad sys_init_module .quad sys_delete_module .quad sys_ni_syscall /* 309: old get_kernel_syms */ .quad sys_syslog /* 310 */ .quad sys_reboot .quad alpha_clone .quad sys_uselib .quad sys_mlock .quad sys_munlock /* 315 */ .quad sys_mlockall .quad sys_munlockall .quad sys_sysinfo .quad sys_sysctl .quad sys_ni_syscall /* 320 */ .quad sys_oldumount .quad sys_swapon .quad sys_times .quad sys_personality .quad sys_setfsuid /* 325 
*/ .quad sys_setfsgid .quad sys_ustat .quad sys_statfs .quad sys_fstatfs .quad sys_sched_setparam /* 330 */ .quad sys_sched_getparam .quad sys_sched_setscheduler .quad sys_sched_getscheduler .quad sys_sched_yield .quad sys_sched_get_priority_max /* 335 */ .quad sys_sched_get_priority_min .quad sys_sched_rr_get_interval .quad sys_ni_syscall /* sys_afs_syscall */ .quad sys_newuname .quad sys_nanosleep /* 340 */ .quad sys_mremap .quad sys_ni_syscall /* old nfsservctl */ .quad sys_setresuid .quad sys_getresuid .quad sys_pciconfig_read /* 345 */ .quad sys_pciconfig_write .quad sys_ni_syscall /* 347: old query_module */ .quad sys_prctl .quad sys_pread64 .quad sys_pwrite64 /* 350 */ .quad sys_rt_sigreturn .quad sys_rt_sigaction .quad sys_rt_sigprocmask .quad sys_rt_sigpending .quad sys_rt_sigtimedwait /* 355 */ .quad sys_rt_sigqueueinfo .quad sys_rt_sigsuspend .quad sys_select .quad sys_gettimeofday .quad sys_settimeofday /* 360 */ .quad sys_getitimer .quad sys_setitimer .quad sys_utimes .quad sys_getrusage .quad sys_wait4 /* 365 */ .quad sys_adjtimex .quad sys_getcwd .quad sys_capget .quad sys_capset .quad sys_sendfile64 /* 370 */ .quad sys_setresgid .quad sys_getresgid .quad sys_ni_syscall /* sys_dipc */ .quad sys_pivot_root .quad sys_mincore /* 375 */ .quad sys_pciconfig_iobase .quad sys_getdents64 .quad sys_gettid .quad sys_readahead .quad sys_ni_syscall /* 380 */ .quad sys_tkill .quad sys_setxattr .quad sys_lsetxattr .quad sys_fsetxattr .quad sys_getxattr /* 385 */ .quad sys_lgetxattr .quad sys_fgetxattr .quad sys_listxattr .quad sys_llistxattr .quad sys_flistxattr /* 390 */ .quad sys_removexattr .quad sys_lremovexattr .quad sys_fremovexattr .quad sys_futex .quad sys_sched_setaffinity /* 395 */ .quad sys_sched_getaffinity .quad sys_ni_syscall /* 397, tux */ .quad sys_io_setup .quad sys_io_destroy .quad sys_io_getevents /* 400 */ .quad sys_io_submit .quad sys_io_cancel .quad sys_ni_syscall /* 403, sys_alloc_hugepages */ .quad sys_ni_syscall /* 404, sys_free_hugepages 
*/ .quad sys_exit_group /* 405 */ .quad sys_lookup_dcookie .quad sys_epoll_create .quad sys_epoll_ctl .quad sys_epoll_wait .quad sys_remap_file_pages /* 410 */ .quad sys_set_tid_address .quad sys_restart_syscall .quad sys_fadvise64 .quad sys_timer_create .quad sys_timer_settime /* 415 */ .quad sys_timer_gettime .quad sys_timer_getoverrun .quad sys_timer_delete .quad sys_clock_settime .quad sys_clock_gettime /* 420 */ .quad sys_clock_getres .quad sys_clock_nanosleep .quad sys_semtimedop .quad sys_tgkill .quad sys_stat64 /* 425 */ .quad sys_lstat64 .quad sys_fstat64 .quad sys_ni_syscall /* sys_vserver */ .quad sys_ni_syscall /* sys_mbind */ .quad sys_ni_syscall /* sys_get_mempolicy */ .quad sys_ni_syscall /* sys_set_mempolicy */ .quad sys_mq_open .quad sys_mq_unlink .quad sys_mq_timedsend .quad sys_mq_timedreceive /* 435 */ .quad sys_mq_notify .quad sys_mq_getsetattr .quad sys_waitid .quad sys_add_key .quad sys_request_key /* 440 */ .quad sys_keyctl .quad sys_ioprio_set .quad sys_ioprio_get .quad sys_inotify_init .quad sys_inotify_add_watch /* 445 */ .quad sys_inotify_rm_watch .quad sys_fdatasync .quad sys_kexec_load .quad sys_migrate_pages .quad sys_openat /* 450 */ .quad sys_mkdirat .quad sys_mknodat .quad sys_fchownat .quad sys_futimesat .quad sys_fstatat64 /* 455 */ .quad sys_unlinkat .quad sys_renameat .quad sys_linkat .quad sys_symlinkat .quad sys_readlinkat /* 460 */ .quad sys_fchmodat .quad sys_faccessat .quad sys_pselect6 .quad sys_ppoll .quad sys_unshare /* 465 */ .quad sys_set_robust_list .quad sys_get_robust_list .quad sys_splice .quad sys_sync_file_range .quad sys_tee /* 470 */ .quad sys_vmsplice .quad sys_move_pages .quad sys_getcpu .quad sys_epoll_pwait .quad sys_utimensat /* 475 */ .quad sys_signalfd .quad sys_ni_syscall /* sys_timerfd */ .quad sys_eventfd .quad sys_recvmmsg .quad sys_fallocate /* 480 */ .quad sys_timerfd_create .quad sys_timerfd_settime .quad sys_timerfd_gettime .quad sys_signalfd4 .quad sys_eventfd2 /* 485 */ .quad sys_epoll_create1 
.quad sys_dup3 .quad sys_pipe2 .quad sys_inotify_init1 .quad sys_preadv /* 490 */ .quad sys_pwritev .quad sys_rt_tgsigqueueinfo .quad sys_perf_event_open .quad sys_fanotify_init .quad sys_fanotify_mark /* 495 */ .quad sys_prlimit64 .quad sys_name_to_handle_at .quad sys_open_by_handle_at .quad sys_clock_adjtime .quad sys_syncfs /* 500 */ .quad sys_setns .quad sys_accept4 .quad sys_sendmmsg .quad sys_process_vm_readv .quad sys_process_vm_writev /* 505 */ .quad sys_kcmp .quad sys_finit_module .quad sys_sched_setattr .quad sys_sched_getattr .quad sys_renameat2 /* 510 */ .quad sys_getrandom .quad sys_memfd_create .quad sys_execveat .quad sys_seccomp .quad sys_bpf /* 515 */ .quad sys_userfaultfd .quad sys_membarrier .quad sys_mlock2 .quad sys_copy_file_range .quad sys_preadv2 /* 520 */ .quad sys_pwritev2 .quad sys_statx .size sys_call_table, . - sys_call_table .type sys_call_table, @object /* Remember to update everything, kids. */ .ifne (. - sys_call_table) - (NR_SYSCALLS * 8) .err .endif
AirFortressIlikara/LS2K0300-linux-4.19
1,441
arch/alpha/kernel/vmlinux.lds.S
/* SPDX-License-Identifier: GPL-2.0 */ #include <asm-generic/vmlinux.lds.h> #include <asm/thread_info.h> #include <asm/cache.h> #include <asm/page.h> #include <asm/setup.h> OUTPUT_FORMAT("elf64-alpha") OUTPUT_ARCH(alpha) ENTRY(__start) PHDRS { kernel PT_LOAD; note PT_NOTE; } jiffies = jiffies_64; SECTIONS { #ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS . = 0xfffffc0000310000; #else . = 0xfffffc0001010000; #endif _text = .; /* Text and read-only data */ .text : { HEAD_TEXT TEXT_TEXT SCHED_TEXT CPUIDLE_TEXT LOCK_TEXT *(.fixup) *(.gnu.warning) } :kernel swapper_pg_dir = SWAPPER_PGD; _etext = .; /* End of text section */ NOTES :kernel :note .dummy : { *(.dummy) } :kernel RODATA EXCEPTION_TABLE(16) /* Will be freed after init */ __init_begin = ALIGN(PAGE_SIZE); INIT_TEXT_SECTION(PAGE_SIZE) INIT_DATA_SECTION(16) PERCPU_SECTION(L1_CACHE_BYTES) /* Align to THREAD_SIZE rather than PAGE_SIZE here so any padding page needed for the THREAD_SIZE aligned init_task gets freed after init */ . = ALIGN(THREAD_SIZE); __init_end = .; /* Freed after init ends here */ _sdata = .; /* Start of rw data section */ _data = .; RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) .got : { *(.got) } .sdata : { *(.sdata) } _edata = .; /* End of data section */ BSS_SECTION(0, 0, 0) _end = .; .mdebug 0 : { *(.mdebug) } .note 0 : { *(.note) } STABS_DEBUG DWARF_DEBUG DISCARDS }
AirFortressIlikara/LS2K0300-linux-4.19
3,290
arch/alpha/math-emu/qrnnd.S
# Alpha 21064 __udiv_qrnnd # Copyright (C) 1992, 1994, 1995, 2000 Free Software Foundation, Inc. # This file is part of GCC. # The GNU MP Library is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or (at your # option) any later version. # In addition to the permissions in the GNU General Public License, the # Free Software Foundation gives you unlimited permission to link the # compiled version of this file with other programs, and to distribute # those programs without any restriction coming from the use of this # file. (The General Public License restrictions do apply in other # respects; for example, they cover modification of the file, and # distribution when not linked into another program.) # This file is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public # License for more details. # You should have received a copy of the GNU General Public License # along with GCC; see the file COPYING. If not, write to the # Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, # MA 02111-1307, USA. 
.set noreorder .set noat .text .globl __udiv_qrnnd .ent __udiv_qrnnd __udiv_qrnnd: .frame $30,0,$26,0 .prologue 0 #define cnt $2 #define tmp $3 #define rem_ptr $16 #define n1 $17 #define n0 $18 #define d $19 #define qb $20 #define AT $at ldiq cnt,16 blt d,$largedivisor $loop1: cmplt n0,0,tmp addq n1,n1,n1 bis n1,tmp,n1 addq n0,n0,n0 cmpule d,n1,qb subq n1,d,tmp cmovne qb,tmp,n1 bis n0,qb,n0 cmplt n0,0,tmp addq n1,n1,n1 bis n1,tmp,n1 addq n0,n0,n0 cmpule d,n1,qb subq n1,d,tmp cmovne qb,tmp,n1 bis n0,qb,n0 cmplt n0,0,tmp addq n1,n1,n1 bis n1,tmp,n1 addq n0,n0,n0 cmpule d,n1,qb subq n1,d,tmp cmovne qb,tmp,n1 bis n0,qb,n0 cmplt n0,0,tmp addq n1,n1,n1 bis n1,tmp,n1 addq n0,n0,n0 cmpule d,n1,qb subq n1,d,tmp cmovne qb,tmp,n1 bis n0,qb,n0 subq cnt,1,cnt bgt cnt,$loop1 stq n1,0(rem_ptr) bis $31,n0,$0 ret $31,($26),1 $largedivisor: and n0,1,$4 srl n0,1,n0 sll n1,63,tmp or tmp,n0,n0 srl n1,1,n1 and d,1,$6 srl d,1,$5 addq $5,$6,$5 $loop2: cmplt n0,0,tmp addq n1,n1,n1 bis n1,tmp,n1 addq n0,n0,n0 cmpule $5,n1,qb subq n1,$5,tmp cmovne qb,tmp,n1 bis n0,qb,n0 cmplt n0,0,tmp addq n1,n1,n1 bis n1,tmp,n1 addq n0,n0,n0 cmpule $5,n1,qb subq n1,$5,tmp cmovne qb,tmp,n1 bis n0,qb,n0 cmplt n0,0,tmp addq n1,n1,n1 bis n1,tmp,n1 addq n0,n0,n0 cmpule $5,n1,qb subq n1,$5,tmp cmovne qb,tmp,n1 bis n0,qb,n0 cmplt n0,0,tmp addq n1,n1,n1 bis n1,tmp,n1 addq n0,n0,n0 cmpule $5,n1,qb subq n1,$5,tmp cmovne qb,tmp,n1 bis n0,qb,n0 subq cnt,1,cnt bgt cnt,$loop2 addq n1,n1,n1 addq $4,n1,n1 bne $6,$Odd stq n1,0(rem_ptr) bis $31,n0,$0 ret $31,($26),1 $Odd: /* q' in n0. r' in n1 */ addq n1,n0,n1 cmpult n1,n0,tmp # tmp := carry from addq subq n1,d,AT addq n0,tmp,n0 cmovne tmp,AT,n1 cmpult n1,d,tmp addq n0,1,AT cmoveq tmp,AT,n0 subq n1,d,AT cmoveq tmp,AT,n1 stq n1,0(rem_ptr) bis $31,n0,$0 ret $31,($26),1 .end __udiv_qrnnd
AirFortressIlikara/LS2K0300-linux-4.19
2,491
arch/alpha/lib/clear_user.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/clear_user.S * Contributed by Richard Henderson <rth@tamu.edu> * * Zero user space, handling exceptions as we go. * * We have to make sure that $0 is always up-to-date and contains the * right "bytes left to zero" value (and that it is updated only _after_ * a successful copy). There is also some rather minor exception setup * stuff. */ #include <asm/export.h> /* Allow an exception for an insn; exit if we get one. */ #define EX(x,y...) \ 99: x,##y; \ .section __ex_table,"a"; \ .long 99b - .; \ lda $31, $exception-99b($31); \ .previous .set noat .set noreorder .align 4 .globl __clear_user .ent __clear_user .frame $30, 0, $26 .prologue 0 $loop: and $1, 3, $4 # e0 : beq $4, 1f # .. e1 : 0: EX( stq_u $31, 0($16) ) # e0 : zero one word subq $0, 8, $0 # .. e1 : subq $4, 1, $4 # e0 : addq $16, 8, $16 # .. e1 : bne $4, 0b # e1 : unop # : 1: bic $1, 3, $1 # e0 : beq $1, $tail # .. e1 : 2: EX( stq_u $31, 0($16) ) # e0 : zero four words subq $0, 8, $0 # .. e1 : EX( stq_u $31, 8($16) ) # e0 : subq $0, 8, $0 # .. e1 : EX( stq_u $31, 16($16) ) # e0 : subq $0, 8, $0 # .. e1 : EX( stq_u $31, 24($16) ) # e0 : subq $0, 8, $0 # .. e1 : subq $1, 4, $1 # e0 : addq $16, 32, $16 # .. e1 : bne $1, 2b # e1 : $tail: bne $2, 1f # e1 : is there a tail to do? ret $31, ($26), 1 # .. e1 : 1: EX( ldq_u $5, 0($16) ) # e0 : clr $0 # .. e1 : nop # e1 : mskqh $5, $0, $5 # e0 : EX( stq_u $5, 0($16) ) # e0 : ret $31, ($26), 1 # .. e1 : __clear_user: and $17, $17, $0 and $16, 7, $4 # e0 : find dest misalignment beq $0, $zerolength # .. e1 : addq $0, $4, $1 # e0 : bias counter and $1, 7, $2 # e1 : number of bytes in tail srl $1, 3, $1 # e0 : beq $4, $loop # .. e1 : EX( ldq_u $5, 0($16) ) # e0 : load dst word to mask back in beq $1, $oneword # .. e1 : sub-word store? mskql $5, $16, $5 # e0 : take care of misaligned head addq $16, 8, $16 # .. e1 : EX( stq_u $5, -8($16) ) # e0 : addq $0, $4, $0 # .. 
e1 : bytes left -= 8 - misalignment subq $1, 1, $1 # e0 : subq $0, 8, $0 # .. e1 : br $loop # e1 : unop # : $oneword: mskql $5, $16, $4 # e0 : mskqh $5, $2, $5 # e0 : or $5, $4, $5 # e1 : EX( stq_u $5, 0($16) ) # e0 : clr $0 # .. e1 : $zerolength: $exception: ret $31, ($26), 1 # .. e1 : .end __clear_user EXPORT_SYMBOL(__clear_user)
AirFortressIlikara/LS2K0300-linux-4.19
4,406
arch/alpha/lib/divide.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/divide.S * * (C) 1995 Linus Torvalds * * Alpha division.. */ /* * The alpha chip doesn't provide hardware division, so we have to do it * by hand. The compiler expects the functions * * __divqu: 64-bit unsigned long divide * __remqu: 64-bit unsigned long remainder * __divqs/__remqs: signed 64-bit * __divlu/__remlu: unsigned 32-bit * __divls/__remls: signed 32-bit * * These are not normal C functions: instead of the normal * calling sequence, these expect their arguments in registers * $24 and $25, and return the result in $27. Register $28 may * be clobbered (assembly temporary), anything else must be saved. * * In short: painful. * * This is a rather simple bit-at-a-time algorithm: it's very good * at dividing random 64-bit numbers, but the more usual case where * the divisor is small is handled better by the DEC algorithm * using lookup tables. This uses much less memory, though, and is * nicer on the cache.. Besides, I don't know the copyright status * of the DEC code. */ /* * My temporaries: * $0 - current bit * $1 - shifted divisor * $2 - modulus/quotient * * $23 - return address * $24 - dividend * $25 - divisor * * $27 - quotient/modulus * $28 - compare status */ #include <asm/export.h> #define halt .long 0 /* * Select function type and registers */ #define mask $0 #define divisor $1 #define compare $28 #define tmp1 $3 #define tmp2 $4 #ifdef DIV #define DIV_ONLY(x,y...) x,##y #define MOD_ONLY(x,y...) #define func(x) __div##x #define modulus $2 #define quotient $27 #define GETSIGN(x) xor $24,$25,x #define STACK 48 #else #define DIV_ONLY(x,y...) #define MOD_ONLY(x,y...) 
x,##y #define func(x) __rem##x #define modulus $27 #define quotient $2 #define GETSIGN(x) bis $24,$24,x #define STACK 32 #endif /* * For 32-bit operations, we need to extend to 64-bit */ #ifdef INTSIZE #define ufunction func(lu) #define sfunction func(l) #define LONGIFY(x) zapnot x,15,x #define SLONGIFY(x) addl x,0,x #else #define ufunction func(qu) #define sfunction func(q) #define LONGIFY(x) #define SLONGIFY(x) #endif .set noat .align 3 .globl ufunction .ent ufunction ufunction: subq $30,STACK,$30 .frame $30,STACK,$23 .prologue 0 7: stq $1, 0($30) bis $25,$25,divisor stq $2, 8($30) bis $24,$24,modulus stq $0,16($30) bis $31,$31,quotient LONGIFY(divisor) stq tmp1,24($30) LONGIFY(modulus) bis $31,1,mask DIV_ONLY(stq tmp2,32($30)) beq divisor, 9f /* div by zero */ #ifdef INTSIZE /* * shift divisor left, using 3-bit shifts for * 32-bit divides as we can't overflow. Three-bit * shifts will result in looping three times less * here, but can result in two loops more later. * Thus using a large shift isn't worth it (and * s8add pairs better than a sll..) */ 1: cmpult divisor,modulus,compare s8addq divisor,$31,divisor s8addq mask,$31,mask bne compare,1b #else 1: cmpult divisor,modulus,compare blt divisor, 2f addq divisor,divisor,divisor addq mask,mask,mask bne compare,1b unop #endif /* ok, start to go right again.. */ 2: DIV_ONLY(addq quotient,mask,tmp2) srl mask,1,mask cmpule divisor,modulus,compare subq modulus,divisor,tmp1 DIV_ONLY(cmovne compare,tmp2,quotient) srl divisor,1,divisor cmovne compare,tmp1,modulus bne mask,2b 9: ldq $1, 0($30) ldq $2, 8($30) ldq $0,16($30) ldq tmp1,24($30) DIV_ONLY(ldq tmp2,32($30)) addq $30,STACK,$30 ret $31,($23),1 .end ufunction EXPORT_SYMBOL(ufunction) /* * Uhh.. Ugly signed division. I'd rather not have it at all, but * it's needed in some circumstances. There are different ways to * handle this, really. 
This does: * -a / b = a / -b = -(a / b) * -a % b = -(a % b) * a % -b = a % b * which is probably not the best solution, but at least should * have the property that (x/y)*y + (x%y) = x. */ .align 3 .globl sfunction .ent sfunction sfunction: subq $30,STACK,$30 .frame $30,STACK,$23 .prologue 0 bis $24,$25,$28 SLONGIFY($28) bge $28,7b stq $24,0($30) subq $31,$24,$28 stq $25,8($30) cmovlt $24,$28,$24 /* abs($24) */ stq $23,16($30) subq $31,$25,$28 stq tmp1,24($30) cmovlt $25,$28,$25 /* abs($25) */ unop bsr $23,ufunction ldq $24,0($30) ldq $25,8($30) GETSIGN($28) subq $31,$27,tmp1 SLONGIFY($28) ldq $23,16($30) cmovlt $28,tmp1,$27 ldq tmp1,24($30) addq $30,STACK,$30 ret $31,($23),1 .end sfunction EXPORT_SYMBOL(sfunction)
AirFortressIlikara/LS2K0300-linux-4.19
2,880
arch/alpha/lib/ev67-strchr.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/ev67-strchr.S * 21264 version contributed by Rick Gorton <rick.gorton@alpha-processor.com> * * Return the address of a given character within a null-terminated * string, or null if it is not found. * * Much of the information about 21264 scheduling/coding comes from: * Compiler Writer's Guide for the Alpha 21264 * abbreviated as 'CWG' in other comments here * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html * Scheduling notation: * E - either cluster * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1 * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 * Try not to change the actual algorithm if possible for consistency. */ #include <asm/export.h> #include <asm/regdef.h> .set noreorder .set noat .align 4 .globl strchr .ent strchr strchr: .frame sp, 0, ra .prologue 0 ldq_u t0, 0(a0) # L : load first quadword Latency=3 and a1, 0xff, t3 # E : 00000000000000ch insbl a1, 1, t5 # U : 000000000000ch00 insbl a1, 7, a2 # U : ch00000000000000 insbl t3, 6, a3 # U : 00ch000000000000 or t5, t3, a1 # E : 000000000000chch andnot a0, 7, v0 # E : align our loop pointer lda t4, -1 # E : build garbage mask mskqh t4, a0, t4 # U : only want relevant part of first quad or a2, a3, a2 # E : chch000000000000 inswl a1, 2, t5 # E : 00000000chch0000 inswl a1, 4, a3 # E : 0000chch00000000 or a1, a2, a1 # E : chch00000000chch or a3, t5, t5 # E : 0000chchchch0000 cmpbge zero, t0, t2 # E : bits set iff byte == zero cmpbge zero, t4, t4 # E : bits set iff byte is garbage /* This quad is _very_ serialized. Lots of stalling happens */ or t5, a1, a1 # E : chchchchchchchch xor t0, a1, t1 # E : make bytes == c zero cmpbge zero, t1, t3 # E : bits set iff byte == c or t2, t3, t0 # E : bits set iff char match or zero match andnot t0, t4, t0 # E : clear garbage bits cttz t0, a2 # U0 : speculative (in case we get a match) nop # E : bne t0, $found # U : /* * Yuk. 
This loop is going to stall like crazy waiting for the * data to be loaded. Not much can be done about it unless it's * unrolled multiple times - is that safe to do in kernel space? * Or would exception handling recovery code do the trick here? */ $loop: ldq t0, 8(v0) # L : Latency=3 addq v0, 8, v0 # E : xor t0, a1, t1 # E : cmpbge zero, t0, t2 # E : bits set iff byte == 0 cmpbge zero, t1, t3 # E : bits set iff byte == c or t2, t3, t0 # E : cttz t3, a2 # U0 : speculative (in case we get a match) beq t0, $loop # U : $found: negq t0, t1 # E : clear all but least set bit and t0, t1, t0 # E : and t0, t3, t1 # E : bit set iff byte was the char addq v0, a2, v0 # E : Add in the bit number from above cmoveq t1, $31, v0 # E : Two mapping slots, latency = 2 nop nop ret # L0 : .end strchr EXPORT_SYMBOL(strchr)
AirFortressIlikara/LS2K0300-linux-4.19
8,994
arch/alpha/lib/stxcpy.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/stxcpy.S * Contributed by Richard Henderson (rth@tamu.edu) * * Copy a null-terminated string from SRC to DST. * * This is an internal routine used by strcpy, stpcpy, and strcat. * As such, it uses special linkage conventions to make implementation * of these public functions more efficient. * * On input: * t9 = return address * a0 = DST * a1 = SRC * * On output: * t12 = bitmask (with one bit set) indicating the last byte written * a0 = unaligned address of the last *word* written * * Furthermore, v0, a3-a5, t11, and t12 are untouched. */ #include <asm/regdef.h> .set noat .set noreorder .text /* There is a problem with either gdb (as of 4.16) or gas (as of 2.7) that doesn't like putting the entry point for a procedure somewhere in the middle of the procedure descriptor. Work around this by putting the aligned copy in its own procedure descriptor */ .ent stxcpy_aligned .align 3 stxcpy_aligned: .frame sp, 0, t9 .prologue 0 /* On entry to this basic block: t0 == the first destination word for masking back in t1 == the first source word. */ /* Create the 1st output word and detect 0's in the 1st input word. */ lda t2, -1 # e1 : build a mask against false zero mskqh t2, a1, t2 # e0 : detection in the src word mskqh t1, a1, t3 # e0 : ornot t1, t2, t2 # .. e1 : mskql t0, a1, t0 # e0 : assemble the first output word cmpbge zero, t2, t8 # .. e1 : bits set iff null found or t0, t3, t1 # e0 : bne t8, $a_eos # .. e1 : /* On entry to this basic block: t0 == the first destination word for masking back in t1 == a source word not containing a null. */ $a_loop: stq_u t1, 0(a0) # e0 : addq a0, 8, a0 # .. e1 : ldq_u t1, 0(a1) # e0 : addq a1, 8, a1 # .. e1 : cmpbge zero, t1, t8 # e0 (stall) beq t8, $a_loop # .. e1 (zdb) /* Take care of the final (partial) word store. On entry to this basic block we have: t1 == the source word containing the null t8 == the cmpbge mask that found it. 
*/ $a_eos: negq t8, t6 # e0 : find low bit set and t8, t6, t12 # e1 (stall) /* For the sake of the cache, don't read a destination word if we're not going to need it. */ and t12, 0x80, t6 # e0 : bne t6, 1f # .. e1 (zdb) /* We're doing a partial word store and so need to combine our source and original destination words. */ ldq_u t0, 0(a0) # e0 : subq t12, 1, t6 # .. e1 : zapnot t1, t6, t1 # e0 : clear src bytes >= null or t12, t6, t8 # .. e1 : zap t0, t8, t0 # e0 : clear dst bytes <= null or t0, t1, t1 # e1 : 1: stq_u t1, 0(a0) # e0 : ret (t9) # .. e1 : .end stxcpy_aligned .align 3 .ent __stxcpy .globl __stxcpy __stxcpy: .frame sp, 0, t9 .prologue 0 /* Are source and destination co-aligned? */ xor a0, a1, t0 # e0 : unop # : and t0, 7, t0 # e0 : bne t0, $unaligned # .. e1 : /* We are co-aligned; take care of a partial first word. */ ldq_u t1, 0(a1) # e0 : load first src word and a0, 7, t0 # .. e1 : take care not to load a word ... addq a1, 8, a1 # e0 : beq t0, stxcpy_aligned # .. e1 : ... if we wont need it ldq_u t0, 0(a0) # e0 : br stxcpy_aligned # .. e1 : /* The source and destination are not co-aligned. Align the destination and cope. We have to be very careful about not reading too much and causing a SEGV. */ .align 3 $u_head: /* We know just enough now to be able to assemble the first full source word. We can still find a zero at the end of it that prevents us from outputting the whole thing. On entry to this basic block: t0 == the first dest word, for masking back in, if needed else 0 t1 == the low bits of the first source word t6 == bytemask that is -1 in dest word bytes */ ldq_u t2, 8(a1) # e0 : addq a1, 8, a1 # .. e1 : extql t1, a1, t1 # e0 : extqh t2, a1, t4 # e0 : mskql t0, a0, t0 # e0 : or t1, t4, t1 # .. e1 : mskqh t1, a0, t1 # e0 : or t0, t1, t1 # e1 : or t1, t6, t6 # e0 : cmpbge zero, t6, t8 # .. e1 : lda t6, -1 # e0 : for masking just below bne t8, $u_final # .. 
e1 : mskql t6, a1, t6 # e0 : mask out the bits we have or t6, t2, t2 # e1 : already extracted before cmpbge zero, t2, t8 # e0 : testing eos bne t8, $u_late_head_exit # .. e1 (zdb) /* Finally, we've got all the stupid leading edge cases taken care of and we can set up to enter the main loop. */ stq_u t1, 0(a0) # e0 : store first output word addq a0, 8, a0 # .. e1 : extql t2, a1, t0 # e0 : position ho-bits of lo word ldq_u t2, 8(a1) # .. e1 : read next high-order source word addq a1, 8, a1 # e0 : cmpbge zero, t2, t8 # .. e1 : nop # e0 : bne t8, $u_eos # .. e1 : /* Unaligned copy main loop. In order to avoid reading too much, the loop is structured to detect zeros in aligned source words. This has, unfortunately, effectively pulled half of a loop iteration out into the head and half into the tail, but it does prevent nastiness from accumulating in the very thing we want to run as fast as possible. On entry to this basic block: t0 == the shifted high-order bits from the previous source word t2 == the unshifted current source word We further know that t2 does not contain a null terminator. */ .align 3 $u_loop: extqh t2, a1, t1 # e0 : extract high bits for current word addq a1, 8, a1 # .. e1 : extql t2, a1, t3 # e0 : extract low bits for next time addq a0, 8, a0 # .. e1 : or t0, t1, t1 # e0 : current dst word now complete ldq_u t2, 0(a1) # .. e1 : load high word for next time stq_u t1, -8(a0) # e0 : save the current word mov t3, t0 # .. e1 : cmpbge zero, t2, t8 # e0 : test new word for eos beq t8, $u_loop # .. e1 : /* We've found a zero somewhere in the source word we just read. If it resides in the lower half, we have one (probably partial) word to write out, and if it resides in the upper half, we have one full and one partial word left to write out. On entry to this basic block: t0 == the shifted high-order bits from the previous source word t2 == the unshifted current source word. 
*/ $u_eos: extqh t2, a1, t1 # e0 : or t0, t1, t1 # e1 : first (partial) source word complete cmpbge zero, t1, t8 # e0 : is the null in this first bit? bne t8, $u_final # .. e1 (zdb) $u_late_head_exit: stq_u t1, 0(a0) # e0 : the null was in the high-order bits addq a0, 8, a0 # .. e1 : extql t2, a1, t1 # e0 : cmpbge zero, t1, t8 # .. e1 : /* Take care of a final (probably partial) result word. On entry to this basic block: t1 == assembled source word t8 == cmpbge mask that found the null. */ $u_final: negq t8, t6 # e0 : isolate low bit set and t6, t8, t12 # e1 : and t12, 0x80, t6 # e0 : avoid dest word load if we can bne t6, 1f # .. e1 (zdb) ldq_u t0, 0(a0) # e0 : subq t12, 1, t6 # .. e1 : or t6, t12, t8 # e0 : zapnot t1, t6, t1 # .. e1 : kill source bytes >= null zap t0, t8, t0 # e0 : kill dest bytes <= null or t0, t1, t1 # e1 : 1: stq_u t1, 0(a0) # e0 : ret (t9) # .. e1 : /* Unaligned copy entry point. */ .align 3 $unaligned: ldq_u t1, 0(a1) # e0 : load first source word and a0, 7, t4 # .. e1 : find dest misalignment and a1, 7, t5 # e0 : find src misalignment /* Conditionally load the first destination word and a bytemask with 0xff indicating that the destination byte is sacrosanct. */ mov zero, t0 # .. e1 : mov zero, t6 # e0 : beq t4, 1f # .. e1 : ldq_u t0, 0(a0) # e0 : lda t6, -1 # .. e1 : mskql t6, a0, t6 # e0 : 1: subq a1, t4, a1 # .. e1 : sub dest misalignment from src addr /* If source misalignment is larger than dest misalignment, we need extra startup checks to avoid SEGV. */ cmplt t4, t5, t12 # e0 : beq t12, $u_head # .. e1 (zdb) lda t2, -1 # e1 : mask out leading garbage in source mskqh t2, t5, t2 # e0 : nop # e0 : ornot t1, t2, t3 # .. e1 : cmpbge zero, t3, t8 # e0 : is there a zero? beq t8, $u_head # .. e1 (zdb) /* At this point we've found a zero in the first partial word of the source. We need to isolate the valid source data and mask it into the original destination data. 
(Incidentally, we know that we'll need at least one byte of that original dest word.) */ ldq_u t0, 0(a0) # e0 : negq t8, t6 # .. e1 : build bitmask of bytes <= zero and t6, t8, t12 # e0 : and a1, 7, t5 # .. e1 : subq t12, 1, t6 # e0 : or t6, t12, t8 # e1 : srl t12, t5, t12 # e0 : adjust final null return value zapnot t2, t8, t2 # .. e1 : prepare source word; mirror changes and t1, t2, t1 # e1 : to source validity mask extql t2, a1, t2 # .. e0 : extql t1, a1, t1 # e0 : andnot t0, t2, t0 # .. e1 : zero place for source to reside or t0, t1, t1 # e1 : and put it there stq_u t1, 0(a0) # .. e0 : ret (t9) # e1 : .end __stxcpy
AirFortressIlikara/LS2K0300-linux-4.19
16,394
arch/alpha/lib/ev6-memset.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/ev6-memset.S * * This is an efficient (and relatively small) implementation of the C library * "memset()" function for the 21264 implementation of Alpha. * * 21264 version contributed by Rick Gorton <rick.gorton@alpha-processor.com> * * Much of the information about 21264 scheduling/coding comes from: * Compiler Writer's Guide for the Alpha 21264 * abbreviated as 'CWG' in other comments here * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html * Scheduling notation: * E - either cluster * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1 * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 * The algorithm for the leading and trailing quadwords remains the same, * however the loop has been unrolled to enable better memory throughput, * and the code has been replicated for each of the entry points: __memset * and __memset16 to permit better scheduling to eliminate the stalling * encountered during the mask replication. * A future enhancement might be to put in a byte store loop for really * small (say < 32 bytes) memset()s. Whether or not that change would be * a win in the kernel would depend upon the contextual usage. * WARNING: Maintaining this is going to be more work than the above version, * as fixes will need to be made in multiple places. The performance gain * is worth it. */ #include <asm/export.h> .set noat .set noreorder .text .globl memset .globl __memset .globl ___memset .globl __memset16 .globl __constant_c_memset .ent ___memset .align 5 ___memset: .frame $30,0,$26,0 .prologue 0 /* * Serious stalling happens. The only way to mitigate this is to * undertake a major re-write to interleave the constant materialization * with other parts of the fall-through code. This is important, even * though it makes maintenance tougher. * Do this later. 
*/ and $17,255,$1 # E : 00000000000000ch insbl $17,1,$2 # U : 000000000000ch00 bis $16,$16,$0 # E : return value ble $18,end_b # U : zero length requested? addq $18,$16,$6 # E : max address to write to bis $1,$2,$17 # E : 000000000000chch insbl $1,2,$3 # U : 0000000000ch0000 insbl $1,3,$4 # U : 00000000ch000000 or $3,$4,$3 # E : 00000000chch0000 inswl $17,4,$5 # U : 0000chch00000000 xor $16,$6,$1 # E : will complete write be within one quadword? inswl $17,6,$2 # U : chch000000000000 or $17,$3,$17 # E : 00000000chchchch or $2,$5,$2 # E : chchchch00000000 bic $1,7,$1 # E : fit within a single quadword? and $16,7,$3 # E : Target addr misalignment or $17,$2,$17 # E : chchchchchchchch beq $1,within_quad_b # U : nop # E : beq $3,aligned_b # U : target is 0mod8 /* * Target address is misaligned, and won't fit within a quadword */ ldq_u $4,0($16) # L : Fetch first partial bis $16,$16,$5 # E : Save the address insql $17,$16,$2 # U : Insert new bytes subq $3,8,$3 # E : Invert (for addressing uses) addq $18,$3,$18 # E : $18 is new count ($3 is negative) mskql $4,$16,$4 # U : clear relevant parts of the quad subq $16,$3,$16 # E : $16 is new aligned destination bis $2,$4,$1 # E : Final bytes nop stq_u $1,0($5) # L : Store result nop nop .align 4 aligned_b: /* * We are now guaranteed to be quad aligned, with at least * one partial quad to write. 
*/ sra $18,3,$3 # U : Number of remaining quads to write and $18,7,$18 # E : Number of trailing bytes to write bis $16,$16,$5 # E : Save dest address beq $3,no_quad_b # U : tail stuff only /* * it's worth the effort to unroll this and use wh64 if possible * Lifted a bunch of code from clear_user.S * At this point, entry values are: * $16 Current destination address * $5 A copy of $16 * $6 The max quadword address to write to * $18 Number trailer bytes * $3 Number quads to write */ and $16, 0x3f, $2 # E : Forward work (only useful for unrolled loop) subq $3, 16, $4 # E : Only try to unroll if > 128 bytes subq $2, 0x40, $1 # E : bias counter (aligning stuff 0mod64) blt $4, loop_b # U : /* * We know we've got at least 16 quads, minimum of one trip * through unrolled loop. Do a quad at a time to get us 0mod64 * aligned. */ nop # E : nop # E : nop # E : beq $1, $bigalign_b # U : $alignmod64_b: stq $17, 0($5) # L : subq $3, 1, $3 # E : For consistency later addq $1, 8, $1 # E : Increment towards zero for alignment addq $5, 8, $4 # E : Initial wh64 address (filler instruction) nop nop addq $5, 8, $5 # E : Inc address blt $1, $alignmod64_b # U : $bigalign_b: /* * $3 - number quads left to go * $5 - target address (aligned 0mod64) * $17 - mask of stuff to store * Scratch registers available: $7, $2, $4, $1 * we know that we'll be taking a minimum of one trip through * CWG Section 3.7.6: do not expect a sustained store rate of > 1/cycle * Assumes the wh64 needs to be for 2 trips through the loop in the future * The wh64 is issued on for the starting destination address for trip +2 * through the loop, and if there are less than two trips left, the target * address will be for the current trip. 
*/ $do_wh64_b: wh64 ($4) # L1 : memory subsystem write hint subq $3, 24, $2 # E : For determining future wh64 addresses stq $17, 0($5) # L : nop # E : addq $5, 128, $4 # E : speculative target of next wh64 stq $17, 8($5) # L : stq $17, 16($5) # L : addq $5, 64, $7 # E : Fallback address for wh64 (== next trip addr) stq $17, 24($5) # L : stq $17, 32($5) # L : cmovlt $2, $7, $4 # E : Latency 2, extra mapping cycle nop stq $17, 40($5) # L : stq $17, 48($5) # L : subq $3, 16, $2 # E : Repeat the loop at least once more? nop stq $17, 56($5) # L : addq $5, 64, $5 # E : subq $3, 8, $3 # E : bge $2, $do_wh64_b # U : nop nop nop beq $3, no_quad_b # U : Might have finished already .align 4 /* * Simple loop for trailing quadwords, or for small amounts * of data (where we can't use an unrolled loop and wh64) */ loop_b: stq $17,0($5) # L : subq $3,1,$3 # E : Decrement number quads left addq $5,8,$5 # E : Inc address bne $3,loop_b # U : more? no_quad_b: /* * Write 0..7 trailing bytes. */ nop # E : beq $18,end_b # U : All done? ldq $7,0($5) # L : mskqh $7,$6,$2 # U : Mask final quad insqh $17,$6,$4 # U : New bits bis $2,$4,$1 # E : Put it all together stq $1,0($5) # L : And back to memory ret $31,($26),1 # L0 : within_quad_b: ldq_u $1,0($16) # L : insql $17,$16,$2 # U : New bits mskql $1,$16,$4 # U : Clear old bis $2,$4,$2 # E : New result mskql $2,$6,$4 # U : mskqh $1,$6,$2 # U : bis $2,$4,$1 # E : stq_u $1,0($16) # L : end_b: nop nop nop ret $31,($26),1 # L0 : .end ___memset EXPORT_SYMBOL(___memset) /* * This is the original body of code, prior to replication and * rescheduling. Leave it here, as there may be calls to this * entry point. */ .align 4 .ent __constant_c_memset __constant_c_memset: .frame $30,0,$26,0 .prologue 0 addq $18,$16,$6 # E : max address to write to bis $16,$16,$0 # E : return value xor $16,$6,$1 # E : will complete write be within one quadword? ble $18,end # U : zero length requested? 
bic $1,7,$1 # E : fit within a single quadword beq $1,within_one_quad # U : and $16,7,$3 # E : Target addr misalignment beq $3,aligned # U : target is 0mod8 /* * Target address is misaligned, and won't fit within a quadword */ ldq_u $4,0($16) # L : Fetch first partial bis $16,$16,$5 # E : Save the address insql $17,$16,$2 # U : Insert new bytes subq $3,8,$3 # E : Invert (for addressing uses) addq $18,$3,$18 # E : $18 is new count ($3 is negative) mskql $4,$16,$4 # U : clear relevant parts of the quad subq $16,$3,$16 # E : $16 is new aligned destination bis $2,$4,$1 # E : Final bytes nop stq_u $1,0($5) # L : Store result nop nop .align 4 aligned: /* * We are now guaranteed to be quad aligned, with at least * one partial quad to write. */ sra $18,3,$3 # U : Number of remaining quads to write and $18,7,$18 # E : Number of trailing bytes to write bis $16,$16,$5 # E : Save dest address beq $3,no_quad # U : tail stuff only /* * it's worth the effort to unroll this and use wh64 if possible * Lifted a bunch of code from clear_user.S * At this point, entry values are: * $16 Current destination address * $5 A copy of $16 * $6 The max quadword address to write to * $18 Number trailer bytes * $3 Number quads to write */ and $16, 0x3f, $2 # E : Forward work (only useful for unrolled loop) subq $3, 16, $4 # E : Only try to unroll if > 128 bytes subq $2, 0x40, $1 # E : bias counter (aligning stuff 0mod64) blt $4, loop # U : /* * We know we've got at least 16 quads, minimum of one trip * through unrolled loop. Do a quad at a time to get us 0mod64 * aligned. 
*/ nop # E : nop # E : nop # E : beq $1, $bigalign # U : $alignmod64: stq $17, 0($5) # L : subq $3, 1, $3 # E : For consistency later addq $1, 8, $1 # E : Increment towards zero for alignment addq $5, 8, $4 # E : Initial wh64 address (filler instruction) nop nop addq $5, 8, $5 # E : Inc address blt $1, $alignmod64 # U : $bigalign: /* * $3 - number quads left to go * $5 - target address (aligned 0mod64) * $17 - mask of stuff to store * Scratch registers available: $7, $2, $4, $1 * we know that we'll be taking a minimum of one trip through * CWG Section 3.7.6: do not expect a sustained store rate of > 1/cycle * Assumes the wh64 needs to be for 2 trips through the loop in the future * The wh64 is issued on for the starting destination address for trip +2 * through the loop, and if there are less than two trips left, the target * address will be for the current trip. */ $do_wh64: wh64 ($4) # L1 : memory subsystem write hint subq $3, 24, $2 # E : For determining future wh64 addresses stq $17, 0($5) # L : nop # E : addq $5, 128, $4 # E : speculative target of next wh64 stq $17, 8($5) # L : stq $17, 16($5) # L : addq $5, 64, $7 # E : Fallback address for wh64 (== next trip addr) stq $17, 24($5) # L : stq $17, 32($5) # L : cmovlt $2, $7, $4 # E : Latency 2, extra mapping cycle nop stq $17, 40($5) # L : stq $17, 48($5) # L : subq $3, 16, $2 # E : Repeat the loop at least once more? nop stq $17, 56($5) # L : addq $5, 64, $5 # E : subq $3, 8, $3 # E : bge $2, $do_wh64 # U : nop nop nop beq $3, no_quad # U : Might have finished already .align 4 /* * Simple loop for trailing quadwords, or for small amounts * of data (where we can't use an unrolled loop and wh64) */ loop: stq $17,0($5) # L : subq $3,1,$3 # E : Decrement number quads left addq $5,8,$5 # E : Inc address bne $3,loop # U : more? no_quad: /* * Write 0..7 trailing bytes. */ nop # E : beq $18,end # U : All done? 
ldq $7,0($5) # L : mskqh $7,$6,$2 # U : Mask final quad insqh $17,$6,$4 # U : New bits bis $2,$4,$1 # E : Put it all together stq $1,0($5) # L : And back to memory ret $31,($26),1 # L0 : within_one_quad: ldq_u $1,0($16) # L : insql $17,$16,$2 # U : New bits mskql $1,$16,$4 # U : Clear old bis $2,$4,$2 # E : New result mskql $2,$6,$4 # U : mskqh $1,$6,$2 # U : bis $2,$4,$1 # E : stq_u $1,0($16) # L : end: nop nop nop ret $31,($26),1 # L0 : .end __constant_c_memset EXPORT_SYMBOL(__constant_c_memset) /* * This is a replicant of the __constant_c_memset code, rescheduled * to mask stalls. Note that entry point names also had to change */ .align 5 .ent __memset16 __memset16: .frame $30,0,$26,0 .prologue 0 inswl $17,0,$5 # U : 000000000000c1c2 inswl $17,2,$2 # U : 00000000c1c20000 bis $16,$16,$0 # E : return value addq $18,$16,$6 # E : max address to write to ble $18, end_w # U : zero length requested? inswl $17,4,$3 # U : 0000c1c200000000 inswl $17,6,$4 # U : c1c2000000000000 xor $16,$6,$1 # E : will complete write be within one quadword? or $2,$5,$2 # E : 00000000c1c2c1c2 or $3,$4,$17 # E : c1c2c1c200000000 bic $1,7,$1 # E : fit within a single quadword and $16,7,$3 # E : Target addr misalignment or $17,$2,$17 # E : c1c2c1c2c1c2c1c2 beq $1,within_quad_w # U : nop beq $3,aligned_w # U : target is 0mod8 /* * Target address is misaligned, and won't fit within a quadword */ ldq_u $4,0($16) # L : Fetch first partial bis $16,$16,$5 # E : Save the address insql $17,$16,$2 # U : Insert new bytes subq $3,8,$3 # E : Invert (for addressing uses) addq $18,$3,$18 # E : $18 is new count ($3 is negative) mskql $4,$16,$4 # U : clear relevant parts of the quad subq $16,$3,$16 # E : $16 is new aligned destination bis $2,$4,$1 # E : Final bytes nop stq_u $1,0($5) # L : Store result nop nop .align 4 aligned_w: /* * We are now guaranteed to be quad aligned, with at least * one partial quad to write. 
*/ sra $18,3,$3 # U : Number of remaining quads to write and $18,7,$18 # E : Number of trailing bytes to write bis $16,$16,$5 # E : Save dest address beq $3,no_quad_w # U : tail stuff only /* * it's worth the effort to unroll this and use wh64 if possible * Lifted a bunch of code from clear_user.S * At this point, entry values are: * $16 Current destination address * $5 A copy of $16 * $6 The max quadword address to write to * $18 Number trailer bytes * $3 Number quads to write */ and $16, 0x3f, $2 # E : Forward work (only useful for unrolled loop) subq $3, 16, $4 # E : Only try to unroll if > 128 bytes subq $2, 0x40, $1 # E : bias counter (aligning stuff 0mod64) blt $4, loop_w # U : /* * We know we've got at least 16 quads, minimum of one trip * through unrolled loop. Do a quad at a time to get us 0mod64 * aligned. */ nop # E : nop # E : nop # E : beq $1, $bigalign_w # U : $alignmod64_w: stq $17, 0($5) # L : subq $3, 1, $3 # E : For consistency later addq $1, 8, $1 # E : Increment towards zero for alignment addq $5, 8, $4 # E : Initial wh64 address (filler instruction) nop nop addq $5, 8, $5 # E : Inc address blt $1, $alignmod64_w # U : $bigalign_w: /* * $3 - number quads left to go * $5 - target address (aligned 0mod64) * $17 - mask of stuff to store * Scratch registers available: $7, $2, $4, $1 * we know that we'll be taking a minimum of one trip through * CWG Section 3.7.6: do not expect a sustained store rate of > 1/cycle * Assumes the wh64 needs to be for 2 trips through the loop in the future * The wh64 is issued on for the starting destination address for trip +2 * through the loop, and if there are less than two trips left, the target * address will be for the current trip. 
*/ $do_wh64_w: wh64 ($4) # L1 : memory subsystem write hint subq $3, 24, $2 # E : For determining future wh64 addresses stq $17, 0($5) # L : nop # E : addq $5, 128, $4 # E : speculative target of next wh64 stq $17, 8($5) # L : stq $17, 16($5) # L : addq $5, 64, $7 # E : Fallback address for wh64 (== next trip addr) stq $17, 24($5) # L : stq $17, 32($5) # L : cmovlt $2, $7, $4 # E : Latency 2, extra mapping cycle nop stq $17, 40($5) # L : stq $17, 48($5) # L : subq $3, 16, $2 # E : Repeat the loop at least once more? nop stq $17, 56($5) # L : addq $5, 64, $5 # E : subq $3, 8, $3 # E : bge $2, $do_wh64_w # U : nop nop nop beq $3, no_quad_w # U : Might have finished already .align 4 /* * Simple loop for trailing quadwords, or for small amounts * of data (where we can't use an unrolled loop and wh64) */ loop_w: stq $17,0($5) # L : subq $3,1,$3 # E : Decrement number quads left addq $5,8,$5 # E : Inc address bne $3,loop_w # U : more? no_quad_w: /* * Write 0..7 trailing bytes. */ nop # E : beq $18,end_w # U : All done? ldq $7,0($5) # L : mskqh $7,$6,$2 # U : Mask final quad insqh $17,$6,$4 # U : New bits bis $2,$4,$1 # E : Put it all together stq $1,0($5) # L : And back to memory ret $31,($26),1 # L0 : within_quad_w: ldq_u $1,0($16) # L : insql $17,$16,$2 # U : New bits mskql $1,$16,$4 # U : Clear old bis $2,$4,$2 # E : New result mskql $2,$6,$4 # U : mskqh $1,$6,$2 # U : bis $2,$4,$1 # E : stq_u $1,0($16) # L : end_w: nop nop nop ret $31,($26),1 # L0 : .end __memset16 EXPORT_SYMBOL(__memset16) memset = ___memset __memset = ___memset EXPORT_SYMBOL(memset) EXPORT_SYMBOL(__memset)
AirFortressIlikara/LS2K0300-linux-4.19
2,999
arch/alpha/lib/callback_srm.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/callback_srm.S */ #include <asm/console.h> #include <asm/export.h> .text #define HWRPB_CRB_OFFSET 0xc0 #if defined(CONFIG_ALPHA_SRM) || defined(CONFIG_ALPHA_GENERIC) .align 4 srm_dispatch: #if defined(CONFIG_ALPHA_GENERIC) ldl $4,alpha_using_srm beq $4,nosrm #endif ldq $0,hwrpb # gp is set up by CALLBACK macro. ldl $25,0($25) # Pick up the wrapper data. mov $20,$21 # Shift arguments right. mov $19,$20 ldq $1,HWRPB_CRB_OFFSET($0) mov $18,$19 mov $17,$18 mov $16,$17 addq $0,$1,$2 # CRB address ldq $27,0($2) # DISPATCH procedure descriptor (VMS call std) extwl $25,0,$16 # SRM callback function code ldq $3,8($27) # call address extwl $25,2,$25 # argument information (VMS calling std) jmp ($3) # Return directly to caller of wrapper. .align 4 .globl srm_fixup .ent srm_fixup srm_fixup: ldgp $29,0($27) #if defined(CONFIG_ALPHA_GENERIC) ldl $4,alpha_using_srm beq $4,nosrm #endif ldq $0,hwrpb ldq $1,HWRPB_CRB_OFFSET($0) addq $0,$1,$2 # CRB address ldq $27,16($2) # VA of FIXUP procedure descriptor ldq $3,8($27) # call address lda $25,2($31) # two integer arguments jmp ($3) # Return directly to caller of srm_fixup. 
.end srm_fixup #if defined(CONFIG_ALPHA_GENERIC) .align 3 nosrm: lda $0,-1($31) ret #endif #define CALLBACK(NAME, CODE, ARG_CNT) \ .align 4; .globl callback_##NAME; .ent callback_##NAME; callback_##NAME##: \ ldgp $29,0($27); br $25,srm_dispatch; .word CODE, ARG_CNT; .end callback_##NAME #else /* defined(CONFIG_ALPHA_SRM) || defined(CONFIG_ALPHA_GENERIC) */ #define CALLBACK(NAME, CODE, ARG_CNT) \ .align 3; .globl callback_##NAME; .ent callback_##NAME; callback_##NAME##: \ lda $0,-1($31); ret; .end callback_##NAME .align 3 .globl srm_fixup .ent srm_fixup srm_fixup: lda $0,-1($31) ret .end srm_fixup #endif /* defined(CONFIG_ALPHA_SRM) || defined(CONFIG_ALPHA_GENERIC) */ CALLBACK(puts, CCB_PUTS, 4) CALLBACK(open, CCB_OPEN, 3) CALLBACK(close, CCB_CLOSE, 2) CALLBACK(read, CCB_READ, 5) CALLBACK(open_console, CCB_OPEN_CONSOLE, 1) CALLBACK(close_console, CCB_CLOSE_CONSOLE, 1) CALLBACK(getenv, CCB_GET_ENV, 4) CALLBACK(setenv, CCB_SET_ENV, 4) CALLBACK(getc, CCB_GETC, 2) CALLBACK(reset_term, CCB_RESET_TERM, 2) CALLBACK(term_int, CCB_SET_TERM_INT, 3) CALLBACK(term_ctl, CCB_SET_TERM_CTL, 3) CALLBACK(process_keycode, CCB_PROCESS_KEYCODE, 3) CALLBACK(ioctl, CCB_IOCTL, 6) CALLBACK(write, CCB_WRITE, 5) CALLBACK(reset_env, CCB_RESET_ENV, 4) CALLBACK(save_env, CCB_SAVE_ENV, 1) CALLBACK(pswitch, CCB_PSWITCH, 3) CALLBACK(bios_emul, CCB_BIOS_EMUL, 5) EXPORT_SYMBOL(callback_getenv) EXPORT_SYMBOL(callback_setenv) EXPORT_SYMBOL(callback_save_env) .data __alpha_using_srm: # For use by bootpheader .long 7 # value is not 1 for link debugging .weak alpha_using_srm; alpha_using_srm = __alpha_using_srm __callback_init_done: # For use by bootpheader .long 7 # value is not 1 for link debugging .weak callback_init_done; callback_init_done = __callback_init_done
AirFortressIlikara/LS2K0300-linux-4.19
6,378
arch/alpha/lib/ev6-memcpy.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/ev6-memcpy.S * 21264 version by Rick Gorton <rick.gorton@alpha-processor.com> * * Reasonably optimized memcpy() routine for the Alpha 21264 * * - memory accessed as aligned quadwords only * - uses bcmpge to compare 8 bytes in parallel * * Much of the information about 21264 scheduling/coding comes from: * Compiler Writer's Guide for the Alpha 21264 * abbreviated as 'CWG' in other comments here * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html * Scheduling notation: * E - either cluster * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1 * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 * * Temp usage notes: * $1,$2, - scratch */ #include <asm/export.h> .set noreorder .set noat .align 4 .globl memcpy .ent memcpy memcpy: .frame $30,0,$26,0 .prologue 0 mov $16, $0 # E : copy dest to return ble $18, $nomoredata # U : done with the copy? xor $16, $17, $1 # E : are source and dest alignments the same? and $1, 7, $1 # E : are they the same mod 8? bne $1, $misaligned # U : Nope - gotta do this the slow way /* source and dest are same mod 8 address */ and $16, 7, $1 # E : Are both 0mod8? beq $1, $both_0mod8 # U : Yes nop # E : /* * source and dest are same misalignment. move a byte at a time * until a 0mod8 alignment for both is reached. * At least one byte more to move */ $head_align: ldbu $1, 0($17) # L : grab a byte subq $18, 1, $18 # E : count-- addq $17, 1, $17 # E : src++ stb $1, 0($16) # L : addq $16, 1, $16 # E : dest++ and $16, 7, $1 # E : Are we at 0mod8 yet? ble $18, $nomoredata # U : done with the copy? bne $1, $head_align # U : $both_0mod8: cmple $18, 127, $1 # E : Can we unroll the loop? 
bne $1, $no_unroll # U : and $16, 63, $1 # E : get mod64 alignment beq $1, $do_unroll # U : no single quads to fiddle $single_head_quad: ldq $1, 0($17) # L : get 8 bytes subq $18, 8, $18 # E : count -= 8 addq $17, 8, $17 # E : src += 8 nop # E : stq $1, 0($16) # L : store addq $16, 8, $16 # E : dest += 8 and $16, 63, $1 # E : get mod64 alignment bne $1, $single_head_quad # U : still not fully aligned $do_unroll: addq $16, 64, $7 # E : Initial (+1 trip) wh64 address cmple $18, 127, $1 # E : Can we go through the unrolled loop? bne $1, $tail_quads # U : Nope nop # E : $unroll_body: wh64 ($7) # L1 : memory subsystem hint: 64 bytes at # ($7) are about to be over-written ldq $6, 0($17) # L0 : bytes 0..7 nop # E : nop # E : ldq $4, 8($17) # L : bytes 8..15 ldq $5, 16($17) # L : bytes 16..23 addq $7, 64, $7 # E : Update next wh64 address nop # E : ldq $3, 24($17) # L : bytes 24..31 addq $16, 64, $1 # E : fallback value for wh64 nop # E : nop # E : addq $17, 32, $17 # E : src += 32 bytes stq $6, 0($16) # L : bytes 0..7 nop # E : nop # E : stq $4, 8($16) # L : bytes 8..15 stq $5, 16($16) # L : bytes 16..23 subq $18, 192, $2 # E : At least two more trips to go? nop # E : stq $3, 24($16) # L : bytes 24..31 addq $16, 32, $16 # E : dest += 32 bytes nop # E : nop # E : ldq $6, 0($17) # L : bytes 0..7 ldq $4, 8($17) # L : bytes 8..15 cmovlt $2, $1, $7 # E : Latency 2, extra map slot - Use # fallback wh64 address if < 2 more trips nop # E : ldq $5, 16($17) # L : bytes 16..23 ldq $3, 24($17) # L : bytes 24..31 addq $16, 32, $16 # E : dest += 32 subq $18, 64, $18 # E : count -= 64 addq $17, 32, $17 # E : src += 32 stq $6, -32($16) # L : bytes 0..7 stq $4, -24($16) # L : bytes 8..15 cmple $18, 63, $1 # E : At least one more trip? stq $5, -16($16) # L : bytes 16..23 stq $3, -8($16) # L : bytes 24..31 nop # E : beq $1, $unroll_body $tail_quads: $no_unroll: .align 4 subq $18, 8, $18 # E : At least a quad left? 
blt $18, $less_than_8 # U : Nope nop # E : nop # E : $move_a_quad: ldq $1, 0($17) # L : fetch 8 subq $18, 8, $18 # E : count -= 8 addq $17, 8, $17 # E : src += 8 nop # E : stq $1, 0($16) # L : store 8 addq $16, 8, $16 # E : dest += 8 bge $18, $move_a_quad # U : nop # E : $less_than_8: .align 4 addq $18, 8, $18 # E : add back for trailing bytes ble $18, $nomoredata # U : All-done nop # E : nop # E : /* Trailing bytes */ $tail_bytes: subq $18, 1, $18 # E : count-- ldbu $1, 0($17) # L : fetch a byte addq $17, 1, $17 # E : src++ nop # E : stb $1, 0($16) # L : store a byte addq $16, 1, $16 # E : dest++ bgt $18, $tail_bytes # U : more to be done? nop # E : /* branching to exit takes 3 extra cycles, so replicate exit here */ ret $31, ($26), 1 # L0 : nop # E : nop # E : nop # E : $misaligned: mov $0, $4 # E : dest temp and $0, 7, $1 # E : dest alignment mod8 beq $1, $dest_0mod8 # U : life doesnt totally suck nop $aligndest: ble $18, $nomoredata # U : ldbu $1, 0($17) # L : fetch a byte subq $18, 1, $18 # E : count-- addq $17, 1, $17 # E : src++ stb $1, 0($4) # L : store it addq $4, 1, $4 # E : dest++ and $4, 7, $1 # E : dest 0mod8 yet? bne $1, $aligndest # U : go until we are aligned. /* Source has unknown alignment, but dest is known to be 0mod8 */ $dest_0mod8: subq $18, 8, $18 # E : At least a quad left? 
blt $18, $misalign_tail # U : Nope ldq_u $3, 0($17) # L : seed (rotating load) of 8 bytes nop # E : $mis_quad: ldq_u $16, 8($17) # L : Fetch next 8 extql $3, $17, $3 # U : masking extqh $16, $17, $1 # U : masking bis $3, $1, $1 # E : merged bytes to store subq $18, 8, $18 # E : count -= 8 addq $17, 8, $17 # E : src += 8 stq $1, 0($4) # L : store 8 (aligned) mov $16, $3 # E : "rotate" source data addq $4, 8, $4 # E : dest += 8 bge $18, $mis_quad # U : More quads to move nop nop $misalign_tail: addq $18, 8, $18 # E : account for tail stuff ble $18, $nomoredata # U : nop nop $misalign_byte: ldbu $1, 0($17) # L : fetch 1 subq $18, 1, $18 # E : count-- addq $17, 1, $17 # E : src++ nop # E : stb $1, 0($4) # L : store addq $4, 1, $4 # E : dest++ bgt $18, $misalign_byte # U : more to go? nop $nomoredata: ret $31, ($26), 1 # L0 : nop # E : nop # E : nop # E : .end memcpy EXPORT_SYMBOL(memcpy) /* For backwards module compatibility. */ __memcpy = memcpy .globl __memcpy
AirFortressIlikara/LS2K0300-linux-4.19
9,666
arch/alpha/lib/ev6-stxcpy.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/ev6-stxcpy.S * 21264 version contributed by Rick Gorton <rick.gorton@alpha-processor.com> * * Copy a null-terminated string from SRC to DST. * * This is an internal routine used by strcpy, stpcpy, and strcat. * As such, it uses special linkage conventions to make implementation * of these public functions more efficient. * * On input: * t9 = return address * a0 = DST * a1 = SRC * * On output: * t12 = bitmask (with one bit set) indicating the last byte written * a0 = unaligned address of the last *word* written * * Furthermore, v0, a3-a5, t11, and t12 are untouched. * * Much of the information about 21264 scheduling/coding comes from: * Compiler Writer's Guide for the Alpha 21264 * abbreviated as 'CWG' in other comments here * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html * Scheduling notation: * E - either cluster * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1 * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 * Try not to change the actual algorithm if possible for consistency. */ #include <asm/regdef.h> .set noat .set noreorder .text /* There is a problem with either gdb (as of 4.16) or gas (as of 2.7) that doesn't like putting the entry point for a procedure somewhere in the middle of the procedure descriptor. Work around this by putting the aligned copy in its own procedure descriptor */ .ent stxcpy_aligned .align 4 stxcpy_aligned: .frame sp, 0, t9 .prologue 0 /* On entry to this basic block: t0 == the first destination word for masking back in t1 == the first source word. */ /* Create the 1st output word and detect 0's in the 1st input word. 
*/ lda t2, -1 # E : build a mask against false zero mskqh t2, a1, t2 # U : detection in the src word (stall) mskqh t1, a1, t3 # U : ornot t1, t2, t2 # E : (stall) mskql t0, a1, t0 # U : assemble the first output word cmpbge zero, t2, t8 # E : bits set iff null found or t0, t3, t1 # E : (stall) bne t8, $a_eos # U : (stall) /* On entry to this basic block: t0 == the first destination word for masking back in t1 == a source word not containing a null. */ /* Nops here to separate store quads from load quads */ $a_loop: stq_u t1, 0(a0) # L : addq a0, 8, a0 # E : nop nop ldq_u t1, 0(a1) # L : Latency=3 addq a1, 8, a1 # E : cmpbge zero, t1, t8 # E : (3 cycle stall) beq t8, $a_loop # U : (stall for t8) /* Take care of the final (partial) word store. On entry to this basic block we have: t1 == the source word containing the null t8 == the cmpbge mask that found it. */ $a_eos: negq t8, t6 # E : find low bit set and t8, t6, t12 # E : (stall) /* For the sake of the cache, don't read a destination word if we're not going to need it. */ and t12, 0x80, t6 # E : (stall) bne t6, 1f # U : (stall) /* We're doing a partial word store and so need to combine our source and original destination words. */ ldq_u t0, 0(a0) # L : Latency=3 subq t12, 1, t6 # E : zapnot t1, t6, t1 # U : clear src bytes >= null (stall) or t12, t6, t8 # E : (stall) zap t0, t8, t0 # E : clear dst bytes <= null or t0, t1, t1 # E : (stall) nop nop 1: stq_u t1, 0(a0) # L : ret (t9) # L0 : Latency=3 nop nop .end stxcpy_aligned .align 4 .ent __stxcpy .globl __stxcpy __stxcpy: .frame sp, 0, t9 .prologue 0 /* Are source and destination co-aligned? */ xor a0, a1, t0 # E : unop # E : and t0, 7, t0 # E : (stall) bne t0, $unaligned # U : (stall) /* We are co-aligned; take care of a partial first word. */ ldq_u t1, 0(a1) # L : load first src word and a0, 7, t0 # E : take care not to load a word ... addq a1, 8, a1 # E : beq t0, stxcpy_aligned # U : ... 
if we wont need it (stall) ldq_u t0, 0(a0) # L : br stxcpy_aligned # L0 : Latency=3 nop nop /* The source and destination are not co-aligned. Align the destination and cope. We have to be very careful about not reading too much and causing a SEGV. */ .align 4 $u_head: /* We know just enough now to be able to assemble the first full source word. We can still find a zero at the end of it that prevents us from outputting the whole thing. On entry to this basic block: t0 == the first dest word, for masking back in, if needed else 0 t1 == the low bits of the first source word t6 == bytemask that is -1 in dest word bytes */ ldq_u t2, 8(a1) # L : addq a1, 8, a1 # E : extql t1, a1, t1 # U : (stall on a1) extqh t2, a1, t4 # U : (stall on a1) mskql t0, a0, t0 # U : or t1, t4, t1 # E : mskqh t1, a0, t1 # U : (stall on t1) or t0, t1, t1 # E : (stall on t1) or t1, t6, t6 # E : cmpbge zero, t6, t8 # E : (stall) lda t6, -1 # E : for masking just below bne t8, $u_final # U : (stall) mskql t6, a1, t6 # U : mask out the bits we have or t6, t2, t2 # E : already extracted before (stall) cmpbge zero, t2, t8 # E : testing eos (stall) bne t8, $u_late_head_exit # U : (stall) /* Finally, we've got all the stupid leading edge cases taken care of and we can set up to enter the main loop. */ stq_u t1, 0(a0) # L : store first output word addq a0, 8, a0 # E : extql t2, a1, t0 # U : position ho-bits of lo word ldq_u t2, 8(a1) # U : read next high-order source word addq a1, 8, a1 # E : cmpbge zero, t2, t8 # E : (stall for t2) nop # E : bne t8, $u_eos # U : (stall) /* Unaligned copy main loop. In order to avoid reading too much, the loop is structured to detect zeros in aligned source words. This has, unfortunately, effectively pulled half of a loop iteration out into the head and half into the tail, but it does prevent nastiness from accumulating in the very thing we want to run as fast as possible. 
On entry to this basic block: t0 == the shifted high-order bits from the previous source word t2 == the unshifted current source word We further know that t2 does not contain a null terminator. */ .align 3 $u_loop: extqh t2, a1, t1 # U : extract high bits for current word addq a1, 8, a1 # E : (stall) extql t2, a1, t3 # U : extract low bits for next time (stall) addq a0, 8, a0 # E : or t0, t1, t1 # E : current dst word now complete ldq_u t2, 0(a1) # L : Latency=3 load high word for next time stq_u t1, -8(a0) # L : save the current word (stall) mov t3, t0 # E : cmpbge zero, t2, t8 # E : test new word for eos beq t8, $u_loop # U : (stall) nop nop /* We've found a zero somewhere in the source word we just read. If it resides in the lower half, we have one (probably partial) word to write out, and if it resides in the upper half, we have one full and one partial word left to write out. On entry to this basic block: t0 == the shifted high-order bits from the previous source word t2 == the unshifted current source word. */ $u_eos: extqh t2, a1, t1 # U : or t0, t1, t1 # E : first (partial) source word complete (stall) cmpbge zero, t1, t8 # E : is the null in this first bit? (stall) bne t8, $u_final # U : (stall) $u_late_head_exit: stq_u t1, 0(a0) # L : the null was in the high-order bits addq a0, 8, a0 # E : extql t2, a1, t1 # U : cmpbge zero, t1, t8 # E : (stall) /* Take care of a final (probably partial) result word. On entry to this basic block: t1 == assembled source word t8 == cmpbge mask that found the null. 
*/ $u_final: negq t8, t6 # E : isolate low bit set and t6, t8, t12 # E : (stall) and t12, 0x80, t6 # E : avoid dest word load if we can (stall) bne t6, 1f # U : (stall) ldq_u t0, 0(a0) # E : subq t12, 1, t6 # E : or t6, t12, t8 # E : (stall) zapnot t1, t6, t1 # U : kill source bytes >= null (stall) zap t0, t8, t0 # U : kill dest bytes <= null (2 cycle data stall) or t0, t1, t1 # E : (stall) nop nop 1: stq_u t1, 0(a0) # L : ret (t9) # L0 : Latency=3 nop nop /* Unaligned copy entry point. */ .align 4 $unaligned: ldq_u t1, 0(a1) # L : load first source word and a0, 7, t4 # E : find dest misalignment and a1, 7, t5 # E : find src misalignment /* Conditionally load the first destination word and a bytemask with 0xff indicating that the destination byte is sacrosanct. */ mov zero, t0 # E : mov zero, t6 # E : beq t4, 1f # U : ldq_u t0, 0(a0) # L : lda t6, -1 # E : mskql t6, a0, t6 # U : nop nop nop 1: subq a1, t4, a1 # E : sub dest misalignment from src addr /* If source misalignment is larger than dest misalignment, we need extra startup checks to avoid SEGV. */ cmplt t4, t5, t12 # E : beq t12, $u_head # U : lda t2, -1 # E : mask out leading garbage in source mskqh t2, t5, t2 # U : ornot t1, t2, t3 # E : (stall) cmpbge zero, t3, t8 # E : is there a zero? (stall) beq t8, $u_head # U : (stall) /* At this point we've found a zero in the first partial word of the source. We need to isolate the valid source data and mask it into the original destination data. (Incidentally, we know that we'll need at least one byte of that original dest word.) */ ldq_u t0, 0(a0) # L : negq t8, t6 # E : build bitmask of bytes <= zero and t6, t8, t12 # E : (stall) and a1, 7, t5 # E : subq t12, 1, t6 # E : or t6, t12, t8 # E : (stall) srl t12, t5, t12 # U : adjust final null return value zapnot t2, t8, t2 # U : prepare source word; mirror changes (stall) and t1, t2, t1 # E : to source validity mask extql t2, a1, t2 # U : extql t1, a1, t1 # U : (stall) andnot t0, t2, t0 # .. 
e1 : zero place for source to reside (stall) or t0, t1, t1 # e1 : and put it there stq_u t1, 0(a0) # .. e0 : (stall) ret (t9) # e1 : nop .end __stxcpy
AirFortressIlikara/LS2K0300-linux-4.19
1,914
arch/alpha/lib/strncat.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/strncat.S * Contributed by Richard Henderson (rth@tamu.edu) * * Append no more than COUNT characters from the null-terminated string SRC * to the null-terminated string DST. Always null-terminate the new DST. * * This differs slightly from the semantics in libc in that we never write * past count, whereas libc may write to count+1. This follows the generic * implementation in lib/string.c and is, IMHO, more sensible. */ #include <asm/export.h> .text .align 3 .globl strncat .ent strncat strncat: .frame $30, 0, $26 .prologue 0 mov $16, $0 # set up return value beq $18, $zerocount /* Find the end of the string. */ ldq_u $1, 0($16) # load first quadword ($16 may be misaligned) lda $2, -1($31) insqh $2, $16, $2 andnot $16, 7, $16 or $2, $1, $1 cmpbge $31, $1, $2 # bits set iff byte == 0 bne $2, $found $loop: ldq $1, 8($16) addq $16, 8, $16 cmpbge $31, $1, $2 beq $2, $loop $found: negq $2, $3 # clear all but least set bit and $2, $3, $2 and $2, 0xf0, $3 # binary search for that set bit and $2, 0xcc, $4 and $2, 0xaa, $5 cmovne $3, 4, $3 cmovne $4, 2, $4 cmovne $5, 1, $5 addq $3, $4, $3 addq $16, $5, $16 addq $16, $3, $16 /* Now do the append. */ bsr $23, __stxncpy /* Worry about the null termination. */ zapnot $1, $27, $2 # was last byte a null? bne $2, 0f ret 0: cmplt $27, $24, $2 # did we fill the buffer completely? or $2, $18, $2 bne $2, 2f and $24, 0x80, $2 # no zero next byte bne $2, 1f /* Here there are bytes left in the current word. Clear one. */ addq $24, $24, $24 # end-of-count bit <<= 1 2: zap $1, $24, $1 stq_u $1, 0($16) ret 1: /* Here we must read the next DST word and clear the first byte. */ ldq_u $1, 8($16) zap $1, 1, $1 stq_u $1, 8($16) $zerocount: ret .end strncat EXPORT_SYMBOL(strncat)
AirFortressIlikara/LS2K0300-linux-4.19
10,883
arch/alpha/lib/stxncpy.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/stxncpy.S * Contributed by Richard Henderson (rth@tamu.edu) * * Copy no more than COUNT bytes of the null-terminated string from * SRC to DST. * * This is an internal routine used by strncpy, stpncpy, and strncat. * As such, it uses special linkage conventions to make implementation * of these public functions more efficient. * * On input: * t9 = return address * a0 = DST * a1 = SRC * a2 = COUNT * * Furthermore, COUNT may not be zero. * * On output: * t0 = last word written * t10 = bitmask (with one bit set) indicating the byte position of * the end of the range specified by COUNT * t12 = bitmask (with one bit set) indicating the last byte written * a0 = unaligned address of the last *word* written * a2 = the number of full words left in COUNT * * Furthermore, v0, a3-a5, t11, and $at are untouched. */ #include <asm/regdef.h> .set noat .set noreorder .text /* There is a problem with either gdb (as of 4.16) or gas (as of 2.7) that doesn't like putting the entry point for a procedure somewhere in the middle of the procedure descriptor. Work around this by putting the aligned copy in its own procedure descriptor */ .ent stxncpy_aligned .align 3 stxncpy_aligned: .frame sp, 0, t9, 0 .prologue 0 /* On entry to this basic block: t0 == the first destination word for masking back in t1 == the first source word. */ /* Create the 1st output word and detect 0's in the 1st input word. */ lda t2, -1 # e1 : build a mask against false zero mskqh t2, a1, t2 # e0 : detection in the src word mskqh t1, a1, t3 # e0 : ornot t1, t2, t2 # .. e1 : mskql t0, a1, t0 # e0 : assemble the first output word cmpbge zero, t2, t8 # .. e1 : bits set iff null found or t0, t3, t0 # e0 : beq a2, $a_eoc # .. e1 : bne t8, $a_eos # .. e1 : /* On entry to this basic block: t0 == a source word not containing a null. */ $a_loop: stq_u t0, 0(a0) # e0 : addq a0, 8, a0 # .. e1 : ldq_u t0, 0(a1) # e0 : addq a1, 8, a1 # .. 
e1 : subq a2, 1, a2 # e0 : cmpbge zero, t0, t8 # .. e1 (stall) beq a2, $a_eoc # e1 : beq t8, $a_loop # e1 : /* Take care of the final (partial) word store. At this point the end-of-count bit is set in t8 iff it applies. On entry to this basic block we have: t0 == the source word containing the null t8 == the cmpbge mask that found it. */ $a_eos: negq t8, t12 # e0 : find low bit set and t8, t12, t12 # e1 (stall) /* For the sake of the cache, don't read a destination word if we're not going to need it. */ and t12, 0x80, t6 # e0 : bne t6, 1f # .. e1 (zdb) /* We're doing a partial word store and so need to combine our source and original destination words. */ ldq_u t1, 0(a0) # e0 : subq t12, 1, t6 # .. e1 : or t12, t6, t8 # e0 : unop # zapnot t0, t8, t0 # e0 : clear src bytes > null zap t1, t8, t1 # .. e1 : clear dst bytes <= null or t0, t1, t0 # e1 : 1: stq_u t0, 0(a0) # e0 : ret (t9) # e1 : /* Add the end-of-count bit to the eos detection bitmask. */ $a_eoc: or t10, t8, t8 br $a_eos .end stxncpy_aligned .align 3 .ent __stxncpy .globl __stxncpy __stxncpy: .frame sp, 0, t9, 0 .prologue 0 /* Are source and destination co-aligned? */ xor a0, a1, t1 # e0 : and a0, 7, t0 # .. e1 : find dest misalignment and t1, 7, t1 # e0 : addq a2, t0, a2 # .. e1 : bias count by dest misalignment subq a2, 1, a2 # e0 : and a2, 7, t2 # e1 : srl a2, 3, a2 # e0 : a2 = loop counter = (count - 1)/8 addq zero, 1, t10 # .. e1 : sll t10, t2, t10 # e0 : t10 = bitmask of last count byte bne t1, $unaligned # .. e1 : /* We are co-aligned; take care of a partial first word. */ ldq_u t1, 0(a1) # e0 : load first src word addq a1, 8, a1 # .. e1 : beq t0, stxncpy_aligned # avoid loading dest word if not needed ldq_u t0, 0(a0) # e0 : br stxncpy_aligned # .. e1 : /* The source and destination are not co-aligned. Align the destination and cope. We have to be very careful about not reading too much and causing a SEGV. 
*/ .align 3 $u_head: /* We know just enough now to be able to assemble the first full source word. We can still find a zero at the end of it that prevents us from outputting the whole thing. On entry to this basic block: t0 == the first dest word, unmasked t1 == the shifted low bits of the first source word t6 == bytemask that is -1 in dest word bytes */ ldq_u t2, 8(a1) # e0 : load second src word addq a1, 8, a1 # .. e1 : mskql t0, a0, t0 # e0 : mask trailing garbage in dst extqh t2, a1, t4 # e0 : or t1, t4, t1 # e1 : first aligned src word complete mskqh t1, a0, t1 # e0 : mask leading garbage in src or t0, t1, t0 # e0 : first output word complete or t0, t6, t6 # e1 : mask original data for zero test cmpbge zero, t6, t8 # e0 : beq a2, $u_eocfin # .. e1 : lda t6, -1 # e0 : bne t8, $u_final # .. e1 : mskql t6, a1, t6 # e0 : mask out bits already seen nop # .. e1 : stq_u t0, 0(a0) # e0 : store first output word or t6, t2, t2 # .. e1 : cmpbge zero, t2, t8 # e0 : find nulls in second partial addq a0, 8, a0 # .. e1 : subq a2, 1, a2 # e0 : bne t8, $u_late_head_exit # .. e1 : /* Finally, we've got all the stupid leading edge cases taken care of and we can set up to enter the main loop. */ extql t2, a1, t1 # e0 : position hi-bits of lo word beq a2, $u_eoc # .. e1 : ldq_u t2, 8(a1) # e0 : read next high-order source word addq a1, 8, a1 # .. e1 : extqh t2, a1, t0 # e0 : position lo-bits of hi word (stall) cmpbge zero, t2, t8 # .. e1 : nop # e0 : bne t8, $u_eos # .. e1 : /* Unaligned copy main loop. In order to avoid reading too much, the loop is structured to detect zeros in aligned source words. This has, unfortunately, effectively pulled half of a loop iteration out into the head and half into the tail, but it does prevent nastiness from accumulating in the very thing we want to run as fast as possible. 
On entry to this basic block: t0 == the shifted low-order bits from the current source word t1 == the shifted high-order bits from the previous source word t2 == the unshifted current source word We further know that t2 does not contain a null terminator. */ .align 3 $u_loop: or t0, t1, t0 # e0 : current dst word now complete subq a2, 1, a2 # .. e1 : decrement word count stq_u t0, 0(a0) # e0 : save the current word addq a0, 8, a0 # .. e1 : extql t2, a1, t1 # e0 : extract high bits for next time beq a2, $u_eoc # .. e1 : ldq_u t2, 8(a1) # e0 : load high word for next time addq a1, 8, a1 # .. e1 : nop # e0 : cmpbge zero, t2, t8 # e1 : test new word for eos (stall) extqh t2, a1, t0 # e0 : extract low bits for current word beq t8, $u_loop # .. e1 : /* We've found a zero somewhere in the source word we just read. If it resides in the lower half, we have one (probably partial) word to write out, and if it resides in the upper half, we have one full and one partial word left to write out. On entry to this basic block: t0 == the shifted low-order bits from the current source word t1 == the shifted high-order bits from the previous source word t2 == the unshifted current source word. */ $u_eos: or t0, t1, t0 # e0 : first (partial) source word complete nop # .. e1 : cmpbge zero, t0, t8 # e0 : is the null in this first bit? bne t8, $u_final # .. e1 (zdb) stq_u t0, 0(a0) # e0 : the null was in the high-order bits addq a0, 8, a0 # .. e1 : subq a2, 1, a2 # e1 : $u_late_head_exit: extql t2, a1, t0 # .. e0 : cmpbge zero, t0, t8 # e0 : or t8, t10, t6 # e1 : cmoveq a2, t6, t8 # e0 : nop # .. e1 : /* Take care of a final (probably partial) result word. On entry to this basic block: t0 == assembled source word t8 == cmpbge mask that found the null. */ $u_final: negq t8, t6 # e0 : isolate low bit set and t6, t8, t12 # e1 : and t12, 0x80, t6 # e0 : avoid dest word load if we can bne t6, 1f # .. e1 (zdb) ldq_u t1, 0(a0) # e0 : subq t12, 1, t6 # .. 
e1 : or t6, t12, t8 # e0 : zapnot t0, t8, t0 # .. e1 : kill source bytes > null zap t1, t8, t1 # e0 : kill dest bytes <= null or t0, t1, t0 # e1 : 1: stq_u t0, 0(a0) # e0 : ret (t9) # .. e1 : /* Got to end-of-count before end of string. On entry to this basic block: t1 == the shifted high-order bits from the previous source word */ $u_eoc: and a1, 7, t6 # e1 : sll t10, t6, t6 # e0 : and t6, 0xff, t6 # e0 : bne t6, 1f # .. e1 : ldq_u t2, 8(a1) # e0 : load final src word nop # .. e1 : extqh t2, a1, t0 # e0 : extract low bits for last word or t1, t0, t1 # e1 : 1: cmpbge zero, t1, t8 mov t1, t0 $u_eocfin: # end-of-count, final word or t10, t8, t8 br $u_final /* Unaligned copy entry point. */ .align 3 $unaligned: ldq_u t1, 0(a1) # e0 : load first source word and a0, 7, t4 # .. e1 : find dest misalignment and a1, 7, t5 # e0 : find src misalignment /* Conditionally load the first destination word and a bytemask with 0xff indicating that the destination byte is sacrosanct. */ mov zero, t0 # .. e1 : mov zero, t6 # e0 : beq t4, 1f # .. e1 : ldq_u t0, 0(a0) # e0 : lda t6, -1 # .. e1 : mskql t6, a0, t6 # e0 : subq a1, t4, a1 # .. e1 : sub dest misalignment from src addr /* If source misalignment is larger than dest misalignment, we need extra startup checks to avoid SEGV. */ 1: cmplt t4, t5, t12 # e1 : extql t1, a1, t1 # .. e0 : shift src into place lda t2, -1 # e0 : for creating masks later beq t12, $u_head # .. e1 : extql t2, a1, t2 # e0 : cmpbge zero, t1, t8 # .. e1 : is there a zero? andnot t2, t6, t2 # e0 : dest mask for a single word copy or t8, t10, t5 # .. e1 : test for end-of-count too cmpbge zero, t2, t3 # e0 : cmoveq a2, t5, t8 # .. e1 : andnot t8, t3, t8 # e0 : beq t8, $u_head # .. e1 (zdb) /* At this point we've found a zero in the first partial word of the source. We need to isolate the valid source data and mask it into the original destination data. (Incidentally, we know that we'll need at least one byte of that original dest word.) 
*/ ldq_u t0, 0(a0) # e0 : negq t8, t6 # .. e1 : build bitmask of bytes <= zero mskqh t1, t4, t1 # e0 : and t6, t8, t12 # .. e1 : subq t12, 1, t6 # e0 : or t6, t12, t8 # e1 : zapnot t2, t8, t2 # e0 : prepare source word; mirror changes zapnot t1, t8, t1 # .. e1 : to source validity mask andnot t0, t2, t0 # e0 : zero place for source to reside or t0, t1, t0 # e1 : and put it there stq_u t0, 0(a0) # e0 : ret (t9) # .. e1 : .end __stxncpy
AirFortressIlikara/LS2K0300-linux-4.19
1,361
arch/alpha/lib/ev67-strlen.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/ev67-strlen.S * 21264 version by Rick Gorton <rick.gorton@alpha-processor.com> * * Finds length of a 0-terminated string. Optimized for the * Alpha architecture: * * - memory accessed as aligned quadwords only * - uses bcmpge to compare 8 bytes in parallel * * Much of the information about 21264 scheduling/coding comes from: * Compiler Writer's Guide for the Alpha 21264 * abbreviated as 'CWG' in other comments here * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html * Scheduling notation: * E - either cluster * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1 * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 */ #include <asm/export.h> .set noreorder .set noat .globl strlen .ent strlen .align 4 strlen: ldq_u $1, 0($16) # L : load first quadword ($16 may be misaligned) lda $2, -1($31) # E : insqh $2, $16, $2 # U : andnot $16, 7, $0 # E : or $2, $1, $1 # E : cmpbge $31, $1, $2 # E : $2 <- bitmask: bit i == 1 <==> i-th byte == 0 nop # E : bne $2, $found # U : $loop: ldq $1, 8($0) # L : addq $0, 8, $0 # E : addr += 8 cmpbge $31, $1, $2 # E : beq $2, $loop # U : $found: cttz $2, $3 # U0 : addq $0, $3, $0 # E : subq $0, $16, $0 # E : ret $31, ($26) # L0 : .end strlen EXPORT_SYMBOL(strlen)
AirFortressIlikara/LS2K0300-linux-4.19
11,554
arch/alpha/lib/ev6-stxncpy.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/ev6-stxncpy.S * 21264 version contributed by Rick Gorton <rick.gorton@api-networks.com> * * Copy no more than COUNT bytes of the null-terminated string from * SRC to DST. * * This is an internal routine used by strncpy, stpncpy, and strncat. * As such, it uses special linkage conventions to make implementation * of these public functions more efficient. * * On input: * t9 = return address * a0 = DST * a1 = SRC * a2 = COUNT * * Furthermore, COUNT may not be zero. * * On output: * t0 = last word written * t10 = bitmask (with one bit set) indicating the byte position of * the end of the range specified by COUNT * t12 = bitmask (with one bit set) indicating the last byte written * a0 = unaligned address of the last *word* written * a2 = the number of full words left in COUNT * * Furthermore, v0, a3-a5, t11, and $at are untouched. * * Much of the information about 21264 scheduling/coding comes from: * Compiler Writer's Guide for the Alpha 21264 * abbreviated as 'CWG' in other comments here * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html * Scheduling notation: * E - either cluster * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1 * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 * Try not to change the actual algorithm if possible for consistency. */ #include <asm/regdef.h> .set noat .set noreorder .text /* There is a problem with either gdb (as of 4.16) or gas (as of 2.7) that doesn't like putting the entry point for a procedure somewhere in the middle of the procedure descriptor. Work around this by putting the aligned copy in its own procedure descriptor */ .ent stxncpy_aligned .align 4 stxncpy_aligned: .frame sp, 0, t9, 0 .prologue 0 /* On entry to this basic block: t0 == the first destination word for masking back in t1 == the first source word. */ /* Create the 1st output word and detect 0's in the 1st input word. 
*/ lda t2, -1 # E : build a mask against false zero mskqh t2, a1, t2 # U : detection in the src word (stall) mskqh t1, a1, t3 # U : ornot t1, t2, t2 # E : (stall) mskql t0, a1, t0 # U : assemble the first output word cmpbge zero, t2, t8 # E : bits set iff null found or t0, t3, t0 # E : (stall) beq a2, $a_eoc # U : bne t8, $a_eos # U : nop nop nop /* On entry to this basic block: t0 == a source word not containing a null. */ /* * nops here to: * separate store quads from load quads * limit of 1 bcond/quad to permit training */ $a_loop: stq_u t0, 0(a0) # L : addq a0, 8, a0 # E : subq a2, 1, a2 # E : nop ldq_u t0, 0(a1) # L : addq a1, 8, a1 # E : cmpbge zero, t0, t8 # E : beq a2, $a_eoc # U : beq t8, $a_loop # U : nop nop nop /* Take care of the final (partial) word store. At this point the end-of-count bit is set in t8 iff it applies. On entry to this basic block we have: t0 == the source word containing the null t8 == the cmpbge mask that found it. */ $a_eos: negq t8, t12 # E : find low bit set and t8, t12, t12 # E : (stall) /* For the sake of the cache, don't read a destination word if we're not going to need it. */ and t12, 0x80, t6 # E : (stall) bne t6, 1f # U : (stall) /* We're doing a partial word store and so need to combine our source and original destination words. */ ldq_u t1, 0(a0) # L : subq t12, 1, t6 # E : or t12, t6, t8 # E : (stall) zapnot t0, t8, t0 # U : clear src bytes > null (stall) zap t1, t8, t1 # .. e1 : clear dst bytes <= null or t0, t1, t0 # e1 : (stall) nop nop 1: stq_u t0, 0(a0) # L : ret (t9) # L0 : Latency=3 nop nop /* Add the end-of-count bit to the eos detection bitmask. */ $a_eoc: or t10, t8, t8 # E : br $a_eos # L0 : Latency=3 nop nop .end stxncpy_aligned .align 4 .ent __stxncpy .globl __stxncpy __stxncpy: .frame sp, 0, t9, 0 .prologue 0 /* Are source and destination co-aligned? 
*/ xor a0, a1, t1 # E : and a0, 7, t0 # E : find dest misalignment and t1, 7, t1 # E : (stall) addq a2, t0, a2 # E : bias count by dest misalignment (stall) subq a2, 1, a2 # E : and a2, 7, t2 # E : (stall) srl a2, 3, a2 # U : a2 = loop counter = (count - 1)/8 (stall) addq zero, 1, t10 # E : sll t10, t2, t10 # U : t10 = bitmask of last count byte bne t1, $unaligned # U : /* We are co-aligned; take care of a partial first word. */ ldq_u t1, 0(a1) # L : load first src word addq a1, 8, a1 # E : beq t0, stxncpy_aligned # U : avoid loading dest word if not needed ldq_u t0, 0(a0) # L : nop nop br stxncpy_aligned # .. e1 : nop nop nop /* The source and destination are not co-aligned. Align the destination and cope. We have to be very careful about not reading too much and causing a SEGV. */ .align 4 $u_head: /* We know just enough now to be able to assemble the first full source word. We can still find a zero at the end of it that prevents us from outputting the whole thing. On entry to this basic block: t0 == the first dest word, unmasked t1 == the shifted low bits of the first source word t6 == bytemask that is -1 in dest word bytes */ ldq_u t2, 8(a1) # L : Latency=3 load second src word addq a1, 8, a1 # E : mskql t0, a0, t0 # U : mask trailing garbage in dst extqh t2, a1, t4 # U : (3 cycle stall on t2) or t1, t4, t1 # E : first aligned src word complete (stall) mskqh t1, a0, t1 # U : mask leading garbage in src (stall) or t0, t1, t0 # E : first output word complete (stall) or t0, t6, t6 # E : mask original data for zero test (stall) cmpbge zero, t6, t8 # E : beq a2, $u_eocfin # U : lda t6, -1 # E : nop bne t8, $u_final # U : mskql t6, a1, t6 # U : mask out bits already seen stq_u t0, 0(a0) # L : store first output word or t6, t2, t2 # E : (stall) cmpbge zero, t2, t8 # E : find nulls in second partial addq a0, 8, a0 # E : subq a2, 1, a2 # E : bne t8, $u_late_head_exit # U : /* Finally, we've got all the stupid leading edge cases taken care of and we can set up to enter 
the main loop. */ extql t2, a1, t1 # U : position hi-bits of lo word beq a2, $u_eoc # U : ldq_u t2, 8(a1) # L : read next high-order source word addq a1, 8, a1 # E : extqh t2, a1, t0 # U : position lo-bits of hi word (stall) cmpbge zero, t2, t8 # E : nop bne t8, $u_eos # U : /* Unaligned copy main loop. In order to avoid reading too much, the loop is structured to detect zeros in aligned source words. This has, unfortunately, effectively pulled half of a loop iteration out into the head and half into the tail, but it does prevent nastiness from accumulating in the very thing we want to run as fast as possible. On entry to this basic block: t0 == the shifted low-order bits from the current source word t1 == the shifted high-order bits from the previous source word t2 == the unshifted current source word We further know that t2 does not contain a null terminator. */ .align 4 $u_loop: or t0, t1, t0 # E : current dst word now complete subq a2, 1, a2 # E : decrement word count extql t2, a1, t1 # U : extract low bits for next time addq a0, 8, a0 # E : stq_u t0, -8(a0) # U : save the current word beq a2, $u_eoc # U : ldq_u t2, 8(a1) # U : Latency=3 load high word for next time addq a1, 8, a1 # E : extqh t2, a1, t0 # U : extract low bits (2 cycle stall) cmpbge zero, t2, t8 # E : test new word for eos nop beq t8, $u_loop # U : /* We've found a zero somewhere in the source word we just read. If it resides in the lower half, we have one (probably partial) word to write out, and if it resides in the upper half, we have one full and one partial word left to write out. On entry to this basic block: t0 == the shifted low-order bits from the current source word t1 == the shifted high-order bits from the previous source word t2 == the unshifted current source word. */ $u_eos: or t0, t1, t0 # E : first (partial) source word complete nop cmpbge zero, t0, t8 # E : is the null in this first bit? 
(stall) bne t8, $u_final # U : (stall) stq_u t0, 0(a0) # L : the null was in the high-order bits addq a0, 8, a0 # E : subq a2, 1, a2 # E : nop $u_late_head_exit: extql t2, a1, t0 # U : cmpbge zero, t0, t8 # E : or t8, t10, t6 # E : (stall) cmoveq a2, t6, t8 # E : Latency=2, extra map slot (stall) /* Take care of a final (probably partial) result word. On entry to this basic block: t0 == assembled source word t8 == cmpbge mask that found the null. */ $u_final: negq t8, t6 # E : isolate low bit set and t6, t8, t12 # E : (stall) and t12, 0x80, t6 # E : avoid dest word load if we can (stall) bne t6, 1f # U : (stall) ldq_u t1, 0(a0) # L : subq t12, 1, t6 # E : or t6, t12, t8 # E : (stall) zapnot t0, t8, t0 # U : kill source bytes > null zap t1, t8, t1 # U : kill dest bytes <= null or t0, t1, t0 # E : (stall) nop nop 1: stq_u t0, 0(a0) # L : ret (t9) # L0 : Latency=3 /* Got to end-of-count before end of string. On entry to this basic block: t1 == the shifted high-order bits from the previous source word */ $u_eoc: and a1, 7, t6 # E : avoid final load if possible sll t10, t6, t6 # U : (stall) and t6, 0xff, t6 # E : (stall) bne t6, 1f # U : (stall) ldq_u t2, 8(a1) # L : load final src word nop extqh t2, a1, t0 # U : extract low bits for last word (stall) or t1, t0, t1 # E : (stall) 1: cmpbge zero, t1, t8 # E : mov t1, t0 # E : $u_eocfin: # end-of-count, final word or t10, t8, t8 # E : br $u_final # L0 : Latency=3 /* Unaligned copy entry point. */ .align 4 $unaligned: ldq_u t1, 0(a1) # L : load first source word and a0, 7, t4 # E : find dest misalignment and a1, 7, t5 # E : find src misalignment /* Conditionally load the first destination word and a bytemask with 0xff indicating that the destination byte is sacrosanct. 
*/ mov zero, t0 # E : mov zero, t6 # E : beq t4, 1f # U : ldq_u t0, 0(a0) # L : lda t6, -1 # E : mskql t6, a0, t6 # U : nop nop subq a1, t4, a1 # E : sub dest misalignment from src addr /* If source misalignment is larger than dest misalignment, we need extra startup checks to avoid SEGV. */ 1: cmplt t4, t5, t12 # E : extql t1, a1, t1 # U : shift src into place lda t2, -1 # E : for creating masks later beq t12, $u_head # U : (stall) extql t2, a1, t2 # U : cmpbge zero, t1, t8 # E : is there a zero? andnot t2, t6, t2 # E : dest mask for a single word copy or t8, t10, t5 # E : test for end-of-count too cmpbge zero, t2, t3 # E : cmoveq a2, t5, t8 # E : Latency=2, extra map slot nop # E : keep with cmoveq andnot t8, t3, t8 # E : (stall) beq t8, $u_head # U : /* At this point we've found a zero in the first partial word of the source. We need to isolate the valid source data and mask it into the original destination data. (Incidentally, we know that we'll need at least one byte of that original dest word.) */ ldq_u t0, 0(a0) # L : negq t8, t6 # E : build bitmask of bytes <= zero mskqh t1, t4, t1 # U : and t6, t8, t12 # E : subq t12, 1, t6 # E : (stall) or t6, t12, t8 # E : (stall) zapnot t2, t8, t2 # U : prepare source word; mirror changes (stall) zapnot t1, t8, t1 # U : to source validity mask andnot t0, t2, t0 # E : zero place for source to reside or t0, t1, t0 # E : and put it there (stall both t0, t1) stq_u t0, 0(a0) # L : (stall) ret (t9) # L0 : Latency=3 nop nop nop .end __stxncpy
AirFortressIlikara/LS2K0300-linux-4.19
2,735
arch/alpha/lib/memmove.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/memmove.S * * Barely optimized memmove routine for Alpha EV5. * * This is hand-massaged output from the original memcpy.c. We defer to * memcpy whenever possible; the backwards copy loops are not unrolled. */ #include <asm/export.h> .set noat .set noreorder .text .align 4 .globl memmove .ent memmove memmove: ldgp $29, 0($27) unop nop .prologue 1 addq $16,$18,$4 addq $17,$18,$5 cmpule $4,$17,$1 /* dest + n <= src */ cmpule $5,$16,$2 /* dest >= src + n */ bis $1,$2,$1 mov $16,$0 xor $16,$17,$2 bne $1,memcpy !samegp and $2,7,$2 /* Test for src/dest co-alignment. */ and $16,7,$1 cmpule $16,$17,$3 bne $3,$memmove_up /* dest < src */ and $4,7,$1 bne $2,$misaligned_dn unop beq $1,$skip_aligned_byte_loop_head_dn $aligned_byte_loop_head_dn: lda $4,-1($4) lda $5,-1($5) unop ble $18,$egress ldq_u $3,0($5) ldq_u $2,0($4) lda $18,-1($18) extbl $3,$5,$1 insbl $1,$4,$1 mskbl $2,$4,$2 bis $1,$2,$1 and $4,7,$6 stq_u $1,0($4) bne $6,$aligned_byte_loop_head_dn $skip_aligned_byte_loop_head_dn: lda $18,-8($18) blt $18,$skip_aligned_word_loop_dn $aligned_word_loop_dn: ldq $1,-8($5) nop lda $5,-8($5) lda $18,-8($18) stq $1,-8($4) nop lda $4,-8($4) bge $18,$aligned_word_loop_dn $skip_aligned_word_loop_dn: lda $18,8($18) bgt $18,$byte_loop_tail_dn unop ret $31,($26),1 .align 4 $misaligned_dn: nop fnop unop beq $18,$egress $byte_loop_tail_dn: ldq_u $3,-1($5) ldq_u $2,-1($4) lda $5,-1($5) lda $4,-1($4) lda $18,-1($18) extbl $3,$5,$1 insbl $1,$4,$1 mskbl $2,$4,$2 bis $1,$2,$1 stq_u $1,0($4) bgt $18,$byte_loop_tail_dn br $egress $memmove_up: mov $16,$4 mov $17,$5 bne $2,$misaligned_up beq $1,$skip_aligned_byte_loop_head_up $aligned_byte_loop_head_up: unop ble $18,$egress ldq_u $3,0($5) ldq_u $2,0($4) lda $18,-1($18) extbl $3,$5,$1 insbl $1,$4,$1 mskbl $2,$4,$2 bis $1,$2,$1 lda $5,1($5) stq_u $1,0($4) lda $4,1($4) and $4,7,$6 bne $6,$aligned_byte_loop_head_up $skip_aligned_byte_loop_head_up: lda $18,-8($18) blt 
$18,$skip_aligned_word_loop_up $aligned_word_loop_up: ldq $1,0($5) nop lda $5,8($5) lda $18,-8($18) stq $1,0($4) nop lda $4,8($4) bge $18,$aligned_word_loop_up $skip_aligned_word_loop_up: lda $18,8($18) bgt $18,$byte_loop_tail_up unop ret $31,($26),1 .align 4 $misaligned_up: nop fnop unop beq $18,$egress $byte_loop_tail_up: ldq_u $3,0($5) ldq_u $2,0($4) lda $18,-1($18) extbl $3,$5,$1 insbl $1,$4,$1 mskbl $2,$4,$2 bis $1,$2,$1 stq_u $1,0($4) lda $5,1($5) lda $4,1($4) nop bgt $18,$byte_loop_tail_up $egress: ret $31,($26),1 nop nop nop .end memmove EXPORT_SYMBOL(memmove)
AirFortressIlikara/LS2K0300-linux-4.19
1,627
arch/alpha/lib/ev67-strcat.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/ev67-strcat.S * 21264 version contributed by Rick Gorton <rick.gorton@alpha-processor.com> * * Append a null-terminated string from SRC to DST. * * Much of the information about 21264 scheduling/coding comes from: * Compiler Writer's Guide for the Alpha 21264 * abbreviated as 'CWG' in other comments here * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html * Scheduling notation: * E - either cluster * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1 * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 * Try not to change the actual algorithm if possible for consistency. * Commentary: It seems bogus to walk the input string twice - once * to determine the length, and then again while doing the copy. * A significant (future) enhancement would be to only read the input * string once. */ #include <asm/export.h> .text .align 4 .globl strcat .ent strcat strcat: .frame $30, 0, $26 .prologue 0 mov $16, $0 # E : set up return value /* Find the end of the string. */ ldq_u $1, 0($16) # L : load first quadword (a0 may be misaligned) lda $2, -1 # E : insqh $2, $16, $2 # U : andnot $16, 7, $16 # E : or $2, $1, $1 # E : cmpbge $31, $1, $2 # E : bits set iff byte == 0 bne $2, $found # U : $loop: ldq $1, 8($16) # L : addq $16, 8, $16 # E : cmpbge $31, $1, $2 # E : beq $2, $loop # U : $found: cttz $2, $3 # U0 : addq $16, $3, $16 # E : /* Now do the append. */ mov $26, $23 # E : br __stxcpy # L0 : .end strcat EXPORT_SYMBOL(strcat)
AirFortressIlikara/LS2K0300-linux-4.19
5,444
arch/alpha/lib/ev6-memchr.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/ev6-memchr.S * * 21264 version contributed by Rick Gorton <rick.gorton@alpha-processor.com> * * Finds characters in a memory area. Optimized for the Alpha: * * - memory accessed as aligned quadwords only * - uses cmpbge to compare 8 bytes in parallel * - does binary search to find 0 byte in last * quadword (HAKMEM needed 12 instructions to * do this instead of the 9 instructions that * binary search needs). * * For correctness consider that: * * - only minimum number of quadwords may be accessed * - the third argument is an unsigned long * * Much of the information about 21264 scheduling/coding comes from: * Compiler Writer's Guide for the Alpha 21264 * abbreviated as 'CWG' in other comments here * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html * Scheduling notation: * E - either cluster * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1 * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 * Try not to change the actual algorithm if possible for consistency. */ #include <asm/export.h> .set noreorder .set noat .align 4 .globl memchr .ent memchr memchr: .frame $30,0,$26,0 .prologue 0 # Hack -- if someone passes in (size_t)-1, hoping to just # search til the end of the address space, we will overflow # below when we find the address of the last byte. Given # that we will never have a 56-bit address space, cropping # the length is the easiest way to avoid trouble. zap $18, 0x80, $5 # U : Bound length beq $18, $not_found # U : ldq_u $1, 0($16) # L : load first quadword Latency=3 and $17, 0xff, $17 # E : L L U U : 00000000000000ch insbl $17, 1, $2 # U : 000000000000ch00 cmpult $18, 9, $4 # E : small (< 1 quad) string? 
or $2, $17, $17 # E : 000000000000chch lda $3, -1($31) # E : U L L U sll $17, 16, $2 # U : 00000000chch0000 addq $16, $5, $5 # E : Max search address or $2, $17, $17 # E : 00000000chchchch sll $17, 32, $2 # U : U L L U : chchchch00000000 or $2, $17, $17 # E : chchchchchchchch extql $1, $16, $7 # U : $7 is upper bits beq $4, $first_quad # U : ldq_u $6, -1($5) # L : L U U L : eight or less bytes to search Latency=3 extqh $6, $16, $6 # U : 2 cycle stall for $6 mov $16, $0 # E : nop # E : or $7, $6, $1 # E : L U L U $1 = quadword starting at $16 # Deal with the case where at most 8 bytes remain to be searched # in $1. E.g.: # $18 = 6 # $1 = ????c6c5c4c3c2c1 $last_quad: negq $18, $6 # E : xor $17, $1, $1 # E : srl $3, $6, $6 # U : $6 = mask of $18 bits set cmpbge $31, $1, $2 # E : L U L U nop nop and $2, $6, $2 # E : beq $2, $not_found # U : U L U L $found_it: #ifdef CONFIG_ALPHA_EV67 /* * Since we are guaranteed to have set one of the bits, we don't * have to worry about coming back with a 0x40 out of cttz... */ cttz $2, $3 # U0 : addq $0, $3, $0 # E : All done nop # E : ret # L0 : L U L U #else /* * Slow and clunky. It can probably be improved. * An exercise left for others. */ negq $2, $3 # E : and $2, $3, $2 # E : and $2, 0x0f, $1 # E : addq $0, 4, $3 # E : cmoveq $1, $3, $0 # E : Latency 2, extra map cycle nop # E : keep with cmov and $2, 0x33, $1 # E : addq $0, 2, $3 # E : U L U L : 2 cycle stall on $0 cmoveq $1, $3, $0 # E : Latency 2, extra map cycle nop # E : keep with cmov and $2, 0x55, $1 # E : addq $0, 1, $3 # E : U L U L : 2 cycle stall on $0 cmoveq $1, $3, $0 # E : Latency 2, extra map cycle nop nop ret # L0 : L U L U #endif # Deal with the case where $18 > 8 bytes remain to be # searched. $16 may not be aligned. 
.align 4 $first_quad: andnot $16, 0x7, $0 # E : insqh $3, $16, $2 # U : $2 = 0000ffffffffffff ($16<0:2> ff) xor $1, $17, $1 # E : or $1, $2, $1 # E : U L U L $1 = ====ffffffffffff cmpbge $31, $1, $2 # E : bne $2, $found_it # U : # At least one byte left to process. ldq $1, 8($0) # L : subq $5, 1, $18 # E : U L U L addq $0, 8, $0 # E : # Make $18 point to last quad to be accessed (the # last quad may or may not be partial). andnot $18, 0x7, $18 # E : cmpult $0, $18, $2 # E : beq $2, $final # U : U L U L # At least two quads remain to be accessed. subq $18, $0, $4 # E : $4 <- nr quads to be processed and $4, 8, $4 # E : odd number of quads? bne $4, $odd_quad_count # U : # At least three quads remain to be accessed mov $1, $4 # E : L U L U : move prefetched value to correct reg .align 4 $unrolled_loop: ldq $1, 8($0) # L : prefetch $1 xor $17, $4, $2 # E : cmpbge $31, $2, $2 # E : bne $2, $found_it # U : U L U L addq $0, 8, $0 # E : nop # E : nop # E : nop # E : $odd_quad_count: xor $17, $1, $2 # E : ldq $4, 8($0) # L : prefetch $4 cmpbge $31, $2, $2 # E : addq $0, 8, $6 # E : bne $2, $found_it # U : cmpult $6, $18, $6 # E : addq $0, 8, $0 # E : nop # E : bne $6, $unrolled_loop # U : mov $4, $1 # E : move prefetched value into $1 nop # E : nop # E : $final: subq $5, $0, $18 # E : $18 <- number of bytes left to do nop # E : nop # E : bne $18, $last_quad # U : $not_found: mov $31, $0 # E : nop # E : nop # E : ret # L0 : .end memchr EXPORT_SYMBOL(memchr)
AirFortressIlikara/LS2K0300-linux-4.19
3,198
arch/alpha/lib/memset.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/arch/alpha/lib/memset.S * * This is an efficient (and small) implementation of the C library "memset()" * function for the alpha. * * (C) Copyright 1996 Linus Torvalds * * This routine is "moral-ware": you are free to use it any way you wish, and * the only obligation I put on you is a moral one: if you make any improvements * to the routine, please send me your improvements for me to use similarly. * * The scheduling comments are according to the EV5 documentation (and done by * hand, so they might well be incorrect, please do tell me about it..) */ #include <asm/export.h> .set noat .set noreorder .text .globl memset .globl __memset .globl ___memset .globl __memset16 .globl __constant_c_memset .ent ___memset .align 5 ___memset: .frame $30,0,$26,0 .prologue 0 and $17,255,$1 /* E1 */ insbl $17,1,$17 /* .. E0 */ bis $17,$1,$17 /* E0 (p-c latency, next cycle) */ sll $17,16,$1 /* E1 (p-c latency, next cycle) */ bis $17,$1,$17 /* E0 (p-c latency, next cycle) */ sll $17,32,$1 /* E1 (p-c latency, next cycle) */ bis $17,$1,$17 /* E0 (p-c latency, next cycle) */ ldq_u $31,0($30) /* .. E1 */ .align 5 __constant_c_memset: addq $18,$16,$6 /* E0 */ bis $16,$16,$0 /* .. E1 */ xor $16,$6,$1 /* E0 */ ble $18,end /* .. E1 */ bic $1,7,$1 /* E0 */ beq $1,within_one_quad /* .. E1 (note EV5 zero-latency forwarding) */ and $16,7,$3 /* E0 */ beq $3,aligned /* .. E1 (note EV5 zero-latency forwarding) */ ldq_u $4,0($16) /* E0 */ bis $16,$16,$5 /* .. E1 */ insql $17,$16,$2 /* E0 */ subq $3,8,$3 /* .. E1 */ addq $18,$3,$18 /* E0 $18 is new count ($3 is negative) */ mskql $4,$16,$4 /* .. E1 (and possible load stall) */ subq $16,$3,$16 /* E0 $16 is new aligned destination */ bis $2,$4,$1 /* .. E1 */ bis $31,$31,$31 /* E0 */ ldq_u $31,0($30) /* .. E1 */ stq_u $1,0($5) /* E0 */ bis $31,$31,$31 /* .. E1 */ .align 4 aligned: sra $18,3,$3 /* E0 */ and $18,7,$18 /* .. E1 */ bis $16,$16,$5 /* E0 */ beq $3,no_quad /* .. 
E1 */ .align 3 loop: stq $17,0($5) /* E0 */ subq $3,1,$3 /* .. E1 */ addq $5,8,$5 /* E0 */ bne $3,loop /* .. E1 */ no_quad: bis $31,$31,$31 /* E0 */ beq $18,end /* .. E1 */ ldq $7,0($5) /* E0 */ mskqh $7,$6,$2 /* .. E1 (and load stall) */ insqh $17,$6,$4 /* E0 */ bis $2,$4,$1 /* .. E1 */ stq $1,0($5) /* E0 */ ret $31,($26),1 /* .. E1 */ .align 3 within_one_quad: ldq_u $1,0($16) /* E0 */ insql $17,$16,$2 /* E1 */ mskql $1,$16,$4 /* E0 (after load stall) */ bis $2,$4,$2 /* E0 */ mskql $2,$6,$4 /* E0 */ mskqh $1,$6,$2 /* .. E1 */ bis $2,$4,$1 /* E0 */ stq_u $1,0($16) /* E0 */ end: ret $31,($26),1 /* E1 */ .end ___memset EXPORT_SYMBOL(___memset) EXPORT_SYMBOL(__constant_c_memset) .align 5 .ent __memset16 __memset16: .prologue 0 inswl $17,0,$1 /* E0 */ inswl $17,2,$2 /* E0 */ inswl $17,4,$3 /* E0 */ or $1,$2,$1 /* .. E1 */ inswl $17,6,$4 /* E0 */ or $1,$3,$1 /* .. E1 */ or $1,$4,$17 /* E0 */ br __constant_c_memset /* .. E1 */ .end __memset16 EXPORT_SYMBOL(__memset16) memset = ___memset __memset = ___memset EXPORT_SYMBOL(memset) EXPORT_SYMBOL(__memset)
AirFortressIlikara/LS2K0300-linux-4.19
5,302
arch/alpha/lib/ev6-csum_ipv6_magic.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/ev6-csum_ipv6_magic.S * 21264 version contributed by Rick Gorton <rick.gorton@alpha-processor.com> * * unsigned short csum_ipv6_magic(struct in6_addr *saddr, * struct in6_addr *daddr, * __u32 len, * unsigned short proto, * unsigned int csum); * * Much of the information about 21264 scheduling/coding comes from: * Compiler Writer's Guide for the Alpha 21264 * abbreviated as 'CWG' in other comments here * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html * Scheduling notation: * E - either cluster * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1 * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 * Try not to change the actual algorithm if possible for consistency. * Determining actual stalls (other than slotting) doesn't appear to be easy to do. * * unsigned short csum_ipv6_magic(struct in6_addr *saddr, * struct in6_addr *daddr, * __u32 len, * unsigned short proto, * unsigned int csum); * * Swap <proto> (takes form 0xaabb) * Then shift it left by 48, so result is: * 0xbbaa0000 00000000 * Then turn it back into a sign extended 32-bit item * 0xbbaa0000 * * Swap <len> (an unsigned int) using Mike Burrows' 7-instruction sequence * (we can't hide the 3-cycle latency of the unpkbw in the 6-instruction sequence) * Assume input takes form 0xAABBCCDD * * Finally, original 'folding' approach is to split the long into 4 unsigned shorts * add 4 ushorts, resulting in ushort/carry * add carry bits + ushort --> ushort * add carry bits + ushort --> ushort (in case the carry results in an overflow) * Truncate to a ushort. (took 13 instructions) * From doing some testing, using the approach in checksum.c:from64to16() * results in the same outcome: * split into 2 uints, add those, generating a ulong * add the 3 low ushorts together, generating a uint * a final add of the 2 lower ushorts * truncating the result. 
* * Misalignment handling added by Ivan Kokshaysky <ink@jurassic.park.msu.ru> * The cost is 16 instructions (~8 cycles), including two extra loads which * may cause additional delay in rare cases (load-load replay traps). */ #include <asm/export.h> .globl csum_ipv6_magic .align 4 .ent csum_ipv6_magic .frame $30,0,$26,0 csum_ipv6_magic: .prologue 0 ldq_u $0,0($16) # L : Latency: 3 inslh $18,7,$4 # U : 0000000000AABBCC ldq_u $1,8($16) # L : Latency: 3 sll $19,8,$7 # U : U L U L : 0x00000000 00aabb00 and $16,7,$6 # E : src misalignment ldq_u $5,15($16) # L : Latency: 3 zapnot $20,15,$20 # U : zero extend incoming csum ldq_u $2,0($17) # L : U L U L : Latency: 3 extql $0,$6,$0 # U : extqh $1,$6,$22 # U : ldq_u $3,8($17) # L : Latency: 3 sll $19,24,$19 # U : U U L U : 0x000000aa bb000000 cmoveq $6,$31,$22 # E : src aligned? ldq_u $23,15($17) # L : Latency: 3 inswl $18,3,$18 # U : 000000CCDD000000 addl $19,$7,$19 # E : U L U L : <sign bits>bbaabb00 or $0,$22,$0 # E : 1st src word complete extql $1,$6,$1 # U : or $18,$4,$18 # E : 000000CCDDAABBCC extqh $5,$6,$5 # U : L U L U and $17,7,$6 # E : dst misalignment extql $2,$6,$2 # U : or $1,$5,$1 # E : 2nd src word complete extqh $3,$6,$22 # U : L U L U : cmoveq $6,$31,$22 # E : dst aligned? 
extql $3,$6,$3 # U : addq $20,$0,$20 # E : begin summing the words extqh $23,$6,$23 # U : L U L U : srl $18,16,$4 # U : 0000000000CCDDAA or $2,$22,$2 # E : 1st dst word complete zap $19,0x3,$19 # U : <sign bits>bbaa0000 or $3,$23,$3 # E : U L U L : 2nd dst word complete cmpult $20,$0,$0 # E : addq $20,$1,$20 # E : zapnot $18,0xa,$18 # U : 00000000DD00BB00 zap $4,0xa,$4 # U : U U L L : 0000000000CC00AA or $18,$4,$18 # E : 00000000DDCCBBAA nop # E : cmpult $20,$1,$1 # E : addq $20,$2,$20 # E : U L U L cmpult $20,$2,$2 # E : addq $20,$3,$20 # E : cmpult $20,$3,$3 # E : (1 cycle stall on $20) addq $20,$18,$20 # E : U L U L (1 cycle stall on $20) cmpult $20,$18,$18 # E : addq $20,$19,$20 # E : (1 cycle stall on $20) addq $0,$1,$0 # E : merge the carries back into the csum addq $2,$3,$2 # E : cmpult $20,$19,$19 # E : addq $18,$19,$18 # E : (1 cycle stall on $19) addq $0,$2,$0 # E : addq $20,$18,$20 # E : U L U L : /* (1 cycle stall on $18, 2 cycles on $20) */ addq $0,$20,$0 # E : zapnot $0,15,$1 # U : Start folding output (1 cycle stall on $0) nop # E : srl $0,32,$0 # U : U L U L : (1 cycle stall on $0) addq $1,$0,$1 # E : Finished generating ulong extwl $1,2,$2 # U : ushort[1] (1 cycle stall on $1) zapnot $1,3,$0 # U : ushort[0] (1 cycle stall on $1) extwl $1,4,$1 # U : ushort[2] (1 cycle stall on $1) addq $0,$2,$0 # E addq $0,$1,$3 # E : Finished generating uint /* (1 cycle stall on $0) */ extwl $3,2,$1 # U : ushort[1] (1 cycle stall on $3) nop # E : L U L U addq $1,$3,$0 # E : Final carry not $0,$4 # E : complement (1 cycle stall on $0) zapnot $4,3,$0 # U : clear upper garbage bits /* (1 cycle stall on $4) */ ret # L0 : L U L U .end csum_ipv6_magic EXPORT_SYMBOL(csum_ipv6_magic)
AirFortressIlikara/LS2K0300-linux-4.19
2,116
arch/alpha/lib/copy_user.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/copy_user.S * * Copy to/from user space, handling exceptions as we go.. This * isn't exactly pretty. * * This is essentially the same as "memcpy()", but with a few twists. * Notably, we have to make sure that $0 is always up-to-date and * contains the right "bytes left to copy" value (and that it is updated * only _after_ a successful copy). There is also some rather minor * exception setup stuff.. */ #include <asm/export.h> /* Allow an exception for an insn; exit if we get one. */ #define EXI(x,y...) \ 99: x,##y; \ .section __ex_table,"a"; \ .long 99b - .; \ lda $31, $exitin-99b($31); \ .previous #define EXO(x,y...) \ 99: x,##y; \ .section __ex_table,"a"; \ .long 99b - .; \ lda $31, $exitout-99b($31); \ .previous .set noat .align 4 .globl __copy_user .ent __copy_user __copy_user: .prologue 0 mov $18,$0 and $16,7,$3 beq $0,$35 beq $3,$36 subq $3,8,$3 .align 4 $37: EXI( ldq_u $1,0($17) ) EXO( ldq_u $2,0($16) ) extbl $1,$17,$1 mskbl $2,$16,$2 insbl $1,$16,$1 addq $3,1,$3 bis $1,$2,$1 EXO( stq_u $1,0($16) ) subq $0,1,$0 addq $16,1,$16 addq $17,1,$17 beq $0,$41 bne $3,$37 $36: and $17,7,$1 bic $0,7,$4 beq $1,$43 beq $4,$48 EXI( ldq_u $3,0($17) ) .align 4 $50: EXI( ldq_u $2,8($17) ) subq $4,8,$4 extql $3,$17,$3 extqh $2,$17,$1 bis $3,$1,$1 EXO( stq $1,0($16) ) addq $17,8,$17 subq $0,8,$0 addq $16,8,$16 bis $2,$2,$3 bne $4,$50 $48: beq $0,$41 .align 4 $57: EXI( ldq_u $1,0($17) ) EXO( ldq_u $2,0($16) ) extbl $1,$17,$1 mskbl $2,$16,$2 insbl $1,$16,$1 bis $1,$2,$1 EXO( stq_u $1,0($16) ) subq $0,1,$0 addq $16,1,$16 addq $17,1,$17 bne $0,$57 br $31,$41 .align 4 $43: beq $4,$65 .align 4 $66: EXI( ldq $1,0($17) ) subq $4,8,$4 EXO( stq $1,0($16) ) addq $17,8,$17 subq $0,8,$0 addq $16,8,$16 bne $4,$66 $65: beq $0,$41 EXI( ldq $2,0($17) ) EXO( ldq $1,0($16) ) mskql $2,$0,$2 mskqh $1,$0,$1 bis $2,$1,$2 EXO( stq $2,0($16) ) bis $31,$31,$0 $41: $35: $exitin: $exitout: ret $31,($26),1 .end __copy_user EXPORT_SYMBOL(__copy_user)
AirFortressIlikara/LS2K0300-linux-4.19
6,508
arch/alpha/lib/ev6-divide.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/ev6-divide.S * * 21264 version contributed by Rick Gorton <rick.gorton@alpha-processor.com> * * Alpha division.. */ /* * The alpha chip doesn't provide hardware division, so we have to do it * by hand. The compiler expects the functions * * __divqu: 64-bit unsigned long divide * __remqu: 64-bit unsigned long remainder * __divqs/__remqs: signed 64-bit * __divlu/__remlu: unsigned 32-bit * __divls/__remls: signed 32-bit * * These are not normal C functions: instead of the normal * calling sequence, these expect their arguments in registers * $24 and $25, and return the result in $27. Register $28 may * be clobbered (assembly temporary), anything else must be saved. * * In short: painful. * * This is a rather simple bit-at-a-time algorithm: it's very good * at dividing random 64-bit numbers, but the more usual case where * the divisor is small is handled better by the DEC algorithm * using lookup tables. This uses much less memory, though, and is * nicer on the cache.. Besides, I don't know the copyright status * of the DEC code. */ /* * My temporaries: * $0 - current bit * $1 - shifted divisor * $2 - modulus/quotient * * $23 - return address * $24 - dividend * $25 - divisor * * $27 - quotient/modulus * $28 - compare status * * Much of the information about 21264 scheduling/coding comes from: * Compiler Writer's Guide for the Alpha 21264 * abbreviated as 'CWG' in other comments here * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html * Scheduling notation: * E - either cluster * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1 * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 * Try not to change the actual algorithm if possible for consistency. */ #include <asm/export.h> #define halt .long 0 /* * Select function type and registers */ #define mask $0 #define divisor $1 #define compare $28 #define tmp1 $3 #define tmp2 $4 #ifdef DIV #define DIV_ONLY(x,y...) 
x,##y #define MOD_ONLY(x,y...) #define func(x) __div##x #define modulus $2 #define quotient $27 #define GETSIGN(x) xor $24,$25,x #define STACK 48 #else #define DIV_ONLY(x,y...) #define MOD_ONLY(x,y...) x,##y #define func(x) __rem##x #define modulus $27 #define quotient $2 #define GETSIGN(x) bis $24,$24,x #define STACK 32 #endif /* * For 32-bit operations, we need to extend to 64-bit */ #ifdef INTSIZE #define ufunction func(lu) #define sfunction func(l) #define LONGIFY(x) zapnot x,15,x #define SLONGIFY(x) addl x,0,x #else #define ufunction func(qu) #define sfunction func(q) #define LONGIFY(x) #define SLONGIFY(x) #endif .set noat .align 4 .globl ufunction .ent ufunction ufunction: subq $30,STACK,$30 # E : .frame $30,STACK,$23 .prologue 0 7: stq $1, 0($30) # L : bis $25,$25,divisor # E : stq $2, 8($30) # L : L U L U bis $24,$24,modulus # E : stq $0,16($30) # L : bis $31,$31,quotient # E : LONGIFY(divisor) # E : U L L U stq tmp1,24($30) # L : LONGIFY(modulus) # E : bis $31,1,mask # E : DIV_ONLY(stq tmp2,32($30)) # L : L U U L beq divisor, 9f /* div by zero */ /* * In spite of the DIV_ONLY being either a non-instruction * or an actual stq, the addition of the .align directive * below ensures that label 1 is going to be nicely aligned */ .align 4 #ifdef INTSIZE /* * shift divisor left, using 3-bit shifts for * 32-bit divides as we can't overflow. Three-bit * shifts will result in looping three times less * here, but can result in two loops more later. * Thus using a large shift isn't worth it (and * s8add pairs better than a sll..) */ 1: cmpult divisor,modulus,compare # E : s8addq divisor,$31,divisor # E : s8addq mask,$31,mask # E : bne compare,1b # U : U L U L #else 1: cmpult divisor,modulus,compare # E : nop # E : nop # E : blt divisor, 2f # U : U L U L addq divisor,divisor,divisor # E : addq mask,mask,mask # E : unop # E : bne compare,1b # U : U L U L #endif /* ok, start to go right again.. */ 2: /* * Keep things nicely bundled... 
use a nop instead of not * having an instruction for DIV_ONLY */ #ifdef DIV DIV_ONLY(addq quotient,mask,tmp2) # E : #else nop # E : #endif srl mask,1,mask # U : cmpule divisor,modulus,compare # E : subq modulus,divisor,tmp1 # E : #ifdef DIV DIV_ONLY(cmovne compare,tmp2,quotient) # E : Latency 2, extra map slot nop # E : as part of the cmovne srl divisor,1,divisor # U : nop # E : L U L U nop # E : cmovne compare,tmp1,modulus # E : Latency 2, extra map slot nop # E : as part of the cmovne bne mask,2b # U : U L U L #else srl divisor,1,divisor # U : cmovne compare,tmp1,modulus # E : Latency 2, extra map slot nop # E : as part of the cmovne bne mask,2b # U : U L L U #endif 9: ldq $1, 0($30) # L : ldq $2, 8($30) # L : nop # E : nop # E : U U L L ldq $0,16($30) # L : ldq tmp1,24($30) # L : nop # E : nop # E : #ifdef DIV DIV_ONLY(ldq tmp2,32($30)) # L : #else nop # E : #endif addq $30,STACK,$30 # E : ret $31,($23),1 # L0 : L U U L .end ufunction EXPORT_SYMBOL(ufunction) /* * Uhh.. Ugly signed division. I'd rather not have it at all, but * it's needed in some circumstances. There are different ways to * handle this, really. This does: * -a / b = a / -b = -(a / b) * -a % b = -(a % b) * a % -b = a % b * which is probably not the best solution, but at least should * have the property that (x/y)*y + (x%y) = x. 
*/ .align 4 .globl sfunction .ent sfunction sfunction: subq $30,STACK,$30 # E : .frame $30,STACK,$23 .prologue 0 bis $24,$25,$28 # E : SLONGIFY($28) # E : bge $28,7b # U : stq $24,0($30) # L : subq $31,$24,$28 # E : stq $25,8($30) # L : nop # E : U L U L cmovlt $24,$28,$24 /* abs($24) */ # E : Latency 2, extra map slot nop # E : as part of the cmov stq $23,16($30) # L : subq $31,$25,$28 # E : U L U L stq tmp1,24($30) # L : cmovlt $25,$28,$25 /* abs($25) */ # E : Latency 2, extra map slot nop # E : bsr $23,ufunction # L0: L U L U ldq $24,0($30) # L : ldq $25,8($30) # L : GETSIGN($28) # E : subq $31,$27,tmp1 # E : U U L L SLONGIFY($28) # E : ldq $23,16($30) # L : cmovlt $28,tmp1,$27 # E : Latency 2, extra map slot nop # E : U L L U : as part of the cmov ldq tmp1,24($30) # L : nop # E : as part of the cmov addq $30,STACK,$30 # E : ret $31,($23),1 # L0 : L U U L .end sfunction EXPORT_SYMBOL(sfunction)
AirFortressIlikara/LS2K0300-linux-4.19
2,980
arch/alpha/lib/csum_ipv6_magic.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/csum_ipv6_magic.S * Contributed by Richard Henderson <rth@tamu.edu> * * unsigned short csum_ipv6_magic(struct in6_addr *saddr, * struct in6_addr *daddr, * __u32 len, * unsigned short proto, * unsigned int csum); * * Misalignment handling (which costs 16 instructions / 8 cycles) * added by Ivan Kokshaysky <ink@jurassic.park.msu.ru> */ #include <asm/export.h> .globl csum_ipv6_magic .align 4 .ent csum_ipv6_magic .frame $30,0,$26,0 csum_ipv6_magic: .prologue 0 ldq_u $0,0($16) # e0 : load src & dst addr words zapnot $20,15,$20 # .. e1 : zero extend incoming csum extqh $18,1,$4 # e0 : byte swap len & proto while we wait ldq_u $21,7($16) # .. e1 : handle misalignment extbl $18,1,$5 # e0 : ldq_u $1,8($16) # .. e1 : extbl $18,2,$6 # e0 : ldq_u $22,15($16) # .. e1 : extbl $18,3,$18 # e0 : ldq_u $2,0($17) # .. e1 : sra $4,32,$4 # e0 : ldq_u $23,7($17) # .. e1 : extql $0,$16,$0 # e0 : ldq_u $3,8($17) # .. e1 : extqh $21,$16,$21 # e0 : ldq_u $24,15($17) # .. e1 : sll $5,16,$5 # e0 : or $0,$21,$0 # .. e1 : 1st src word complete extql $1,$16,$1 # e0 : addq $20,$0,$20 # .. e1 : begin summing the words extqh $22,$16,$22 # e0 : cmpult $20,$0,$0 # .. e1 : sll $6,8,$6 # e0 : or $1,$22,$1 # .. e1 : 2nd src word complete extql $2,$17,$2 # e0 : or $4,$18,$18 # .. e1 : extqh $23,$17,$23 # e0 : or $5,$6,$5 # .. e1 : extql $3,$17,$3 # e0 : or $2,$23,$2 # .. e1 : 1st dst word complete extqh $24,$17,$24 # e0 : or $18,$5,$18 # .. e1 : len complete extwh $19,7,$7 # e0 : or $3,$24,$3 # .. e1 : 2nd dst word complete extbl $19,1,$19 # e0 : addq $20,$1,$20 # .. e1 : or $19,$7,$19 # e0 : cmpult $20,$1,$1 # .. e1 : sll $19,48,$19 # e0 : nop # .. e0 : sra $19,32,$19 # e0 : proto complete addq $20,$2,$20 # .. e1 : cmpult $20,$2,$2 # e0 : addq $20,$3,$20 # .. e1 : cmpult $20,$3,$3 # e0 : addq $20,$18,$20 # .. e1 : cmpult $20,$18,$18 # e0 : addq $20,$19,$20 # .. e1 : cmpult $20,$19,$19 # e0 : addq $0,$1,$0 # .. 
e1 : merge the carries back into the csum addq $2,$3,$2 # e0 : addq $18,$19,$18 # .. e1 : addq $0,$2,$0 # e0 : addq $20,$18,$20 # .. e1 : addq $0,$20,$0 # e0 : unop # : extwl $0,2,$2 # e0 : begin folding the 64-bit value zapnot $0,3,$3 # .. e1 : extwl $0,4,$1 # e0 : addq $2,$3,$3 # .. e1 : extwl $0,6,$0 # e0 : addq $3,$1,$3 # .. e1 : addq $0,$3,$0 # e0 : unop # : extwl $0,2,$1 # e0 : fold 18-bit value zapnot $0,3,$0 # .. e1 : addq $0,$1,$0 # e0 : unop # : extwl $0,2,$1 # e0 : fold 17-bit value zapnot $0,3,$0 # .. e1 : addq $0,$1,$0 # e0 : not $0,$0 # .. e1 : and complement. zapnot $0,3,$0 # e0 : ret # .. e1 : .end csum_ipv6_magic EXPORT_SYMBOL(csum_ipv6_magic)
AirFortressIlikara/LS2K0300-linux-4.19
2,854
arch/alpha/lib/strrchr.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/strrchr.S * Contributed by Richard Henderson (rth@tamu.edu) * * Return the address of the last occurrence of a given character * within a null-terminated string, or null if it is not found. */ #include <asm/export.h> #include <asm/regdef.h> .set noreorder .set noat .align 3 .ent strrchr .globl strrchr strrchr: .frame sp, 0, ra .prologue 0 zapnot a1, 1, a1 # e0 : zero extend our test character mov zero, t6 # .. e1 : t6 is last match aligned addr sll a1, 8, t5 # e0 : replicate our test character mov zero, t8 # .. e1 : t8 is last match byte compare mask or t5, a1, a1 # e0 : ldq_u t0, 0(a0) # .. e1 : load first quadword sll a1, 16, t5 # e0 : andnot a0, 7, v0 # .. e1 : align source addr or t5, a1, a1 # e0 : lda t4, -1 # .. e1 : build garbage mask sll a1, 32, t5 # e0 : cmpbge zero, t0, t1 # .. e1 : bits set iff byte == zero mskqh t4, a0, t4 # e0 : or t5, a1, a1 # .. e1 : character replication complete xor t0, a1, t2 # e0 : make bytes == c zero cmpbge zero, t4, t4 # .. e1 : bits set iff byte is garbage cmpbge zero, t2, t3 # e0 : bits set iff byte == c andnot t1, t4, t1 # .. e1 : clear garbage from null test andnot t3, t4, t3 # e0 : clear garbage from char test bne t1, $eos # .. e1 : did we already hit the terminator? /* Character search main loop */ $loop: ldq t0, 8(v0) # e0 : load next quadword cmovne t3, v0, t6 # .. e1 : save previous comparisons match cmovne t3, t3, t8 # e0 : addq v0, 8, v0 # .. e1 : xor t0, a1, t2 # e0 : cmpbge zero, t0, t1 # .. e1 : bits set iff byte == zero cmpbge zero, t2, t3 # e0 : bits set iff byte == c beq t1, $loop # .. e1 : if we havnt seen a null, loop /* Mask out character matches after terminator */ $eos: negq t1, t4 # e0 : isolate first null byte match and t1, t4, t4 # e1 : subq t4, 1, t5 # e0 : build a mask of the bytes up to... or t4, t5, t4 # e1 : ... and including the null and t3, t4, t3 # e0 : mask out char matches after null cmovne t3, t3, t8 # .. 
e1 : save it, if match found cmovne t3, v0, t6 # e0 : /* Locate the address of the last matched character */ /* Retain the early exit for the ev4 -- the ev5 mispredict penalty is 5 cycles -- the same as just falling through. */ beq t8, $retnull # .. e1 : and t8, 0xf0, t2 # e0 : binary search for the high bit set cmovne t2, t2, t8 # .. e1 (zdb) cmovne t2, 4, t2 # e0 : and t8, 0xcc, t1 # .. e1 : cmovne t1, t1, t8 # e0 : cmovne t1, 2, t1 # .. e1 : and t8, 0xaa, t0 # e0 : cmovne t0, 1, t0 # .. e1 (zdb) addq t2, t1, t1 # e0 : addq t6, t0, v0 # .. e1 : add our aligned base ptr to the mix addq v0, t1, v0 # e0 : ret # .. e1 : $retnull: mov zero, v0 # e0 : ret # .. e1 : .end strrchr EXPORT_SYMBOL(strrchr)
AirFortressIlikara/LS2K0300-linux-4.19
3,379
arch/alpha/lib/ev67-strrchr.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/ev67-strrchr.S * 21264 version by Rick Gorton <rick.gorton@alpha-processor.com> * * Finds length of a 0-terminated string. Optimized for the * Alpha architecture: * * - memory accessed as aligned quadwords only * - uses bcmpge to compare 8 bytes in parallel * * Much of the information about 21264 scheduling/coding comes from: * Compiler Writer's Guide for the Alpha 21264 * abbreviated as 'CWG' in other comments here * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html * Scheduling notation: * E - either cluster * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1 * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 */ #include <asm/export.h> #include <asm/regdef.h> .set noreorder .set noat .align 4 .ent strrchr .globl strrchr strrchr: .frame sp, 0, ra .prologue 0 and a1, 0xff, t2 # E : 00000000000000ch insbl a1, 1, t4 # U : 000000000000ch00 insbl a1, 2, t5 # U : 0000000000ch0000 ldq_u t0, 0(a0) # L : load first quadword Latency=3 mov zero, t6 # E : t6 is last match aligned addr or t2, t4, a1 # E : 000000000000chch sll t5, 8, t3 # U : 00000000ch000000 mov zero, t8 # E : t8 is last match byte compare mask andnot a0, 7, v0 # E : align source addr or t5, t3, t3 # E : 00000000chch0000 sll a1, 32, t2 # U : 0000chch00000000 sll a1, 48, t4 # U : chch000000000000 or t4, a1, a1 # E : chch00000000chch or t2, t3, t2 # E : 0000chchchch0000 or a1, t2, a1 # E : chchchchchchchch lda t5, -1 # E : build garbage mask cmpbge zero, t0, t1 # E : bits set iff byte == zero mskqh t5, a0, t4 # E : Complete garbage mask xor t0, a1, t2 # E : make bytes == c zero cmpbge zero, t4, t4 # E : bits set iff byte is garbage cmpbge zero, t2, t3 # E : bits set iff byte == c andnot t1, t4, t1 # E : clear garbage from null test andnot t3, t4, t3 # E : clear garbage from char test bne t1, $eos # U : did we already hit the terminator? 
/* Character search main loop */ $loop: ldq t0, 8(v0) # L : load next quadword cmovne t3, v0, t6 # E : save previous comparisons match nop # : Latency=2, extra map slot (keep nop with cmov) nop cmovne t3, t3, t8 # E : Latency=2, extra map slot nop # : keep with cmovne addq v0, 8, v0 # E : xor t0, a1, t2 # E : cmpbge zero, t0, t1 # E : bits set iff byte == zero cmpbge zero, t2, t3 # E : bits set iff byte == c beq t1, $loop # U : if we havnt seen a null, loop nop /* Mask out character matches after terminator */ $eos: negq t1, t4 # E : isolate first null byte match and t1, t4, t4 # E : subq t4, 1, t5 # E : build a mask of the bytes up to... or t4, t5, t4 # E : ... and including the null and t3, t4, t3 # E : mask out char matches after null cmovne t3, t3, t8 # E : save it, if match found Latency=2, extra map slot nop # : Keep with cmovne nop cmovne t3, v0, t6 # E : nop # : Keep with cmovne /* Locate the address of the last matched character */ ctlz t8, t2 # U0 : Latency=3 (0x40 for t8=0) nop cmoveq t8, 0x3f, t2 # E : Compensate for case when no match is seen nop # E : hide the cmov latency (2) behind ctlz latency lda t5, 0x3f($31) # E : subq t5, t2, t5 # E : Normalize leading zero count addq t6, t5, v0 # E : and add to quadword address ret # L0 : Latency=3 nop nop .end strrchr EXPORT_SYMBOL(strrchr)
AirFortressIlikara/LS2K0300-linux-4.19
5,047
arch/alpha/lib/memchr.S
/* Copyright (C) 1996 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by David Mosberger (davidm@cs.arizona.edu). The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. You should have received a copy of the GNU Library General Public License along with the GNU C Library; see the file COPYING.LIB. If not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Finds characters in a memory area. Optimized for the Alpha: - memory accessed as aligned quadwords only - uses cmpbge to compare 8 bytes in parallel - does binary search to find 0 byte in last quadword (HAKMEM needed 12 instructions to do this instead of the 9 instructions that binary search needs). For correctness consider that: - only minimum number of quadwords may be accessed - the third argument is an unsigned long */ #include <asm/export.h> .set noreorder .set noat .globl memchr .ent memchr memchr: .frame $30,0,$26,0 .prologue 0 # Hack -- if someone passes in (size_t)-1, hoping to just # search til the end of the address space, we will overflow # below when we find the address of the last byte. Given # that we will never have a 56-bit address space, cropping # the length is the easiest way to avoid trouble. zap $18, 0x80, $5 #-e0 : beq $18, $not_found # .. e1 : ldq_u $1, 0($16) # e1 : load first quadword insbl $17, 1, $2 # .. e0 : $2 = 000000000000ch00 and $17, 0xff, $17 #-e0 : $17 = 00000000000000ch cmpult $18, 9, $4 # .. 
e1 : or $2, $17, $17 # e0 : $17 = 000000000000chch lda $3, -1($31) # .. e1 : sll $17, 16, $2 #-e0 : $2 = 00000000chch0000 addq $16, $5, $5 # .. e1 : or $2, $17, $17 # e1 : $17 = 00000000chchchch unop # : sll $17, 32, $2 #-e0 : $2 = chchchch00000000 or $2, $17, $17 # e1 : $17 = chchchchchchchch extql $1, $16, $7 # e0 : beq $4, $first_quad # .. e1 : ldq_u $6, -1($5) #-e1 : eight or less bytes to search extqh $6, $16, $6 # .. e0 : mov $16, $0 # e0 : or $7, $6, $1 # .. e1 : $1 = quadword starting at $16 # Deal with the case where at most 8 bytes remain to be searched # in $1. E.g.: # $18 = 6 # $1 = ????c6c5c4c3c2c1 $last_quad: negq $18, $6 #-e0 : xor $17, $1, $1 # .. e1 : srl $3, $6, $6 # e0 : $6 = mask of $18 bits set cmpbge $31, $1, $2 # .. e1 : and $2, $6, $2 #-e0 : beq $2, $not_found # .. e1 : $found_it: # Now, determine which byte matched: negq $2, $3 # e0 : and $2, $3, $2 # e1 : and $2, 0x0f, $1 #-e0 : addq $0, 4, $3 # .. e1 : cmoveq $1, $3, $0 # e0 : addq $0, 2, $3 # .. e1 : and $2, 0x33, $1 #-e0 : cmoveq $1, $3, $0 # .. e1 : and $2, 0x55, $1 # e0 : addq $0, 1, $3 # .. e1 : cmoveq $1, $3, $0 #-e0 : $done: ret # .. e1 : # Deal with the case where $18 > 8 bytes remain to be # searched. $16 may not be aligned. .align 4 $first_quad: andnot $16, 0x7, $0 #-e1 : insqh $3, $16, $2 # .. e0 : $2 = 0000ffffffffffff ($16<0:2> ff) xor $1, $17, $1 # e0 : or $1, $2, $1 # e1 : $1 = ====ffffffffffff cmpbge $31, $1, $2 #-e0 : bne $2, $found_it # .. e1 : # At least one byte left to process. ldq $1, 8($0) # e0 : subq $5, 1, $18 # .. e1 : addq $0, 8, $0 #-e0 : # Make $18 point to last quad to be accessed (the # last quad may or may not be partial). andnot $18, 0x7, $18 # .. e1 : cmpult $0, $18, $2 # e0 : beq $2, $final # .. e1 : # At least two quads remain to be accessed. subq $18, $0, $4 #-e0 : $4 <- nr quads to be processed and $4, 8, $4 # e1 : odd number of quads? 
bne $4, $odd_quad_count # e1 : # At least three quads remain to be accessed mov $1, $4 # e0 : move prefetched value to correct reg .align 4 $unrolled_loop: ldq $1, 8($0) #-e0 : prefetch $1 xor $17, $4, $2 # .. e1 : cmpbge $31, $2, $2 # e0 : bne $2, $found_it # .. e1 : addq $0, 8, $0 #-e0 : $odd_quad_count: xor $17, $1, $2 # .. e1 : ldq $4, 8($0) # e0 : prefetch $4 cmpbge $31, $2, $2 # .. e1 : addq $0, 8, $6 #-e0 : bne $2, $found_it # .. e1 : cmpult $6, $18, $6 # e0 : addq $0, 8, $0 # .. e1 : bne $6, $unrolled_loop #-e1 : mov $4, $1 # e0 : move prefetched value into $1 $final: subq $5, $0, $18 # .. e1 : $18 <- number of bytes left to do bne $18, $last_quad # e1 : $not_found: mov $31, $0 #-e0 : ret # .. e1 : .end memchr EXPORT_SYMBOL(memchr)
AirFortressIlikara/LS2K0300-linux-4.19
4,341
arch/alpha/lib/ev6-copy_page.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/ev6-copy_page.S * * Copy an entire page. */ /* The following comparison of this routine vs the normal copy_page.S was written by an unnamed ev6 hardware designer and forwarded to me via Steven Hobbs <hobbs@steven.zko.dec.com>. First Problem: STQ overflows. ----------------------------- It would be nice if EV6 handled every resource overflow efficiently, but for some it doesn't. Including store queue overflows. It causes a trap and a restart of the pipe. To get around this we sometimes use (to borrow a term from a VSSAD researcher) "aeration". The idea is to slow the rate at which the processor receives valid instructions by inserting nops in the fetch path. In doing so, you can prevent the overflow and actually make the code run faster. You can, of course, take advantage of the fact that the processor can fetch at most 4 aligned instructions per cycle. I inserted enough nops to force it to take 10 cycles to fetch the loop code. In theory, EV6 should be able to execute this loop in 9 cycles but I was not able to get it to run that fast -- the initial conditions were such that I could not reach this optimum rate on (chaotic) EV6. I wrote the code such that everything would issue in order. Second Problem: Dcache index matches. ------------------------------------- If you are going to use this routine on random aligned pages, there is a 25% chance that the pages will be at the same dcache indices. This results in many nasty memory traps without care. The solution is to schedule the prefetches to avoid the memory conflicts. I schedule the wh64 prefetches farther ahead of the read prefetches to avoid this problem. Third Problem: Needs more prefetching. -------------------------------------- In order to improve the code I added deeper prefetching to take the most advantage of EV6's bandwidth. I also prefetched the read stream. 
Note that adding the read prefetch forced me to add another cycle to the inner-most kernel - up to 11 from the original 8 cycles per iteration. We could improve performance further by unrolling the loop and doing multiple prefetches per cycle. I think that the code below will be very robust and fast code for the purposes of copying aligned pages. It is slower when both source and destination pages are in the dcache, but it is my guess that this is less important than the dcache miss case. */ #include <asm/export.h> .text .align 4 .global copy_page .ent copy_page copy_page: .prologue 0 /* Prefetch 5 read cachelines; write-hint 10 cache lines. */ wh64 ($16) ldl $31,0($17) ldl $31,64($17) lda $1,1*64($16) wh64 ($1) ldl $31,128($17) ldl $31,192($17) lda $1,2*64($16) wh64 ($1) ldl $31,256($17) lda $18,118 lda $1,3*64($16) wh64 ($1) nop lda $1,4*64($16) lda $2,5*64($16) wh64 ($1) wh64 ($2) lda $1,6*64($16) lda $2,7*64($16) wh64 ($1) wh64 ($2) lda $1,8*64($16) lda $2,9*64($16) wh64 ($1) wh64 ($2) lda $19,10*64($16) nop /* Main prefetching/write-hinting loop. */ 1: ldq $0,0($17) ldq $1,8($17) unop unop unop unop ldq $2,16($17) ldq $3,24($17) ldq $4,32($17) ldq $5,40($17) unop unop unop unop ldq $6,48($17) ldq $7,56($17) ldl $31,320($17) unop unop unop /* This gives the extra cycle of aeration above the minimum. */ unop unop unop unop wh64 ($19) unop unop unop stq $0,0($16) subq $18,1,$18 stq $1,8($16) unop unop stq $2,16($16) addq $17,64,$17 stq $3,24($16) stq $4,32($16) stq $5,40($16) addq $19,64,$19 unop stq $6,48($16) stq $7,56($16) addq $16,64,$16 bne $18, 1b /* Prefetch the final 5 cache lines of the read stream. */ lda $18,10 ldl $31,320($17) ldl $31,384($17) ldl $31,448($17) ldl $31,512($17) ldl $31,576($17) nop nop /* Non-prefetching, non-write-hinting cleanup loop for the final 10 cache lines. 
*/ 2: ldq $0,0($17) ldq $1,8($17) ldq $2,16($17) ldq $3,24($17) ldq $4,32($17) ldq $5,40($17) ldq $6,48($17) ldq $7,56($17) stq $0,0($16) subq $18,1,$18 stq $1,8($16) addq $17,64,$17 stq $2,16($16) stq $3,24($16) stq $4,32($16) stq $5,40($16) stq $6,48($16) stq $7,56($16) addq $16,64,$16 bne $18, 2b ret nop unop nop .end copy_page EXPORT_SYMBOL(copy_page)
AirFortressIlikara/LS2K0300-linux-4.19
7,189
arch/alpha/lib/ev6-clear_user.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/ev6-clear_user.S * 21264 version contributed by Rick Gorton <rick.gorton@alpha-processor.com> * * Zero user space, handling exceptions as we go. * * We have to make sure that $0 is always up-to-date and contains the * right "bytes left to zero" value (and that it is updated only _after_ * a successful copy). There is also some rather minor exception setup * stuff. * * Much of the information about 21264 scheduling/coding comes from: * Compiler Writer's Guide for the Alpha 21264 * abbreviated as 'CWG' in other comments here * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html * Scheduling notation: * E - either cluster * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1 * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 * Try not to change the actual algorithm if possible for consistency. * Determining actual stalls (other than slotting) doesn't appear to be easy to do. * From perusing the source code context where this routine is called, it is * a fair assumption that significant fractions of entire pages are zeroed, so * it's going to be worth the effort to hand-unroll a big loop, and use wh64. * ASSUMPTION: * The believed purpose of only updating $0 after a store is that a signal * may come along during the execution of this chunk of code, and we don't * want to leave a hole (and we also want to avoid repeating lots of work) */ #include <asm/export.h> /* Allow an exception for an insn; exit if we get one. */ #define EX(x,y...) \ 99: x,##y; \ .section __ex_table,"a"; \ .long 99b - .; \ lda $31, $exception-99b($31); \ .previous .set noat .set noreorder .align 4 .globl __clear_user .ent __clear_user .frame $30, 0, $26 .prologue 0 # Pipeline info : Slotting & Comments __clear_user: and $17, $17, $0 and $16, 7, $4 # .. E .. .. : find dest head misalignment beq $0, $zerolength # U .. .. .. : U L U L addq $0, $4, $1 # .. .. .. E : bias counter and $1, 7, $2 # .. .. 
E .. : number of misaligned bytes in tail # Note - we never actually use $2, so this is a moot computation # and we can rewrite this later... srl $1, 3, $1 # .. E .. .. : number of quadwords to clear beq $4, $headalign # U .. .. .. : U L U L /* * Head is not aligned. Write (8 - $4) bytes to head of destination * This means $16 is known to be misaligned */ EX( ldq_u $5, 0($16) ) # .. .. .. L : load dst word to mask back in beq $1, $onebyte # .. .. U .. : sub-word store? mskql $5, $16, $5 # .. U .. .. : take care of misaligned head addq $16, 8, $16 # E .. .. .. : L U U L EX( stq_u $5, -8($16) ) # .. .. .. L : subq $1, 1, $1 # .. .. E .. : addq $0, $4, $0 # .. E .. .. : bytes left -= 8 - misalignment subq $0, 8, $0 # E .. .. .. : U L U L .align 4 /* * (The .align directive ought to be a moot point) * values upon initial entry to the loop * $1 is number of quadwords to clear (zero is a valid value) * $2 is number of trailing bytes (0..7) ($2 never used...) * $16 is known to be aligned 0mod8 */ $headalign: subq $1, 16, $4 # .. .. .. E : If < 16, we can not use the huge loop and $16, 0x3f, $2 # .. .. E .. : Forward work for huge loop subq $2, 0x40, $3 # .. E .. .. : bias counter (huge loop) blt $4, $trailquad # U .. .. .. : U L U L /* * We know that we're going to do at least 16 quads, which means we are * going to be able to use the large block clear loop at least once. * Figure out how many quads we need to clear before we are 0mod64 aligned * so we can use the wh64 instruction. */ nop # .. .. .. E nop # .. .. E .. nop # .. E .. .. beq $3, $bigalign # U .. .. .. : U L U L : Aligned 0mod64 $alignmod64: EX( stq_u $31, 0($16) ) # .. .. .. L addq $3, 8, $3 # .. .. E .. subq $0, 8, $0 # .. E .. .. nop # E .. .. .. : U L U L nop # .. .. .. E subq $1, 1, $1 # .. .. E .. addq $16, 8, $16 # .. E .. .. blt $3, $alignmod64 # U .. .. .. 
: U L U L $bigalign: /* * $0 is the number of bytes left * $1 is the number of quads left * $16 is aligned 0mod64 * we know that we'll be taking a minimum of one trip through * CWG Section 3.7.6: do not expect a sustained store rate of > 1/cycle * We are _not_ going to update $0 after every single store. That * would be silly, because there will be cross-cluster dependencies * no matter how the code is scheduled. By doing it in slightly * staggered fashion, we can still do this loop in 5 fetches * The worse case will be doing two extra quads in some future execution, * in the event of an interrupted clear. * Assumes the wh64 needs to be for 2 trips through the loop in the future * The wh64 is issued on for the starting destination address for trip +2 * through the loop, and if there are less than two trips left, the target * address will be for the current trip. */ nop # E : nop # E : nop # E : bis $16,$16,$3 # E : U L U L : Initial wh64 address is dest /* This might actually help for the current trip... */ $do_wh64: wh64 ($3) # .. .. .. L1 : memory subsystem hint subq $1, 16, $4 # .. .. E .. : Forward calculation - repeat the loop? EX( stq_u $31, 0($16) ) # .. L .. .. subq $0, 8, $0 # E .. .. .. : U L U L addq $16, 128, $3 # E : Target address of wh64 EX( stq_u $31, 8($16) ) # L : EX( stq_u $31, 16($16) ) # L : subq $0, 16, $0 # E : U L L U nop # E : EX( stq_u $31, 24($16) ) # L : EX( stq_u $31, 32($16) ) # L : subq $0, 168, $5 # E : U L L U : two trips through the loop left? 
/* 168 = 192 - 24, since we've already completed some stores */ subq $0, 16, $0 # E : EX( stq_u $31, 40($16) ) # L : EX( stq_u $31, 48($16) ) # L : cmovlt $5, $16, $3 # E : U L L U : Latency 2, extra mapping cycle subq $1, 8, $1 # E : subq $0, 16, $0 # E : EX( stq_u $31, 56($16) ) # L : nop # E : U L U L nop # E : subq $0, 8, $0 # E : addq $16, 64, $16 # E : bge $4, $do_wh64 # U : U L U L $trailquad: # zero to 16 quadwords left to store, plus any trailing bytes # $1 is the number of quadwords left to go. # nop # .. .. .. E nop # .. .. E .. nop # .. E .. .. beq $1, $trailbytes # U .. .. .. : U L U L : Only 0..7 bytes to go $onequad: EX( stq_u $31, 0($16) ) # .. .. .. L subq $1, 1, $1 # .. .. E .. subq $0, 8, $0 # .. E .. .. nop # E .. .. .. : U L U L nop # .. .. .. E nop # .. .. E .. addq $16, 8, $16 # .. E .. .. bgt $1, $onequad # U .. .. .. : U L U L # We have an unknown number of bytes left to go. $trailbytes: nop # .. .. .. E nop # .. .. E .. nop # .. E .. .. beq $0, $zerolength # U .. .. .. : U L U L # $0 contains the number of bytes left to copy (0..31) # so we will use $0 as the loop counter # We know for a fact that $0 > 0 zero due to previous context $onebyte: EX( stb $31, 0($16) ) # .. .. .. L subq $0, 1, $0 # .. .. E .. : addq $16, 1, $16 # .. E .. .. : bgt $0, $onebyte # U .. .. .. : U L U L $zerolength: $exception: # Destination for exception recovery(?) nop # .. .. .. E : nop # .. .. E .. : nop # .. E .. .. : ret $31, ($26), 1 # L0 .. .. .. : L U L U .end __clear_user EXPORT_SYMBOL(__clear_user)
AirFortressIlikara/LS2K0300-linux-4.19
1,220
arch/alpha/lib/strlen.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * strlen.S (c) 1995 David Mosberger (davidm@cs.arizona.edu) * * Finds length of a 0-terminated string. Optimized for the * Alpha architecture: * * - memory accessed as aligned quadwords only * - uses bcmpge to compare 8 bytes in parallel * - does binary search to find 0 byte in last * quadword (HAKMEM needed 12 instructions to * do this instead of the 9 instructions that * binary search needs). */ #include <asm/export.h> .set noreorder .set noat .align 3 .globl strlen .ent strlen strlen: ldq_u $1, 0($16) # load first quadword ($16 may be misaligned) lda $2, -1($31) insqh $2, $16, $2 andnot $16, 7, $0 or $2, $1, $1 cmpbge $31, $1, $2 # $2 <- bitmask: bit i == 1 <==> i-th byte == 0 bne $2, found loop: ldq $1, 8($0) addq $0, 8, $0 # addr += 8 nop # helps dual issue last two insns cmpbge $31, $1, $2 beq $2, loop found: blbs $2, done # make aligned case fast negq $2, $3 and $2, $3, $2 and $2, 0x0f, $1 addq $0, 4, $3 cmoveq $1, $3, $0 and $2, 0x33, $1 addq $0, 2, $3 cmoveq $1, $3, $0 and $2, 0x55, $1 addq $0, 1, $3 cmoveq $1, $3, $0 done: subq $0, $16, $0 ret $31, ($26) .end strlen EXPORT_SYMBOL(strlen)
AirFortressIlikara/LS2K0300-linux-4.19
2,502
arch/alpha/lib/ev67-strncat.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/ev67-strncat.S * 21264 version contributed by Rick Gorton <rick.gorton@api-networks.com> * * Append no more than COUNT characters from the null-terminated string SRC * to the null-terminated string DST. Always null-terminate the new DST. * * This differs slightly from the semantics in libc in that we never write * past count, whereas libc may write to count+1. This follows the generic * implementation in lib/string.c and is, IMHO, more sensible. * * Much of the information about 21264 scheduling/coding comes from: * Compiler Writer's Guide for the Alpha 21264 * abbreviated as 'CWG' in other comments here * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html * Scheduling notation: * E - either cluster * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1 * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 * Try not to change the actual algorithm if possible for consistency. */ #include <asm/export.h> .text .align 4 .globl strncat .ent strncat strncat: .frame $30, 0, $26 .prologue 0 mov $16, $0 # set up return value beq $18, $zerocount # U : /* Find the end of the string. */ ldq_u $1, 0($16) # L : load first quadword ($16 may be misaligned) lda $2, -1($31) # E : insqh $2, $0, $2 # U : andnot $16, 7, $16 # E : nop # E : or $2, $1, $1 # E : nop # E : nop # E : cmpbge $31, $1, $2 # E : bits set iff byte == 0 bne $2, $found # U : $loop: ldq $1, 8($16) # L : addq $16, 8, $16 # E : cmpbge $31, $1, $2 # E : beq $2, $loop # U : $found: cttz $2, $3 # U0 : addq $16, $3, $16 # E : nop # E : bsr $23, __stxncpy # L0 :/* Now do the append. */ /* Worry about the null termination. */ zapnot $1, $27, $2 # U : was last byte a null? cmplt $27, $24, $5 # E : did we fill the buffer completely? bne $2, 0f # U : ret # L0 : 0: or $5, $18, $2 # E : nop bne $2, 2f # U : and $24, 0x80, $3 # E : no zero next byte nop # E : bne $3, 1f # U : /* Here there are bytes left in the current word. 
Clear one. */ addq $24, $24, $24 # E : end-of-count bit <<= 1 nop # E : 2: zap $1, $24, $1 # U : nop # E : stq_u $1, 0($16) # L : ret # L0 : 1: /* Here we must clear the first byte of the next DST word */ stb $31, 8($16) # L : nop # E : nop # E : ret # L0 : $zerocount: nop # E : nop # E : nop # E : ret # L0 : .end strncat EXPORT_SYMBOL(strncat)
AirFortressIlikara/LS2K0300-linux-4.19
1,032
arch/alpha/lib/strcat.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/strcat.S * Contributed by Richard Henderson (rth@tamu.edu) * * Append a null-terminated string from SRC to DST. */ #include <asm/export.h> .text .align 3 .globl strcat .ent strcat strcat: .frame $30, 0, $26 .prologue 0 mov $16, $0 # set up return value /* Find the end of the string. */ ldq_u $1, 0($16) # load first quadword (a0 may be misaligned) lda $2, -1 insqh $2, $16, $2 andnot $16, 7, $16 or $2, $1, $1 cmpbge $31, $1, $2 # bits set iff byte == 0 bne $2, $found $loop: ldq $1, 8($16) addq $16, 8, $16 cmpbge $31, $1, $2 beq $2, $loop $found: negq $2, $3 # clear all but least set bit and $2, $3, $2 and $2, 0xf0, $3 # binary search for that set bit and $2, 0xcc, $4 and $2, 0xaa, $5 cmovne $3, 4, $3 cmovne $4, 2, $4 cmovne $5, 1, $5 addq $3, $4, $3 addq $16, $5, $16 addq $16, $3, $16 /* Now do the append. */ mov $26, $23 br __stxcpy .end strcat EXPORT_SYMBOL(strcat);
AirFortressIlikara/LS2K0300-linux-4.19
2,019
arch/alpha/lib/strchr.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/strchr.S * Contributed by Richard Henderson (rth@tamu.edu) * * Return the address of a given character within a null-terminated * string, or null if it is not found. */ #include <asm/export.h> #include <asm/regdef.h> .set noreorder .set noat .align 3 .globl strchr .ent strchr strchr: .frame sp, 0, ra .prologue 0 zapnot a1, 1, a1 # e0 : zero extend the search character ldq_u t0, 0(a0) # .. e1 : load first quadword sll a1, 8, t5 # e0 : replicate the search character andnot a0, 7, v0 # .. e1 : align our loop pointer or t5, a1, a1 # e0 : lda t4, -1 # .. e1 : build garbage mask sll a1, 16, t5 # e0 : cmpbge zero, t0, t2 # .. e1 : bits set iff byte == zero mskqh t4, a0, t4 # e0 : or t5, a1, a1 # .. e1 : sll a1, 32, t5 # e0 : cmpbge zero, t4, t4 # .. e1 : bits set iff byte is garbage or t5, a1, a1 # e0 : xor t0, a1, t1 # .. e1 : make bytes == c zero cmpbge zero, t1, t3 # e0 : bits set iff byte == c or t2, t3, t0 # e1 : bits set iff char match or zero match andnot t0, t4, t0 # e0 : clear garbage bits bne t0, $found # .. e1 (zdb) $loop: ldq t0, 8(v0) # e0 : addq v0, 8, v0 # .. e1 : nop # e0 : xor t0, a1, t1 # .. e1 (ev5 data stall) cmpbge zero, t0, t2 # e0 : bits set iff byte == 0 cmpbge zero, t1, t3 # .. e1 : bits set iff byte == c or t2, t3, t0 # e0 : beq t0, $loop # .. e1 (zdb) $found: negq t0, t1 # e0 : clear all but least set bit and t0, t1, t0 # e1 (stall) and t0, t3, t1 # e0 : bit set iff byte was the char beq t1, $retnull # .. e1 (zdb) and t0, 0xf0, t2 # e0 : binary search for that set bit and t0, 0xcc, t3 # .. e1 : and t0, 0xaa, t4 # e0 : cmovne t2, 4, t2 # .. e1 : cmovne t3, 2, t3 # e0 : cmovne t4, 1, t4 # .. e1 : addq t2, t3, t2 # e0 : addq v0, t4, v0 # .. e1 : addq v0, t2, v0 # e0 : ret # .. e1 : $retnull: mov zero, v0 # e0 : ret # .. e1 : .end strchr EXPORT_SYMBOL(strchr)
AirFortressIlikara/LS2K0300-linux-4.19
7,016
arch/alpha/lib/ev6-copy_user.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/ev6-copy_user.S * * 21264 version contributed by Rick Gorton <rick.gorton@alpha-processor.com> * * Copy to/from user space, handling exceptions as we go.. This * isn't exactly pretty. * * This is essentially the same as "memcpy()", but with a few twists. * Notably, we have to make sure that $0 is always up-to-date and * contains the right "bytes left to copy" value (and that it is updated * only _after_ a successful copy). There is also some rather minor * exception setup stuff.. * * Much of the information about 21264 scheduling/coding comes from: * Compiler Writer's Guide for the Alpha 21264 * abbreviated as 'CWG' in other comments here * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html * Scheduling notation: * E - either cluster * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1 * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 */ #include <asm/export.h> /* Allow an exception for an insn; exit if we get one. */ #define EXI(x,y...) \ 99: x,##y; \ .section __ex_table,"a"; \ .long 99b - .; \ lda $31, $exitin-99b($31); \ .previous #define EXO(x,y...) \ 99: x,##y; \ .section __ex_table,"a"; \ .long 99b - .; \ lda $31, $exitout-99b($31); \ .previous .set noat .align 4 .globl __copy_user .ent __copy_user # Pipeline info: Slotting & Comments __copy_user: .prologue 0 mov $18, $0 # .. .. .. E subq $18, 32, $1 # .. .. E. .. : Is this going to be a small copy? nop # .. E .. .. beq $18, $zerolength # U .. .. .. : U L U L and $16,7,$3 # .. .. .. E : is leading dest misalignment ble $1, $onebyteloop # .. .. U .. : 1st branch : small amount of data beq $3, $destaligned # .. U .. .. : 2nd (one cycle fetcher stall) subq $3, 8, $3 # E .. .. .. 
: L U U L : trip counter /* * The fetcher stall also hides the 1 cycle cross-cluster stall for $3 (L --> U) * This loop aligns the destination a byte at a time * We know we have at least one trip through this loop */ $aligndest: EXI( ldbu $1,0($17) ) # .. .. .. L : Keep loads separate from stores addq $16,1,$16 # .. .. E .. : Section 3.8 in the CWG addq $3,1,$3 # .. E .. .. : nop # E .. .. .. : U L U L /* * the -1 is to compensate for the inc($16) done in a previous quadpack * which allows us zero dependencies within either quadpack in the loop */ EXO( stb $1,-1($16) ) # .. .. .. L : addq $17,1,$17 # .. .. E .. : Section 3.8 in the CWG subq $0,1,$0 # .. E .. .. : bne $3, $aligndest # U .. .. .. : U L U L /* * If we fell through into here, we have a minimum of 33 - 7 bytes * If we arrived via branch, we have a minimum of 32 bytes */ $destaligned: and $17,7,$1 # .. .. .. E : Check _current_ source alignment bic $0,7,$4 # .. .. E .. : number bytes as a quadword loop EXI( ldq_u $3,0($17) ) # .. L .. .. : Forward fetch for fallthrough code beq $1,$quadaligned # U .. .. .. : U L U L /* * In the worst case, we've just executed an ldq_u here from 0($17) * and we'll repeat it once if we take the branch */ /* Misaligned quadword loop - not unrolled. Leave it that way. */ $misquad: EXI( ldq_u $2,8($17) ) # .. .. .. L : subq $4,8,$4 # .. .. E .. : extql $3,$17,$3 # .. U .. .. : extqh $2,$17,$1 # U .. .. .. : U U L L bis $3,$1,$1 # .. .. .. E : EXO( stq $1,0($16) ) # .. .. L .. : addq $17,8,$17 # .. E .. .. : subq $0,8,$0 # E .. .. .. : U L L U addq $16,8,$16 # .. .. .. E : bis $2,$2,$3 # .. .. E .. : nop # .. E .. .. : bne $4,$misquad # U .. .. .. : U L U L nop # .. .. .. E nop # .. .. E .. nop # .. E .. .. beq $0,$zerolength # U .. .. .. : U L U L /* We know we have at least one trip through the byte loop */ EXI ( ldbu $2,0($17) ) # .. .. .. L : No loads in the same quad addq $16,1,$16 # .. .. E .. : as the store (Section 3.8 in CWG) nop # .. E .. .. 
: br $31, $dirtyentry # L0 .. .. .. : L U U L /* Do the trailing byte loop load, then hop into the store part of the loop */ /* * A minimum of (33 - 7) bytes to do a quad at a time. * Based upon the usage context, it's worth the effort to unroll this loop * $0 - number of bytes to be moved * $4 - number of bytes to move as quadwords * $16 is current destination address * $17 is current source address */ $quadaligned: subq $4, 32, $2 # .. .. .. E : do not unroll for small stuff nop # .. .. E .. nop # .. E .. .. blt $2, $onequad # U .. .. .. : U L U L /* * There is a significant assumption here that the source and destination * addresses differ by more than 32 bytes. In this particular case, a * sparsity of registers further bounds this to be a minimum of 8 bytes. * But if this isn't met, then the output result will be incorrect. * Furthermore, due to a lack of available registers, we really can't * unroll this to be an 8x loop (which would enable us to use the wh64 * instruction memory hint instruction). */ $unroll4: EXI( ldq $1,0($17) ) # .. .. .. L EXI( ldq $2,8($17) ) # .. .. L .. subq $4,32,$4 # .. E .. .. nop # E .. .. .. : U U L L addq $17,16,$17 # .. .. .. E EXO( stq $1,0($16) ) # .. .. L .. EXO( stq $2,8($16) ) # .. L .. .. subq $0,16,$0 # E .. .. .. : U L L U addq $16,16,$16 # .. .. .. E EXI( ldq $1,0($17) ) # .. .. L .. EXI( ldq $2,8($17) ) # .. L .. .. subq $4, 32, $3 # E .. .. .. : U U L L : is there enough for another trip? EXO( stq $1,0($16) ) # .. .. .. L EXO( stq $2,8($16) ) # .. .. L .. subq $0,16,$0 # .. E .. .. addq $17,16,$17 # E .. .. .. : U L L U nop # .. .. .. E nop # .. .. E .. addq $16,16,$16 # .. E .. .. bgt $3,$unroll4 # U .. .. .. : U L U L nop nop nop beq $4, $noquads $onequad: EXI( ldq $1,0($17) ) subq $4,8,$4 addq $17,8,$17 nop EXO( stq $1,0($16) ) subq $0,8,$0 addq $16,8,$16 bne $4,$onequad $noquads: nop nop nop beq $0,$zerolength /* * For small copies (or the tail of a larger copy), do a very simple byte loop. 
* There's no point in doing a lot of complex alignment calculations to try to * to quadword stuff for a small amount of data. * $0 - remaining number of bytes left to copy * $16 - current dest addr * $17 - current source addr */ $onebyteloop: EXI ( ldbu $2,0($17) ) # .. .. .. L : No loads in the same quad addq $16,1,$16 # .. .. E .. : as the store (Section 3.8 in CWG) nop # .. E .. .. : nop # E .. .. .. : U L U L $dirtyentry: /* * the -1 is to compensate for the inc($16) done in a previous quadpack * which allows us zero dependencies within either quadpack in the loop */ EXO ( stb $2,-1($16) ) # .. .. .. L : addq $17,1,$17 # .. .. E .. : quadpack as the load subq $0,1,$0 # .. E .. .. : change count _after_ copy bgt $0,$onebyteloop # U .. .. .. : U L U L $zerolength: $exitin: $exitout: # Destination for exception recovery(?) nop # .. .. .. E nop # .. .. E .. nop # .. E .. .. ret $31,($26),1 # L0 .. .. .. : L U L U .end __copy_user EXPORT_SYMBOL(__copy_user)
AirFortressIlikara/LS2K0300-linux-4.19
1,572
arch/alpha/lib/strncpy.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/lib/strncpy.S * Contributed by Richard Henderson (rth@tamu.edu) * * Copy no more than COUNT bytes of the null-terminated string from * SRC to DST. If SRC does not cover all of COUNT, the balance is * zeroed. * * Or, rather, if the kernel cared about that weird ANSI quirk. This * version has cropped that bit o' nastiness as well as assuming that * __stxncpy is in range of a branch. */ #include <asm/export.h> .set noat .set noreorder .text .align 4 .globl strncpy .ent strncpy strncpy: .frame $30, 0, $26 .prologue 0 mov $16, $0 # set return value now beq $18, $zerolen unop bsr $23, __stxncpy # do the work of the copy unop bne $18, $multiword # do we have full words left? subq $24, 1, $3 # nope subq $27, 1, $4 or $3, $24, $3 # clear the bits between the last or $4, $27, $4 # written byte and the last byte in COUNT andnot $3, $4, $4 zap $1, $4, $1 stq_u $1, 0($16) ret .align 4 $multiword: subq $27, 1, $2 # clear the final bits in the prev word or $2, $27, $2 zapnot $1, $2, $1 subq $18, 1, $18 stq_u $1, 0($16) addq $16, 8, $16 unop beq $18, 1f nop unop nop blbc $18, 0f stq_u $31, 0($16) # zero one word subq $18, 1, $18 addq $16, 8, $16 beq $18, 1f 0: stq_u $31, 0($16) # zero two words subq $18, 2, $18 stq_u $31, 8($16) addq $16, 16, $16 bne $18, 0b 1: ldq_u $1, 0($16) # clear the leading bits in the final word subq $24, 1, $2 or $2, $24, $2 zap $1, $2, $1 stq_u $1, 0($16) $zerolen: ret .end strncpy EXPORT_SYMBOL(strncpy)
AirFortressIlikara/LS2K0300-linux-4.19
1,845
arch/alpha/boot/head.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/alpha/boot/head.S * * initial bootloader stuff.. */ #include <asm/pal.h> .set noreorder .globl __start .ent __start __start: br $29,2f 2: ldgp $29,0($29) jsr $26,start_kernel call_pal PAL_halt .end __start .align 5 .globl wrent .ent wrent wrent: .prologue 0 call_pal PAL_wrent ret ($26) .end wrent .align 5 .globl wrkgp .ent wrkgp wrkgp: .prologue 0 call_pal PAL_wrkgp ret ($26) .end wrkgp .align 5 .globl switch_to_osf_pal .ent switch_to_osf_pal switch_to_osf_pal: subq $30,128,$30 .frame $30,128,$26 stq $26,0($30) stq $1,8($30) stq $2,16($30) stq $3,24($30) stq $4,32($30) stq $5,40($30) stq $6,48($30) stq $7,56($30) stq $8,64($30) stq $9,72($30) stq $10,80($30) stq $11,88($30) stq $12,96($30) stq $13,104($30) stq $14,112($30) stq $15,120($30) .prologue 0 stq $30,0($17) /* save KSP in PCB */ bis $30,$30,$20 /* a4 = KSP */ br $17,1f ldq $26,0($30) ldq $1,8($30) ldq $2,16($30) ldq $3,24($30) ldq $4,32($30) ldq $5,40($30) ldq $6,48($30) ldq $7,56($30) ldq $8,64($30) ldq $9,72($30) ldq $10,80($30) ldq $11,88($30) ldq $12,96($30) ldq $13,104($30) ldq $14,112($30) ldq $15,120($30) addq $30,128,$30 ret ($26) 1: call_pal PAL_swppal .end switch_to_osf_pal .align 3 .globl tbi .ent tbi tbi: .prologue 0 call_pal PAL_tbi ret ($26) .end tbi .align 3 .globl halt .ent halt halt: .prologue 0 call_pal PAL_halt .end halt /* $16 - new stack page */ .align 3 .globl move_stack .ent move_stack move_stack: .prologue 0 lda $0, 0x1fff($31) and $0, $30, $1 /* Stack offset */ or $1, $16, $16 /* New stack pointer */ mov $30, $1 mov $16, $2 1: ldq $3, 0($1) /* Move the stack */ addq $1, 8, $1 stq $3, 0($2) and $0, $1, $4 addq $2, 8, $2 bne $4, 1b mov $16, $30 ret ($26) .end move_stack
AirFortressIlikara/LS2K0300-linux-4.19
1,318
arch/mips/power/hibernate_asm.S
/* * Hibernation support specific for mips - temporary page tables * * Licensed under the GPLv2 * * Copyright (C) 2009 Lemote Inc. * Author: Hu Hongbing <huhb@lemote.com> * Wu Zhangjin <wuzhangjin@gmail.com> */ #include <asm/asm-offsets.h> #include <asm/regdef.h> #include <asm/asm.h> .text LEAF(swsusp_arch_save) PTR_LA t0, saved_regs PTR_S ra, PT_R31(t0) PTR_S sp, PT_R29(t0) PTR_S fp, PT_R30(t0) PTR_S gp, PT_R28(t0) PTR_S s0, PT_R16(t0) PTR_S s1, PT_R17(t0) PTR_S s2, PT_R18(t0) PTR_S s3, PT_R19(t0) PTR_S s4, PT_R20(t0) PTR_S s5, PT_R21(t0) PTR_S s6, PT_R22(t0) PTR_S s7, PT_R23(t0) j swsusp_save END(swsusp_arch_save) LEAF(restore_image) jal local_flush_tlb_all nop PTR_L t0, restore_pblist 0: PTR_L t1, PBE_ADDRESS(t0) /* source */ PTR_L t2, PBE_ORIG_ADDRESS(t0) /* destination */ PTR_ADDU t3, t1, _PAGE_SIZE 1: REG_L t8, (t1) REG_S t8, (t2) PTR_ADDIU t1, t1, SZREG PTR_ADDIU t2, t2, SZREG bne t1, t3, 1b PTR_L t0, PBE_NEXT(t0) bnez t0, 0b PTR_LA t0, saved_regs PTR_L ra, PT_R31(t0) PTR_L sp, PT_R29(t0) PTR_L fp, PT_R30(t0) PTR_L gp, PT_R28(t0) PTR_L s0, PT_R16(t0) PTR_L s1, PT_R17(t0) PTR_L s2, PT_R18(t0) PTR_L s3, PT_R19(t0) PTR_L s4, PT_R20(t0) PTR_L s5, PT_R21(t0) PTR_L s6, PT_R22(t0) PTR_L s7, PT_R23(t0) PTR_LI v0, 0x0 jr ra END(restore_image)
AirFortressIlikara/LS2K0300-linux-4.19
2,750
arch/mips/vdso/vdso.lds.S
/* * Copyright (C) 2015 Imagination Technologies * Author: Alex Smith <alex.smith@imgtec.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <asm/sgidefs.h> #if _MIPS_SIM == _MIPS_SIM_ABI64 OUTPUT_FORMAT("elf64-tradlittlemips", "elf64-tradbigmips", "elf64-tradlittlemips") #elif _MIPS_SIM == _MIPS_SIM_NABI32 OUTPUT_FORMAT("elf32-ntradlittlemips", "elf32-ntradbigmips", "elf32-ntradlittlemips") #else OUTPUT_FORMAT("elf32-tradlittlemips", "elf32-tradbigmips", "elf32-tradlittlemips") #endif OUTPUT_ARCH(mips) SECTIONS { PROVIDE(_start = .); . = SIZEOF_HEADERS; /* * In order to retain compatibility with older toolchains we provide the * ABI flags section ourself. Newer assemblers will automatically * generate .MIPS.abiflags sections so we discard such input sections, * and then manually define our own section here. genvdso will patch * this section to have the correct name/type. */ .mips_abiflags : { *(.mips_abiflags) } :text :abiflags .reginfo : { *(.reginfo) } :text :reginfo .hash : { *(.hash) } :text .gnu.hash : { *(.gnu.hash) } .dynsym : { *(.dynsym) } .dynstr : { *(.dynstr) } .gnu.version : { *(.gnu.version) } .gnu.version_d : { *(.gnu.version_d) } .gnu.version_r : { *(.gnu.version_r) } .note : { *(.note.*) } :text :note .text : { *(.text*) } :text PROVIDE (__etext = .); PROVIDE (_etext = .); PROVIDE (etext = .); .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr .eh_frame : { KEEP (*(.eh_frame)) } :text .dynamic : { *(.dynamic) } :text :dynamic .rodata : { *(.rodata*) } :text _end = .; PROVIDE(end = .); /DISCARD/ : { *(.MIPS.abiflags) *(.gnu.attributes) *(.note.GNU-stack) *(.data .data.* .gnu.linkonce.d.* .sdata*) *(.bss .sbss .dynbss .dynsbss) } } PHDRS { /* * Provide a PT_MIPS_ABIFLAGS header to assign the ABI flags section * to. 
We can specify the header type directly here so no modification * is needed later on. */ abiflags 0x70000003; /* * The ABI flags header must exist directly after the PT_INTERP header, * so we must explicitly place the PT_MIPS_REGINFO header after it to * stop the linker putting one in at the start. */ reginfo 0x70000000; text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */ dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ note PT_NOTE FLAGS(4); /* PF_R */ eh_frame_hdr PT_GNU_EH_FRAME; } VERSION { LINUX_2.6 { #ifndef DISABLE_MIPS_VDSO global: __vdso_clock_gettime; __vdso_gettimeofday; __vdso_getpid; __vdso_getuid; #endif local: *; }; }
AirFortressIlikara/LS2K0300-linux-4.19
1,803
arch/mips/vdso/elf.S
/* * Copyright (C) 2015 Imagination Technologies * Author: Alex Smith <alex.smith@imgtec.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include "vdso.h" #include <asm/isa-rev.h> #include <linux/elfnote.h> #include <linux/version.h> ELFNOTE_START(Linux, 0, "a") .long LINUX_VERSION_CODE ELFNOTE_END /* * The .MIPS.abiflags section must be defined with the FP ABI flags set * to 'any' to be able to link with both old and new libraries. * Newer toolchains are capable of automatically generating this, but we want * to work with older toolchains as well. Therefore, we define the contents of * this section here (under different names), and then genvdso will patch * it to have the correct name and type. * * We base the .MIPS.abiflags section on preprocessor definitions rather than * CONFIG_* because we need to match the particular ABI we are building the * VDSO for. * * See https://dmz-portal.mips.com/wiki/MIPS_O32_ABI_-_FR0_and_FR1_Interlinking * for the .MIPS.abiflags section description. */ .section .mips_abiflags, "a" .align 3 __mips_abiflags: .hword 0 /* version */ .byte __mips /* isa_level */ /* isa_rev */ .byte MIPS_ISA_REV /* gpr_size */ #ifdef __mips64 .byte 2 /* AFL_REG_64 */ #else .byte 1 /* AFL_REG_32 */ #endif /* cpr1_size */ #if (MIPS_ISA_REV >= 6) || defined(__mips64) .byte 2 /* AFL_REG_64 */ #else .byte 1 /* AFL_REG_32 */ #endif .byte 0 /* cpr2_size (AFL_REG_NONE) */ .byte 0 /* fp_abi (Val_GNU_MIPS_ABI_FP_ANY) */ .word 0 /* isa_ext */ .word 0 /* ases */ .word 0 /* flags1 */ .word 0 /* flags2 */
AirFortressIlikara/LS2K0300-linux-4.19
7,649
arch/mips/dec/int-handler.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 1995, 1996, 1997 Paul M. Antoine and Harald Koerfgen * Copyright (C) 2000, 2001, 2002, 2003, 2005 Maciej W. Rozycki * * Written by Ralf Baechle and Andreas Busse, modified for DECstation * support by Paul Antoine and Harald Koerfgen. * * completely rewritten: * Copyright (C) 1998 Harald Koerfgen * * Rewritten extensively for controller-driven IRQ support * by Maciej W. Rozycki. */ #include <asm/addrspace.h> #include <asm/asm.h> #include <asm/mipsregs.h> #include <asm/regdef.h> #include <asm/stackframe.h> #include <asm/dec/interrupts.h> #include <asm/dec/ioasic_addrs.h> #include <asm/dec/ioasic_ints.h> #include <asm/dec/kn01.h> #include <asm/dec/kn02.h> #include <asm/dec/kn02xa.h> #include <asm/dec/kn03.h> #define KN02_CSR_BASE CKSEG1ADDR(KN02_SLOT_BASE + KN02_CSR) #define KN02XA_IOASIC_BASE CKSEG1ADDR(KN02XA_SLOT_BASE + IOASIC_IOCTL) #define KN03_IOASIC_BASE CKSEG1ADDR(KN03_SLOT_BASE + IOASIC_IOCTL) .text .set noreorder /* * plat_irq_dispatch: Interrupt handler for DECstations * * We follow the model in the Indy interrupt code by David Miller, where he * says: a lot of complication here is taken away because: * * 1) We handle one interrupt and return, sitting in a loop * and moving across all the pending IRQ bits in the cause * register is _NOT_ the answer, the common case is one * pending IRQ so optimize in that direction. * * 2) We need not check against bits in the status register * IRQ mask, that would make this routine slow as hell. * * 3) Linux only thinks in terms of all IRQs on or all IRQs * off, nothing in between like BSD spl() brain-damage. * * Furthermore, the IRQs on the DECstations look basically (barring * software IRQs which we don't use at all) like... 
* * DS2100/3100's, aka kn01, aka Pmax: * * MIPS IRQ Source * -------- ------ * 0 Software (ignored) * 1 Software (ignored) * 2 SCSI * 3 Lance Ethernet * 4 DZ11 serial * 5 RTC * 6 Memory Controller & Video * 7 FPU * * DS5000/200, aka kn02, aka 3max: * * MIPS IRQ Source * -------- ------ * 0 Software (ignored) * 1 Software (ignored) * 2 TurboChannel * 3 RTC * 4 Reserved * 5 Memory Controller * 6 Reserved * 7 FPU * * DS5000/1xx's, aka kn02ba, aka 3min: * * MIPS IRQ Source * -------- ------ * 0 Software (ignored) * 1 Software (ignored) * 2 TurboChannel Slot 0 * 3 TurboChannel Slot 1 * 4 TurboChannel Slot 2 * 5 TurboChannel Slot 3 (ASIC) * 6 Halt button * 7 FPU/R4k timer * * DS5000/2x's, aka kn02ca, aka maxine: * * MIPS IRQ Source * -------- ------ * 0 Software (ignored) * 1 Software (ignored) * 2 Periodic Interrupt (100usec) * 3 RTC * 4 I/O write timeout * 5 TurboChannel (ASIC) * 6 Halt Keycode from Access.Bus keyboard (CTRL-ALT-ENTER) * 7 FPU/R4k timer * * DS5000/2xx's, aka kn03, aka 3maxplus: * * MIPS IRQ Source * -------- ------ * 0 Software (ignored) * 1 Software (ignored) * 2 System Board (ASIC) * 3 RTC * 4 Reserved * 5 Memory * 6 Halt Button * 7 FPU/R4k timer * * We handle the IRQ according to _our_ priority (see setup.c), * then we just return. If multiple IRQs are pending then we will * just take another exception, big deal. */ .align 5 NESTED(plat_irq_dispatch, PT_SIZE, ra) .set noreorder /* * Get pending Interrupts */ mfc0 t0,CP0_CAUSE # get pending interrupts mfc0 t1,CP0_STATUS #ifdef CONFIG_32BIT lw t2,cpu_fpu_mask #endif andi t0,ST0_IM # CAUSE.CE may be non-zero! 
and t0,t1 # isolate allowed ones beqz t0,spurious #ifdef CONFIG_32BIT and t2,t0 bnez t2,fpu # handle FPU immediately #endif /* * Find irq with highest priority */ # open coded PTR_LA t1, cpu_mask_nr_tbl #if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32) # open coded la t1, cpu_mask_nr_tbl lui t1, %hi(cpu_mask_nr_tbl) addiu t1, %lo(cpu_mask_nr_tbl) #else #error GCC `-msym32' option required for 64-bit DECstation builds #endif 1: lw t2,(t1) nop and t2,t0 beqz t2,1b addu t1,2*PTRSIZE # delay slot /* * Do the low-level stuff */ lw a0,(-PTRSIZE)(t1) nop bgez a0,handle_it # irq_nr >= 0? # irq_nr < 0: it is an address nop jr a0 # a trick to save a branch: lui t2,(KN03_IOASIC_BASE>>16)&0xffff # upper part of IOASIC Address /* * Handle "IRQ Controller" Interrupts * Masked Interrupts are still visible and have to be masked "by hand". */ FEXPORT(kn02_io_int) # 3max lui t0,(KN02_CSR_BASE>>16)&0xffff # get interrupt status and mask lw t0,(t0) nop andi t1,t0,KN02_IRQ_ALL b 1f srl t0,16 # shift interrupt mask FEXPORT(kn02xa_io_int) # 3min/maxine lui t2,(KN02XA_IOASIC_BASE>>16)&0xffff # upper part of IOASIC Address FEXPORT(kn03_io_int) # 3max+ (t2 loaded earlier) lw t0,IO_REG_SIR(t2) # get status: IOASIC sir lw t1,IO_REG_SIMR(t2) # get mask: IOASIC simr nop 1: and t0,t1 # mask out allowed ones beqz t0,spurious /* * Find irq with highest priority */ # open coded PTR_LA t1,asic_mask_nr_tbl #if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32) # open coded la t1, asic_mask_nr_tbl lui t1, %hi(asic_mask_nr_tbl) addiu t1, %lo(asic_mask_nr_tbl) #else #error GCC `-msym32' option required for 64-bit DECstation builds #endif 2: lw t2,(t1) nop and t2,t0 beq zero,t2,2b addu t1,2*PTRSIZE # delay slot /* * Do the low-level stuff */ lw a0,%lo(-PTRSIZE)(t1) nop bgez a0,handle_it # irq_nr >= 0? # irq_nr < 0: it is an address nop jr a0 nop # delay slot /* * Dispatch low-priority interrupts. 
We reconsider all status * bits again, which looks like a lose, but it makes the code * simple and O(log n), so it gets compensated. */ FEXPORT(cpu_all_int) # HALT, timers, software junk li a0,DEC_CPU_IRQ_BASE srl t0,CAUSEB_IP li t1,CAUSEF_IP>>CAUSEB_IP # mask b 1f li t2,4 # nr of bits / 2 FEXPORT(kn02_all_int) # impossible ? li a0,KN02_IRQ_BASE li t1,KN02_IRQ_ALL # mask b 1f li t2,4 # nr of bits / 2 FEXPORT(asic_all_int) # various I/O ASIC junk li a0,IO_IRQ_BASE li t1,IO_IRQ_ALL # mask b 1f li t2,8 # nr of bits / 2 /* * Dispatch DMA interrupts -- O(log n). */ FEXPORT(asic_dma_int) # I/O ASIC DMA events li a0,IO_IRQ_BASE+IO_INR_DMA srl t0,IO_INR_DMA li t1,IO_IRQ_DMA>>IO_INR_DMA # mask li t2,8 # nr of bits / 2 /* * Find irq with highest priority. * Highest irq number takes precedence. */ 1: srlv t3,t1,t2 2: xor t1,t3 and t3,t0,t1 beqz t3,3f nop move t0,t3 addu a0,t2 3: srl t2,1 bnez t2,2b srlv t3,t1,t2 handle_it: j dec_irq_dispatch nop #ifdef CONFIG_32BIT fpu: lw t0,fpu_kstat_irq nop lw t1,(t0) nop addu t1,1 j handle_fpe_int sw t1,(t0) #endif spurious: j spurious_interrupt nop END(plat_irq_dispatch) /* * Generic unimplemented interrupt routines -- cpu_mask_nr_tbl * and asic_mask_nr_tbl are initialized to point all interrupts here. * The tables are then filled in by machine-specific initialisation * in dec_setup(). */ FEXPORT(dec_intr_unimplemented) move a1,t0 # cheats way of printing an arg! PANIC("Unimplemented cpu interrupt! CP0_CAUSE: 0x%08x"); FEXPORT(asic_intr_unimplemented) move a1,t0 # cheats way of printing an arg! PANIC("Unimplemented asic interrupt! ASIC ISR: 0x%08x");
AirFortressIlikara/LS2K0300-linux-4.19
4,360
arch/mips/kernel/head.S
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1994, 1995 Waldorf Electronics * Written by Ralf Baechle and Andreas Busse * Copyright (C) 1994 - 99, 2003, 06 Ralf Baechle * Copyright (C) 1996 Paul M. Antoine * Modified for DECStation and hence R3000 support by Paul M. Antoine * Further modifications by David S. Miller and Harald Koerfgen * Copyright (C) 1999 Silicon Graphics, Inc. * Kevin Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved. */ #include <linux/init.h> #include <linux/threads.h> #include <asm/addrspace.h> #include <asm/asm.h> #include <asm/asmmacro.h> #include <asm/irqflags.h> #include <asm/regdef.h> #include <asm/mipsregs.h> #include <asm/stackframe.h> #include <kernel-entry-init.h> /* * For the moment disable interrupts, mark the kernel mode and * set ST0_KX so that the CPU does not spit fire when using * 64-bit addresses. A full initialization of the CPU's status * register is done later in per_cpu_trap_init(). */ .macro setup_c0_status set clr .set push mfc0 t0, CP0_STATUS or t0, ST0_CU0|\set|0x1f|\clr xor t0, 0x1f|\clr mtc0 t0, CP0_STATUS .set noreorder sll zero,3 # ehb .set pop .endm .macro setup_c0_status_pri #ifdef CONFIG_64BIT #ifdef CONFIG_CPU_LOONGSON3 setup_c0_status ST0_KX|ST0_MM 0 #else setup_c0_status ST0_KX 0 #endif #else #ifdef CONFIG_CPU_LOONGSON3 setup_c0_status ST0_MM 0 #else setup_c0_status 0 0 #endif #endif .endm .macro setup_c0_status_sec #ifdef CONFIG_64BIT #ifdef CONFIG_CPU_LOONGSON3 setup_c0_status ST0_KX|ST0_MM ST0_BEV #else setup_c0_status ST0_KX ST0_BEV #endif #else #ifdef CONFIG_CPU_LOONGSON3 setup_c0_status ST0_MM ST0_BEV #else setup_c0_status 0 ST0_BEV #endif #endif .endm #ifndef CONFIG_NO_EXCEPT_FILL /* * Reserved space for exception handlers. 
* Necessary for machines which link their kernels at KSEG0. */ .fill 0x400 #endif EXPORT(_stext) #ifdef CONFIG_BOOT_RAW /* * Give us a fighting chance of running if execution beings at the * kernel load address. This is needed because this platform does * not have a ELF loader yet. */ FEXPORT(__kernel_entry) j kernel_entry #endif __REF NESTED(kernel_entry, 16, sp) # kernel entry point kernel_entry_setup # cpu specific setup setup_c0_status_pri /* We might not get launched at the address the kernel is linked to, so we jump there. */ PTR_LA t0, 0f jr t0 0: #ifdef CONFIG_USE_OF #ifdef CONFIG_MIPS_RAW_APPENDED_DTB PTR_LA t2, __appended_dtb #ifdef CONFIG_CPU_BIG_ENDIAN li t1, 0xd00dfeed #else li t1, 0xedfe0dd0 #endif lw t0, (t2) beq t0, t1, dtb_found #endif li t1, -2 move t2, a1 beq a0, t1, dtb_found li t2, 0 dtb_found: #endif PTR_LA t0, __bss_start # clear .bss LONG_S zero, (t0) PTR_LA t1, __bss_stop - LONGSIZE 1: PTR_ADDIU t0, LONGSIZE LONG_S zero, (t0) bne t0, t1, 1b LONG_S a0, fw_arg0 # firmware arguments LONG_S a1, fw_arg1 LONG_S a2, fw_arg2 LONG_S a3, fw_arg3 #ifdef CONFIG_USE_OF LONG_S t2, fw_passed_dtb #endif MTC0 zero, CP0_CONTEXT # clear context register PTR_LA $28, init_thread_union /* Set the SP after an empty pt_regs. */ PTR_LI sp, _THREAD_SIZE - 32 - PT_SIZE PTR_ADDU sp, $28 back_to_back_c0_hazard set_saved_sp sp, t0, t1 PTR_SUBU sp, 4 * SZREG # init stack pointer #ifdef CONFIG_RELOCATABLE /* Copy kernel and apply the relocations */ jal relocate_kernel /* Repoint the sp into the new kernel image */ PTR_LI sp, _THREAD_SIZE - 32 - PT_SIZE PTR_ADDU sp, $28 set_saved_sp sp, t0, t1 PTR_SUBU sp, 4 * SZREG # init stack pointer /* * relocate_kernel returns the entry point either * in the relocated kernel or the original if for * some reason relocation failed - jump there now * with instruction hazard barrier because of the * newly sync'd icache. */ jr.hb v0 #else j start_kernel #endif END(kernel_entry) #ifdef CONFIG_SMP /* * SMP slave cpus entry point. 
Board specific code for bootstrap calls this * function after setting up the stack and gp registers. */ NESTED(smp_bootstrap, 16, sp) smp_slave_setup setup_c0_status_sec j start_secondary END(smp_bootstrap) #endif /* CONFIG_SMP */
AirFortressIlikara/LS2K0300-linux-4.19
11,586
arch/mips/kernel/r4k_fpu.S
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1996, 98, 99, 2000, 01 Ralf Baechle * * Multi-arch abstraction and asm macros for easier reading: * Copyright (C) 1996 David S. Miller (davem@davemloft.net) * * Carsten Langgaard, carstenl@mips.com * Copyright (C) 2000 MIPS Technologies, Inc. * Copyright (C) 1999, 2001 Silicon Graphics, Inc. */ #include <asm/asm.h> #include <asm/asmmacro.h> #include <asm/errno.h> #include <asm/export.h> #include <asm/fpregdef.h> #include <asm/mipsregs.h> #include <asm/asm-offsets.h> #include <asm/regdef.h> /* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */ #undef fp .macro EX insn, reg, src .set push SET_HARDFLOAT .set nomacro .ex\@: \insn \reg, \src .set pop .section __ex_table,"a" PTR .ex\@, fault .previous .endm /* * Save a thread's fp context. */ LEAF(_save_fp) EXPORT_SYMBOL(_save_fp) #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \ defined(CONFIG_CPU_MIPSR6) mfc0 t0, CP0_STATUS #endif fpu_save_double a0 t0 t1 # clobbers t1 jr ra END(_save_fp) /* * Restore a thread's fp context. */ LEAF(_restore_fp) #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \ defined(CONFIG_CPU_MIPSR6) mfc0 t0, CP0_STATUS #endif fpu_restore_double a0 t0 t1 # clobbers t1 jr ra END(_restore_fp) #ifdef CONFIG_CPU_HAS_MSA /* * Save a thread's MSA vector context. */ LEAF(_save_msa) EXPORT_SYMBOL(_save_msa) msa_save_all a0 jr ra END(_save_msa) /* * Restore a thread's MSA vector context. */ LEAF(_restore_msa) msa_restore_all a0 jr ra END(_restore_msa) LEAF(_init_msa_upper) msa_init_all_upper jr ra END(_init_msa_upper) #endif /* * Load the FPU with signalling NANS. This bit pattern we're using has * the property that no matter whether considered as single or as double * precision represents signaling NANS. * * The value to initialize fcr31 to comes in $a0. 
*/ .set push SET_HARDFLOAT LEAF(_init_fpu) mfc0 t0, CP0_STATUS li t1, ST0_CU1 or t0, t1 mtc0 t0, CP0_STATUS enable_fpu_hazard ctc1 a0, fcr31 li t1, -1 # SNaN #ifdef CONFIG_64BIT sll t0, t0, 5 bgez t0, 1f # 16 / 32 register mode? dmtc1 t1, $f1 dmtc1 t1, $f3 dmtc1 t1, $f5 dmtc1 t1, $f7 dmtc1 t1, $f9 dmtc1 t1, $f11 dmtc1 t1, $f13 dmtc1 t1, $f15 dmtc1 t1, $f17 dmtc1 t1, $f19 dmtc1 t1, $f21 dmtc1 t1, $f23 dmtc1 t1, $f25 dmtc1 t1, $f27 dmtc1 t1, $f29 dmtc1 t1, $f31 1: #endif #ifdef CONFIG_CPU_MIPS32 mtc1 t1, $f0 mtc1 t1, $f1 mtc1 t1, $f2 mtc1 t1, $f3 mtc1 t1, $f4 mtc1 t1, $f5 mtc1 t1, $f6 mtc1 t1, $f7 mtc1 t1, $f8 mtc1 t1, $f9 mtc1 t1, $f10 mtc1 t1, $f11 mtc1 t1, $f12 mtc1 t1, $f13 mtc1 t1, $f14 mtc1 t1, $f15 mtc1 t1, $f16 mtc1 t1, $f17 mtc1 t1, $f18 mtc1 t1, $f19 mtc1 t1, $f20 mtc1 t1, $f21 mtc1 t1, $f22 mtc1 t1, $f23 mtc1 t1, $f24 mtc1 t1, $f25 mtc1 t1, $f26 mtc1 t1, $f27 mtc1 t1, $f28 mtc1 t1, $f29 mtc1 t1, $f30 mtc1 t1, $f31 #if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) .set push .set MIPS_ISA_LEVEL_RAW .set fp=64 sll t0, t0, 5 # is Status.FR set? 
bgez t0, 1f # no: skip setting upper 32b mthc1 t1, $f0 mthc1 t1, $f1 mthc1 t1, $f2 mthc1 t1, $f3 mthc1 t1, $f4 mthc1 t1, $f5 mthc1 t1, $f6 mthc1 t1, $f7 mthc1 t1, $f8 mthc1 t1, $f9 mthc1 t1, $f10 mthc1 t1, $f11 mthc1 t1, $f12 mthc1 t1, $f13 mthc1 t1, $f14 mthc1 t1, $f15 mthc1 t1, $f16 mthc1 t1, $f17 mthc1 t1, $f18 mthc1 t1, $f19 mthc1 t1, $f20 mthc1 t1, $f21 mthc1 t1, $f22 mthc1 t1, $f23 mthc1 t1, $f24 mthc1 t1, $f25 mthc1 t1, $f26 mthc1 t1, $f27 mthc1 t1, $f28 mthc1 t1, $f29 mthc1 t1, $f30 mthc1 t1, $f31 1: .set pop #endif /* CONFIG_CPU_MIPS32_R2 || CONFIG_CPU_MIPS32_R6 */ #else .set MIPS_ISA_ARCH_LEVEL_RAW dmtc1 t1, $f0 dmtc1 t1, $f2 dmtc1 t1, $f4 dmtc1 t1, $f6 dmtc1 t1, $f8 dmtc1 t1, $f10 dmtc1 t1, $f12 dmtc1 t1, $f14 dmtc1 t1, $f16 dmtc1 t1, $f18 dmtc1 t1, $f20 dmtc1 t1, $f22 dmtc1 t1, $f24 dmtc1 t1, $f26 dmtc1 t1, $f28 dmtc1 t1, $f30 #endif jr ra END(_init_fpu) .set pop /* SET_HARDFLOAT */ .set noreorder /** * _save_fp_context() - save FP context from the FPU * @a0 - pointer to fpregs field of sigcontext * @a1 - pointer to fpc_csr field of sigcontext * * Save FP context, including the 32 FP data registers and the FP * control & status register, from the FPU to signal context. 
*/ LEAF(_save_fp_context) .set push SET_HARDFLOAT cfc1 t1, fcr31 .set pop #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \ defined(CONFIG_CPU_MIPSR6) .set push SET_HARDFLOAT #ifdef CONFIG_CPU_MIPSR2 .set mips32r2 .set fp=64 mfc0 t0, CP0_STATUS sll t0, t0, 5 bgez t0, 1f # skip storing odd if FR=0 nop #endif /* Store the 16 odd double precision registers */ EX sdc1 $f1, 8(a0) EX sdc1 $f3, 24(a0) EX sdc1 $f5, 40(a0) EX sdc1 $f7, 56(a0) EX sdc1 $f9, 72(a0) EX sdc1 $f11, 88(a0) EX sdc1 $f13, 104(a0) EX sdc1 $f15, 120(a0) EX sdc1 $f17, 136(a0) EX sdc1 $f19, 152(a0) EX sdc1 $f21, 168(a0) EX sdc1 $f23, 184(a0) EX sdc1 $f25, 200(a0) EX sdc1 $f27, 216(a0) EX sdc1 $f29, 232(a0) EX sdc1 $f31, 248(a0) 1: .set pop #endif .set push SET_HARDFLOAT /* Store the 16 even double precision registers */ EX sdc1 $f0, 0(a0) EX sdc1 $f2, 16(a0) EX sdc1 $f4, 32(a0) EX sdc1 $f6, 48(a0) EX sdc1 $f8, 64(a0) EX sdc1 $f10, 80(a0) EX sdc1 $f12, 96(a0) EX sdc1 $f14, 112(a0) EX sdc1 $f16, 128(a0) EX sdc1 $f18, 144(a0) EX sdc1 $f20, 160(a0) EX sdc1 $f22, 176(a0) EX sdc1 $f24, 192(a0) EX sdc1 $f26, 208(a0) EX sdc1 $f28, 224(a0) EX sdc1 $f30, 240(a0) EX sw t1, 0(a1) jr ra li v0, 0 # success .set pop END(_save_fp_context) /** * _restore_fp_context() - restore FP context to the FPU * @a0 - pointer to fpregs field of sigcontext * @a1 - pointer to fpc_csr field of sigcontext * * Restore FP context, including the 32 FP data registers and the FP * control & status register, from signal context to the FPU. 
*/ LEAF(_restore_fp_context) EX lw t1, 0(a1) #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \ defined(CONFIG_CPU_MIPSR6) .set push SET_HARDFLOAT #ifdef CONFIG_CPU_MIPSR2 .set mips32r2 .set fp=64 mfc0 t0, CP0_STATUS sll t0, t0, 5 bgez t0, 1f # skip loading odd if FR=0 nop #endif EX ldc1 $f1, 8(a0) EX ldc1 $f3, 24(a0) EX ldc1 $f5, 40(a0) EX ldc1 $f7, 56(a0) EX ldc1 $f9, 72(a0) EX ldc1 $f11, 88(a0) EX ldc1 $f13, 104(a0) EX ldc1 $f15, 120(a0) EX ldc1 $f17, 136(a0) EX ldc1 $f19, 152(a0) EX ldc1 $f21, 168(a0) EX ldc1 $f23, 184(a0) EX ldc1 $f25, 200(a0) EX ldc1 $f27, 216(a0) EX ldc1 $f29, 232(a0) EX ldc1 $f31, 248(a0) 1: .set pop #endif .set push SET_HARDFLOAT EX ldc1 $f0, 0(a0) EX ldc1 $f2, 16(a0) EX ldc1 $f4, 32(a0) EX ldc1 $f6, 48(a0) EX ldc1 $f8, 64(a0) EX ldc1 $f10, 80(a0) EX ldc1 $f12, 96(a0) EX ldc1 $f14, 112(a0) EX ldc1 $f16, 128(a0) EX ldc1 $f18, 144(a0) EX ldc1 $f20, 160(a0) EX ldc1 $f22, 176(a0) EX ldc1 $f24, 192(a0) EX ldc1 $f26, 208(a0) EX ldc1 $f28, 224(a0) EX ldc1 $f30, 240(a0) ctc1 t1, fcr31 .set pop jr ra li v0, 0 # success END(_restore_fp_context) #ifdef CONFIG_CPU_HAS_MSA .macro op_one_wr op, idx, base .align 4 \idx: \op \idx, 0, \base jr ra nop .endm .macro op_msa_wr name, op LEAF(\name) .set push .set noreorder sll t0, a0, 4 PTR_LA t1, 0f PTR_ADDU t0, t0, t1 jr t0 nop op_one_wr \op, 0, a1 op_one_wr \op, 1, a1 op_one_wr \op, 2, a1 op_one_wr \op, 3, a1 op_one_wr \op, 4, a1 op_one_wr \op, 5, a1 op_one_wr \op, 6, a1 op_one_wr \op, 7, a1 op_one_wr \op, 8, a1 op_one_wr \op, 9, a1 op_one_wr \op, 10, a1 op_one_wr \op, 11, a1 op_one_wr \op, 12, a1 op_one_wr \op, 13, a1 op_one_wr \op, 14, a1 op_one_wr \op, 15, a1 op_one_wr \op, 16, a1 op_one_wr \op, 17, a1 op_one_wr \op, 18, a1 op_one_wr \op, 19, a1 op_one_wr \op, 20, a1 op_one_wr \op, 21, a1 op_one_wr \op, 22, a1 op_one_wr \op, 23, a1 op_one_wr \op, 24, a1 op_one_wr \op, 25, a1 op_one_wr \op, 26, a1 op_one_wr \op, 27, a1 op_one_wr \op, 28, a1 op_one_wr \op, 29, a1 op_one_wr \op, 30, a1 op_one_wr 
\op, 31, a1 .set pop END(\name) .endm op_msa_wr read_msa_wr_b, st_b op_msa_wr read_msa_wr_h, st_h op_msa_wr read_msa_wr_w, st_w op_msa_wr read_msa_wr_d, st_d op_msa_wr write_msa_wr_b, ld_b op_msa_wr write_msa_wr_h, ld_h op_msa_wr write_msa_wr_w, ld_w op_msa_wr write_msa_wr_d, ld_d #endif /* CONFIG_CPU_HAS_MSA */ #ifdef CONFIG_CPU_HAS_MSA .macro save_msa_upper wr, off, base .set push .set noat #ifdef CONFIG_64BIT copy_s_d \wr, 1 EX sd $1, \off(\base) #elif defined(CONFIG_CPU_LITTLE_ENDIAN) copy_s_w \wr, 2 EX sw $1, \off(\base) copy_s_w \wr, 3 EX sw $1, (\off+4)(\base) #else /* CONFIG_CPU_BIG_ENDIAN */ copy_s_w \wr, 2 EX sw $1, (\off+4)(\base) copy_s_w \wr, 3 EX sw $1, \off(\base) #endif .set pop .endm LEAF(_save_msa_all_upper) save_msa_upper 0, 0x00, a0 save_msa_upper 1, 0x08, a0 save_msa_upper 2, 0x10, a0 save_msa_upper 3, 0x18, a0 save_msa_upper 4, 0x20, a0 save_msa_upper 5, 0x28, a0 save_msa_upper 6, 0x30, a0 save_msa_upper 7, 0x38, a0 save_msa_upper 8, 0x40, a0 save_msa_upper 9, 0x48, a0 save_msa_upper 10, 0x50, a0 save_msa_upper 11, 0x58, a0 save_msa_upper 12, 0x60, a0 save_msa_upper 13, 0x68, a0 save_msa_upper 14, 0x70, a0 save_msa_upper 15, 0x78, a0 save_msa_upper 16, 0x80, a0 save_msa_upper 17, 0x88, a0 save_msa_upper 18, 0x90, a0 save_msa_upper 19, 0x98, a0 save_msa_upper 20, 0xa0, a0 save_msa_upper 21, 0xa8, a0 save_msa_upper 22, 0xb0, a0 save_msa_upper 23, 0xb8, a0 save_msa_upper 24, 0xc0, a0 save_msa_upper 25, 0xc8, a0 save_msa_upper 26, 0xd0, a0 save_msa_upper 27, 0xd8, a0 save_msa_upper 28, 0xe0, a0 save_msa_upper 29, 0xe8, a0 save_msa_upper 30, 0xf0, a0 save_msa_upper 31, 0xf8, a0 jr ra li v0, 0 END(_save_msa_all_upper) .macro restore_msa_upper wr, off, base .set push .set noat #ifdef CONFIG_64BIT EX ld $1, \off(\base) insert_d \wr, 1 #elif defined(CONFIG_CPU_LITTLE_ENDIAN) EX lw $1, \off(\base) insert_w \wr, 2 EX lw $1, (\off+4)(\base) insert_w \wr, 3 #else /* CONFIG_CPU_BIG_ENDIAN */ EX lw $1, (\off+4)(\base) insert_w \wr, 2 EX lw $1, \off(\base) 
insert_w \wr, 3 #endif .set pop .endm LEAF(_restore_msa_all_upper) restore_msa_upper 0, 0x00, a0 restore_msa_upper 1, 0x08, a0 restore_msa_upper 2, 0x10, a0 restore_msa_upper 3, 0x18, a0 restore_msa_upper 4, 0x20, a0 restore_msa_upper 5, 0x28, a0 restore_msa_upper 6, 0x30, a0 restore_msa_upper 7, 0x38, a0 restore_msa_upper 8, 0x40, a0 restore_msa_upper 9, 0x48, a0 restore_msa_upper 10, 0x50, a0 restore_msa_upper 11, 0x58, a0 restore_msa_upper 12, 0x60, a0 restore_msa_upper 13, 0x68, a0 restore_msa_upper 14, 0x70, a0 restore_msa_upper 15, 0x78, a0 restore_msa_upper 16, 0x80, a0 restore_msa_upper 17, 0x88, a0 restore_msa_upper 18, 0x90, a0 restore_msa_upper 19, 0x98, a0 restore_msa_upper 20, 0xa0, a0 restore_msa_upper 21, 0xa8, a0 restore_msa_upper 22, 0xb0, a0 restore_msa_upper 23, 0xb8, a0 restore_msa_upper 24, 0xc0, a0 restore_msa_upper 25, 0xc8, a0 restore_msa_upper 26, 0xd0, a0 restore_msa_upper 27, 0xd8, a0 restore_msa_upper 28, 0xe0, a0 restore_msa_upper 29, 0xe8, a0 restore_msa_upper 30, 0xf0, a0 restore_msa_upper 31, 0xf8, a0 jr ra li v0, 0 END(_restore_msa_all_upper) #endif /* CONFIG_CPU_HAS_MSA */ .set reorder .type fault, @function .ent fault fault: li v0, -EFAULT # failure jr ra .end fault
AirFortressIlikara/LS2K0300-linux-4.19
3,799
arch/mips/kernel/r2300_fpu.S
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1996, 1998 by Ralf Baechle * * Multi-arch abstraction and asm macros for easier reading: * Copyright (C) 1996 David S. Miller (davem@davemloft.net) * * Further modifications to make this work: * Copyright (c) 1998 Harald Koerfgen */ #include <asm/asm.h> #include <asm/asmmacro.h> #include <asm/errno.h> #include <asm/export.h> #include <asm/fpregdef.h> #include <asm/mipsregs.h> #include <asm/asm-offsets.h> #include <asm/regdef.h> #define EX(a,b) \ 9: a,##b; \ .section __ex_table,"a"; \ PTR 9b,fault; \ .previous #define EX2(a,b) \ 9: a,##b; \ .section __ex_table,"a"; \ PTR 9b,bad_stack; \ PTR 9b+4,bad_stack; \ .previous .set mips1 /* * Save a thread's fp context. */ LEAF(_save_fp) EXPORT_SYMBOL(_save_fp) fpu_save_single a0, t1 # clobbers t1 jr ra END(_save_fp) /* * Restore a thread's fp context. */ LEAF(_restore_fp) fpu_restore_single a0, t1 # clobbers t1 jr ra END(_restore_fp) /* * Load the FPU with signalling NANS. This bit pattern we're using has * the property that no matter whether considered as single or as double * precision represents signaling NANS. * * The value to initialize fcr31 to comes in $a0. 
*/ .set push SET_HARDFLOAT LEAF(_init_fpu) mfc0 t0, CP0_STATUS li t1, ST0_CU1 or t0, t1 mtc0 t0, CP0_STATUS ctc1 a0, fcr31 li t0, -1 mtc1 t0, $f0 mtc1 t0, $f1 mtc1 t0, $f2 mtc1 t0, $f3 mtc1 t0, $f4 mtc1 t0, $f5 mtc1 t0, $f6 mtc1 t0, $f7 mtc1 t0, $f8 mtc1 t0, $f9 mtc1 t0, $f10 mtc1 t0, $f11 mtc1 t0, $f12 mtc1 t0, $f13 mtc1 t0, $f14 mtc1 t0, $f15 mtc1 t0, $f16 mtc1 t0, $f17 mtc1 t0, $f18 mtc1 t0, $f19 mtc1 t0, $f20 mtc1 t0, $f21 mtc1 t0, $f22 mtc1 t0, $f23 mtc1 t0, $f24 mtc1 t0, $f25 mtc1 t0, $f26 mtc1 t0, $f27 mtc1 t0, $f28 mtc1 t0, $f29 mtc1 t0, $f30 mtc1 t0, $f31 jr ra END(_init_fpu) .set pop .set noreorder /** * _save_fp_context() - save FP context from the FPU * @a0 - pointer to fpregs field of sigcontext * @a1 - pointer to fpc_csr field of sigcontext * * Save FP context, including the 32 FP data registers and the FP * control & status register, from the FPU to signal context. */ LEAF(_save_fp_context) .set push SET_HARDFLOAT li v0, 0 # assume success cfc1 t1, fcr31 EX2(s.d $f0, 0(a0)) EX2(s.d $f2, 16(a0)) EX2(s.d $f4, 32(a0)) EX2(s.d $f6, 48(a0)) EX2(s.d $f8, 64(a0)) EX2(s.d $f10, 80(a0)) EX2(s.d $f12, 96(a0)) EX2(s.d $f14, 112(a0)) EX2(s.d $f16, 128(a0)) EX2(s.d $f18, 144(a0)) EX2(s.d $f20, 160(a0)) EX2(s.d $f22, 176(a0)) EX2(s.d $f24, 192(a0)) EX2(s.d $f26, 208(a0)) EX2(s.d $f28, 224(a0)) EX2(s.d $f30, 240(a0)) jr ra EX(sw t1, (a1)) .set pop END(_save_fp_context) /** * _restore_fp_context() - restore FP context to the FPU * @a0 - pointer to fpregs field of sigcontext * @a1 - pointer to fpc_csr field of sigcontext * * Restore FP context, including the 32 FP data registers and the FP * control & status register, from signal context to the FPU. 
*/ LEAF(_restore_fp_context) .set push SET_HARDFLOAT li v0, 0 # assume success EX(lw t0, (a1)) EX2(l.d $f0, 0(a0)) EX2(l.d $f2, 16(a0)) EX2(l.d $f4, 32(a0)) EX2(l.d $f6, 48(a0)) EX2(l.d $f8, 64(a0)) EX2(l.d $f10, 80(a0)) EX2(l.d $f12, 96(a0)) EX2(l.d $f14, 112(a0)) EX2(l.d $f16, 128(a0)) EX2(l.d $f18, 144(a0)) EX2(l.d $f20, 160(a0)) EX2(l.d $f22, 176(a0)) EX2(l.d $f24, 192(a0)) EX2(l.d $f26, 208(a0)) EX2(l.d $f28, 224(a0)) EX2(l.d $f30, 240(a0)) jr ra ctc1 t0, fcr31 .set pop END(_restore_fp_context) .set reorder .type fault, @function .ent fault fault: li v0, -EFAULT jr ra .end fault
AirFortressIlikara/LS2K0300-linux-4.19
7,181
arch/mips/kernel/bmips_vec.S
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2011 by Kevin Cernekee (cernekee@gmail.com) * * Reset/NMI/re-entry vectors for BMIPS processors */ #include <asm/asm.h> #include <asm/asmmacro.h> #include <asm/cacheops.h> #include <asm/cpu.h> #include <asm/regdef.h> #include <asm/mipsregs.h> #include <asm/stackframe.h> #include <asm/addrspace.h> #include <asm/hazards.h> #include <asm/bmips.h> .macro BARRIER .set mips32 _ssnop _ssnop _ssnop .set mips0 .endm /*********************************************************************** * Alternate CPU1 startup vector for BMIPS4350 * * On some systems the bootloader has already started CPU1 and configured * it to resume execution at 0x8000_0200 (!BEV IV vector) when it is * triggered by the SW1 interrupt. If that is the case we try to move * it to a more convenient place: BMIPS_WARM_RESTART_VEC @ 0x8000_0380. ***********************************************************************/ LEAF(bmips_smp_movevec) la k0, 1f li k1, CKSEG1 or k0, k1 jr k0 1: /* clear IV, pending IPIs */ mtc0 zero, CP0_CAUSE /* re-enable IRQs to wait for SW1 */ li k0, ST0_IE | ST0_BEV | STATUSF_IP1 mtc0 k0, CP0_STATUS /* set up CPU1 CBR; move BASE to 0xa000_0000 */ li k0, 0xff400000 mtc0 k0, $22, 6 /* set up relocation vector address based on thread ID */ mfc0 k1, $22, 3 srl k1, 16 andi k1, 0x8000 or k1, CKSEG1 | BMIPS_RELO_VECTOR_CONTROL_0 or k0, k1 li k1, 0xa0080000 sw k1, 0(k0) /* wait here for SW1 interrupt from bmips_boot_secondary() */ wait la k0, bmips_reset_nmi_vec li k1, CKSEG1 or k0, k1 jr k0 END(bmips_smp_movevec) /*********************************************************************** * Reset/NMI vector * For BMIPS processors that can relocate their exception vectors, this * entire function gets copied to 0x8000_0000. 
***********************************************************************/ NESTED(bmips_reset_nmi_vec, PT_SIZE, sp) .set push .set noat .align 4 #ifdef CONFIG_SMP /* if the NMI bit is clear, assume this is a CPU1 reset instead */ li k1, (1 << 19) mfc0 k0, CP0_STATUS and k0, k1 beqz k0, soft_reset #if defined(CONFIG_CPU_BMIPS5000) mfc0 k0, CP0_PRID li k1, PRID_IMP_BMIPS5000 /* mask with PRID_IMP_BMIPS5000 to cover both variants */ andi k0, PRID_IMP_BMIPS5000 bne k0, k1, 1f /* if we're not on core 0, this must be the SMP boot signal */ li k1, (3 << 25) mfc0 k0, $22 and k0, k1 bnez k0, bmips_smp_entry 1: #endif /* CONFIG_CPU_BMIPS5000 */ #endif /* CONFIG_SMP */ /* nope, it's just a regular NMI */ SAVE_ALL move a0, sp /* clear EXL, ERL, BEV so that TLB refills still work */ mfc0 k0, CP0_STATUS li k1, ST0_ERL | ST0_EXL | ST0_BEV | ST0_IE or k0, k1 xor k0, k1 mtc0 k0, CP0_STATUS BARRIER /* jump to the NMI handler function */ la k0, nmi_handler jr k0 RESTORE_ALL .set arch=r4000 eret #ifdef CONFIG_SMP soft_reset: #if defined(CONFIG_CPU_BMIPS5000) mfc0 k0, CP0_PRID andi k0, 0xff00 li k1, PRID_IMP_BMIPS5200 bne k0, k1, bmips_smp_entry /* if running on TP 1, jump to bmips_smp_entry */ mfc0 k0, $22 li k1, (1 << 24) and k1, k0 bnez k1, bmips_smp_entry nop /* * running on TP0, can not be core 0 (the boot core). * Check for soft reset. Indicates a warm boot */ mfc0 k0, $12 li k1, (1 << 20) and k0, k1 beqz k0, bmips_smp_entry /* * Warm boot. * Cache init is only done on TP0 */ la k0, bmips_5xxx_init jalr k0 nop b bmips_smp_entry nop #endif /*********************************************************************** * CPU1 reset vector (used for the initial boot only) * This is still part of bmips_reset_nmi_vec(). 
***********************************************************************/ bmips_smp_entry: /* set up CP0 STATUS; enable FPU */ li k0, 0x30000000 mtc0 k0, CP0_STATUS BARRIER /* set local CP0 CONFIG to make kseg0 cacheable, write-back */ mfc0 k0, CP0_CONFIG ori k0, 0x07 xori k0, 0x04 mtc0 k0, CP0_CONFIG mfc0 k0, CP0_PRID andi k0, 0xff00 #if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380) li k1, PRID_IMP_BMIPS43XX bne k0, k1, 2f /* initialize CPU1's local I-cache */ li k0, 0x80000000 li k1, 0x80010000 mtc0 zero, $28 mtc0 zero, $28, 1 BARRIER 1: cache Index_Store_Tag_I, 0(k0) addiu k0, 16 bne k0, k1, 1b b 3f 2: #endif /* CONFIG_CPU_BMIPS4350 || CONFIG_CPU_BMIPS4380 */ #if defined(CONFIG_CPU_BMIPS5000) /* mask with PRID_IMP_BMIPS5000 to cover both variants */ li k1, PRID_IMP_BMIPS5000 andi k0, PRID_IMP_BMIPS5000 bne k0, k1, 3f /* set exception vector base */ la k0, ebase lw k0, 0(k0) mtc0 k0, $15, 1 BARRIER #endif /* CONFIG_CPU_BMIPS5000 */ 3: /* jump back to kseg0 in case we need to remap the kseg1 area */ la k0, 1f jr k0 1: la k0, bmips_enable_xks01 jalr k0 /* use temporary stack to set up upper memory TLB */ li sp, BMIPS_WARM_RESTART_VEC la k0, plat_wired_tlb_setup jalr k0 /* switch to permanent stack and continue booting */ .global bmips_secondary_reentry bmips_secondary_reentry: la k0, bmips_smp_boot_sp lw sp, 0(k0) la k0, bmips_smp_boot_gp lw gp, 0(k0) la k0, start_secondary jr k0 #endif /* CONFIG_SMP */ .align 4 .global bmips_reset_nmi_vec_end bmips_reset_nmi_vec_end: END(bmips_reset_nmi_vec) .set pop /*********************************************************************** * CPU1 warm restart vector (used for second and subsequent boots). * Also used for S2 standby recovery (PM). 
* This entire function gets copied to (BMIPS_WARM_RESTART_VEC) ***********************************************************************/ LEAF(bmips_smp_int_vec) .align 4 mfc0 k0, CP0_STATUS ori k0, 0x01 xori k0, 0x01 mtc0 k0, CP0_STATUS eret .align 4 .global bmips_smp_int_vec_end bmips_smp_int_vec_end: END(bmips_smp_int_vec) /*********************************************************************** * XKS01 support * Certain CPUs support extending kseg0 to 1024MB. ***********************************************************************/ LEAF(bmips_enable_xks01) #if defined(CONFIG_XKS01) mfc0 t0, CP0_PRID andi t2, t0, 0xff00 #if defined(CONFIG_CPU_BMIPS4380) li t1, PRID_IMP_BMIPS43XX bne t2, t1, 1f andi t0, 0xff addiu t1, t0, -PRID_REV_BMIPS4380_HI bgtz t1, 2f addiu t0, -PRID_REV_BMIPS4380_LO bltz t0, 2f mfc0 t0, $22, 3 li t1, 0x1ff0 li t2, (1 << 12) | (1 << 9) or t0, t1 xor t0, t1 or t0, t2 mtc0 t0, $22, 3 BARRIER b 2f 1: #endif /* CONFIG_CPU_BMIPS4380 */ #if defined(CONFIG_CPU_BMIPS5000) li t1, PRID_IMP_BMIPS5000 /* mask with PRID_IMP_BMIPS5000 to cover both variants */ andi t2, PRID_IMP_BMIPS5000 bne t2, t1, 2f mfc0 t0, $22, 5 li t1, 0x01ff li t2, (1 << 8) | (1 << 5) or t0, t1 xor t0, t1 or t0, t2 mtc0 t0, $22, 5 BARRIER #endif /* CONFIG_CPU_BMIPS5000 */ 2: #endif /* defined(CONFIG_XKS01) */ jr ra END(bmips_enable_xks01)
AirFortressIlikara/LS2K0300-linux-4.19
14,945
arch/mips/kernel/octeon_switch.S
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1994, 1995, 1996, 1998, 1999, 2002, 2003 Ralf Baechle * Copyright (C) 1996 David S. Miller (davem@davemloft.net) * Copyright (C) 1994, 1995, 1996, by Andreas Busse * Copyright (C) 1999 Silicon Graphics, Inc. * Copyright (C) 2000 MIPS Technologies, Inc. * written by Carsten Langgaard, carstenl@mips.com */ #include <asm/asm.h> #include <asm/export.h> #include <asm/asm-offsets.h> #include <asm/mipsregs.h> #include <asm/regdef.h> #include <asm/stackframe.h> /* * task_struct *resume(task_struct *prev, task_struct *next, * struct thread_info *next_ti) */ .align 7 LEAF(resume) .set arch=octeon mfc0 t1, CP0_STATUS LONG_S t1, THREAD_STATUS(a0) cpu_save_nonscratch a0 LONG_S ra, THREAD_REG31(a0) #if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0 /* Check if we need to store CVMSEG state */ dmfc0 t0, $11,7 /* CvmMemCtl */ bbit0 t0, 6, 3f /* Is user access enabled? 
*/ /* Store the CVMSEG state */ /* Extract the size of CVMSEG */ andi t0, 0x3f /* Multiply * (cache line size/sizeof(long)/2) */ sll t0, 7-LONGLOG-1 li t1, -32768 /* Base address of CVMSEG */ LONG_ADDI t2, a0, THREAD_CVMSEG /* Where to store CVMSEG to */ synciobdma 2: .set noreorder LONG_L t8, 0(t1) /* Load from CVMSEG */ subu t0, 1 /* Decrement loop var */ LONG_L t9, LONGSIZE(t1)/* Load from CVMSEG */ LONG_ADDU t1, LONGSIZE*2 /* Increment loc in CVMSEG */ LONG_S t8, 0(t2) /* Store CVMSEG to thread storage */ LONG_ADDU t2, LONGSIZE*2 /* Increment loc in thread storage */ bnez t0, 2b /* Loop until we've copied it all */ LONG_S t9, -LONGSIZE(t2)/* Store CVMSEG to thread storage */ .set reorder /* Disable access to CVMSEG */ dmfc0 t0, $11,7 /* CvmMemCtl */ xori t0, t0, 0x40 /* Bit 6 is CVMSEG user enable */ dmtc0 t0, $11,7 /* CvmMemCtl */ #endif 3: #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) PTR_LA t8, __stack_chk_guard LONG_L t9, TASK_STACK_CANARY(a1) LONG_S t9, 0(t8) #endif /* * The order of restoring the registers takes care of the race * updating $28, $29 and kernelsp without disabling ints. */ move $28, a2 cpu_restore_nonscratch a1 PTR_ADDU t0, $28, _THREAD_SIZE - 32 set_saved_sp t0, t1, t2 mfc0 t1, CP0_STATUS /* Do we really need this? */ li a3, 0xff01 and t1, a3 LONG_L a2, THREAD_STATUS(a1) nor a3, $0, a3 and a2, a3 or a2, t1 mtc0 a2, CP0_STATUS move v0, a0 jr ra END(resume) /* * void octeon_cop2_save(struct octeon_cop2_state *a0) */ .align 7 .set push .set noreorder LEAF(octeon_cop2_save) dmfc0 t9, $9,7 /* CvmCtl register. 
*/ /* Save the COP2 CRC state */ dmfc2 t0, 0x0201 dmfc2 t1, 0x0202 dmfc2 t2, 0x0200 sd t0, OCTEON_CP2_CRC_IV(a0) sd t1, OCTEON_CP2_CRC_LENGTH(a0) /* Skip next instructions if CvmCtl[NODFA_CP2] set */ bbit1 t9, 28, 1f sd t2, OCTEON_CP2_CRC_POLY(a0) /* Save the LLM state */ dmfc2 t0, 0x0402 dmfc2 t1, 0x040A sd t0, OCTEON_CP2_LLM_DAT(a0) 1: bbit1 t9, 26, 3f /* done if CvmCtl[NOCRYPTO] set */ sd t1, OCTEON_CP2_LLM_DAT+8(a0) /* Save the COP2 crypto state */ /* this part is mostly common to both pass 1 and later revisions */ dmfc2 t0, 0x0084 dmfc2 t1, 0x0080 dmfc2 t2, 0x0081 dmfc2 t3, 0x0082 sd t0, OCTEON_CP2_3DES_IV(a0) dmfc2 t0, 0x0088 sd t1, OCTEON_CP2_3DES_KEY(a0) dmfc2 t1, 0x0111 /* only necessary for pass 1 */ sd t2, OCTEON_CP2_3DES_KEY+8(a0) dmfc2 t2, 0x0102 sd t3, OCTEON_CP2_3DES_KEY+16(a0) dmfc2 t3, 0x0103 sd t0, OCTEON_CP2_3DES_RESULT(a0) dmfc2 t0, 0x0104 sd t1, OCTEON_CP2_AES_INP0(a0) /* only necessary for pass 1 */ dmfc2 t1, 0x0105 sd t2, OCTEON_CP2_AES_IV(a0) dmfc2 t2, 0x0106 sd t3, OCTEON_CP2_AES_IV+8(a0) dmfc2 t3, 0x0107 sd t0, OCTEON_CP2_AES_KEY(a0) dmfc2 t0, 0x0110 sd t1, OCTEON_CP2_AES_KEY+8(a0) dmfc2 t1, 0x0100 sd t2, OCTEON_CP2_AES_KEY+16(a0) dmfc2 t2, 0x0101 sd t3, OCTEON_CP2_AES_KEY+24(a0) mfc0 v0, $15,0 /* Get the processor ID register */ sd t0, OCTEON_CP2_AES_KEYLEN(a0) li v1, 0x000d0000 /* This is the processor ID of Octeon Pass1 */ sd t1, OCTEON_CP2_AES_RESULT(a0) /* Skip to the Pass1 version of the remainder of the COP2 state */ beq v0, v1, 2f sd t2, OCTEON_CP2_AES_RESULT+8(a0) /* the non-pass1 state when !CvmCtl[NOCRYPTO] */ dmfc2 t1, 0x0240 dmfc2 t2, 0x0241 ori v1, v1, 0x9500 /* lowest OCTEON III PrId*/ dmfc2 t3, 0x0242 subu v1, v0, v1 /* prid - lowest OCTEON III PrId */ dmfc2 t0, 0x0243 sd t1, OCTEON_CP2_HSH_DATW(a0) dmfc2 t1, 0x0244 sd t2, OCTEON_CP2_HSH_DATW+8(a0) dmfc2 t2, 0x0245 sd t3, OCTEON_CP2_HSH_DATW+16(a0) dmfc2 t3, 0x0246 sd t0, OCTEON_CP2_HSH_DATW+24(a0) dmfc2 t0, 0x0247 sd t1, OCTEON_CP2_HSH_DATW+32(a0) dmfc2 t1, 0x0248 sd t2, 
OCTEON_CP2_HSH_DATW+40(a0) dmfc2 t2, 0x0249 sd t3, OCTEON_CP2_HSH_DATW+48(a0) dmfc2 t3, 0x024A sd t0, OCTEON_CP2_HSH_DATW+56(a0) dmfc2 t0, 0x024B sd t1, OCTEON_CP2_HSH_DATW+64(a0) dmfc2 t1, 0x024C sd t2, OCTEON_CP2_HSH_DATW+72(a0) dmfc2 t2, 0x024D sd t3, OCTEON_CP2_HSH_DATW+80(a0) dmfc2 t3, 0x024E sd t0, OCTEON_CP2_HSH_DATW+88(a0) dmfc2 t0, 0x0250 sd t1, OCTEON_CP2_HSH_DATW+96(a0) dmfc2 t1, 0x0251 sd t2, OCTEON_CP2_HSH_DATW+104(a0) dmfc2 t2, 0x0252 sd t3, OCTEON_CP2_HSH_DATW+112(a0) dmfc2 t3, 0x0253 sd t0, OCTEON_CP2_HSH_IVW(a0) dmfc2 t0, 0x0254 sd t1, OCTEON_CP2_HSH_IVW+8(a0) dmfc2 t1, 0x0255 sd t2, OCTEON_CP2_HSH_IVW+16(a0) dmfc2 t2, 0x0256 sd t3, OCTEON_CP2_HSH_IVW+24(a0) dmfc2 t3, 0x0257 sd t0, OCTEON_CP2_HSH_IVW+32(a0) dmfc2 t0, 0x0258 sd t1, OCTEON_CP2_HSH_IVW+40(a0) dmfc2 t1, 0x0259 sd t2, OCTEON_CP2_HSH_IVW+48(a0) dmfc2 t2, 0x025E sd t3, OCTEON_CP2_HSH_IVW+56(a0) dmfc2 t3, 0x025A sd t0, OCTEON_CP2_GFM_MULT(a0) dmfc2 t0, 0x025B sd t1, OCTEON_CP2_GFM_MULT+8(a0) sd t2, OCTEON_CP2_GFM_POLY(a0) sd t3, OCTEON_CP2_GFM_RESULT(a0) bltz v1, 4f sd t0, OCTEON_CP2_GFM_RESULT+8(a0) /* OCTEON III things*/ dmfc2 t0, 0x024F dmfc2 t1, 0x0050 sd t0, OCTEON_CP2_SHA3(a0) sd t1, OCTEON_CP2_SHA3+8(a0) 4: jr ra nop 2: /* pass 1 special stuff when !CvmCtl[NOCRYPTO] */ dmfc2 t3, 0x0040 dmfc2 t0, 0x0041 dmfc2 t1, 0x0042 dmfc2 t2, 0x0043 sd t3, OCTEON_CP2_HSH_DATW(a0) dmfc2 t3, 0x0044 sd t0, OCTEON_CP2_HSH_DATW+8(a0) dmfc2 t0, 0x0045 sd t1, OCTEON_CP2_HSH_DATW+16(a0) dmfc2 t1, 0x0046 sd t2, OCTEON_CP2_HSH_DATW+24(a0) dmfc2 t2, 0x0048 sd t3, OCTEON_CP2_HSH_DATW+32(a0) dmfc2 t3, 0x0049 sd t0, OCTEON_CP2_HSH_DATW+40(a0) dmfc2 t0, 0x004A sd t1, OCTEON_CP2_HSH_DATW+48(a0) sd t2, OCTEON_CP2_HSH_IVW(a0) sd t3, OCTEON_CP2_HSH_IVW+8(a0) sd t0, OCTEON_CP2_HSH_IVW+16(a0) 3: /* pass 1 or CvmCtl[NOCRYPTO] set */ jr ra nop END(octeon_cop2_save) .set pop /* * void octeon_cop2_restore(struct octeon_cop2_state *a0) */ .align 7 .set push .set noreorder LEAF(octeon_cop2_restore) /* First cache line was 
prefetched before the call */ pref 4, 128(a0) dmfc0 t9, $9,7 /* CvmCtl register. */ pref 4, 256(a0) ld t0, OCTEON_CP2_CRC_IV(a0) pref 4, 384(a0) ld t1, OCTEON_CP2_CRC_LENGTH(a0) ld t2, OCTEON_CP2_CRC_POLY(a0) /* Restore the COP2 CRC state */ dmtc2 t0, 0x0201 dmtc2 t1, 0x1202 bbit1 t9, 28, 2f /* Skip LLM if CvmCtl[NODFA_CP2] is set */ dmtc2 t2, 0x4200 /* Restore the LLM state */ ld t0, OCTEON_CP2_LLM_DAT(a0) ld t1, OCTEON_CP2_LLM_DAT+8(a0) dmtc2 t0, 0x0402 dmtc2 t1, 0x040A 2: bbit1 t9, 26, done_restore /* done if CvmCtl[NOCRYPTO] set */ nop /* Restore the COP2 crypto state common to pass 1 and pass 2 */ ld t0, OCTEON_CP2_3DES_IV(a0) ld t1, OCTEON_CP2_3DES_KEY(a0) ld t2, OCTEON_CP2_3DES_KEY+8(a0) dmtc2 t0, 0x0084 ld t0, OCTEON_CP2_3DES_KEY+16(a0) dmtc2 t1, 0x0080 ld t1, OCTEON_CP2_3DES_RESULT(a0) dmtc2 t2, 0x0081 ld t2, OCTEON_CP2_AES_INP0(a0) /* only really needed for pass 1 */ dmtc2 t0, 0x0082 ld t0, OCTEON_CP2_AES_IV(a0) dmtc2 t1, 0x0098 ld t1, OCTEON_CP2_AES_IV+8(a0) dmtc2 t2, 0x010A /* only really needed for pass 1 */ ld t2, OCTEON_CP2_AES_KEY(a0) dmtc2 t0, 0x0102 ld t0, OCTEON_CP2_AES_KEY+8(a0) dmtc2 t1, 0x0103 ld t1, OCTEON_CP2_AES_KEY+16(a0) dmtc2 t2, 0x0104 ld t2, OCTEON_CP2_AES_KEY+24(a0) dmtc2 t0, 0x0105 ld t0, OCTEON_CP2_AES_KEYLEN(a0) dmtc2 t1, 0x0106 ld t1, OCTEON_CP2_AES_RESULT(a0) dmtc2 t2, 0x0107 ld t2, OCTEON_CP2_AES_RESULT+8(a0) mfc0 t3, $15,0 /* Get the processor ID register */ dmtc2 t0, 0x0110 li v0, 0x000d0000 /* This is the processor ID of Octeon Pass1 */ dmtc2 t1, 0x0100 bne v0, t3, 3f /* Skip the next stuff for non-pass1 */ dmtc2 t2, 0x0101 /* this code is specific for pass 1 */ ld t0, OCTEON_CP2_HSH_DATW(a0) ld t1, OCTEON_CP2_HSH_DATW+8(a0) ld t2, OCTEON_CP2_HSH_DATW+16(a0) dmtc2 t0, 0x0040 ld t0, OCTEON_CP2_HSH_DATW+24(a0) dmtc2 t1, 0x0041 ld t1, OCTEON_CP2_HSH_DATW+32(a0) dmtc2 t2, 0x0042 ld t2, OCTEON_CP2_HSH_DATW+40(a0) dmtc2 t0, 0x0043 ld t0, OCTEON_CP2_HSH_DATW+48(a0) dmtc2 t1, 0x0044 ld t1, OCTEON_CP2_HSH_IVW(a0) dmtc2 t2, 0x0045 ld 
t2, OCTEON_CP2_HSH_IVW+8(a0) dmtc2 t0, 0x0046 ld t0, OCTEON_CP2_HSH_IVW+16(a0) dmtc2 t1, 0x0048 dmtc2 t2, 0x0049 b done_restore /* unconditional branch */ dmtc2 t0, 0x004A 3: /* this is post-pass1 code */ ld t2, OCTEON_CP2_HSH_DATW(a0) ori v0, v0, 0x9500 /* lowest OCTEON III PrId*/ ld t0, OCTEON_CP2_HSH_DATW+8(a0) ld t1, OCTEON_CP2_HSH_DATW+16(a0) dmtc2 t2, 0x0240 ld t2, OCTEON_CP2_HSH_DATW+24(a0) dmtc2 t0, 0x0241 ld t0, OCTEON_CP2_HSH_DATW+32(a0) dmtc2 t1, 0x0242 ld t1, OCTEON_CP2_HSH_DATW+40(a0) dmtc2 t2, 0x0243 ld t2, OCTEON_CP2_HSH_DATW+48(a0) dmtc2 t0, 0x0244 ld t0, OCTEON_CP2_HSH_DATW+56(a0) dmtc2 t1, 0x0245 ld t1, OCTEON_CP2_HSH_DATW+64(a0) dmtc2 t2, 0x0246 ld t2, OCTEON_CP2_HSH_DATW+72(a0) dmtc2 t0, 0x0247 ld t0, OCTEON_CP2_HSH_DATW+80(a0) dmtc2 t1, 0x0248 ld t1, OCTEON_CP2_HSH_DATW+88(a0) dmtc2 t2, 0x0249 ld t2, OCTEON_CP2_HSH_DATW+96(a0) dmtc2 t0, 0x024A ld t0, OCTEON_CP2_HSH_DATW+104(a0) dmtc2 t1, 0x024B ld t1, OCTEON_CP2_HSH_DATW+112(a0) dmtc2 t2, 0x024C ld t2, OCTEON_CP2_HSH_IVW(a0) dmtc2 t0, 0x024D ld t0, OCTEON_CP2_HSH_IVW+8(a0) dmtc2 t1, 0x024E ld t1, OCTEON_CP2_HSH_IVW+16(a0) dmtc2 t2, 0x0250 ld t2, OCTEON_CP2_HSH_IVW+24(a0) dmtc2 t0, 0x0251 ld t0, OCTEON_CP2_HSH_IVW+32(a0) dmtc2 t1, 0x0252 ld t1, OCTEON_CP2_HSH_IVW+40(a0) dmtc2 t2, 0x0253 ld t2, OCTEON_CP2_HSH_IVW+48(a0) dmtc2 t0, 0x0254 ld t0, OCTEON_CP2_HSH_IVW+56(a0) dmtc2 t1, 0x0255 ld t1, OCTEON_CP2_GFM_MULT(a0) dmtc2 t2, 0x0256 ld t2, OCTEON_CP2_GFM_MULT+8(a0) dmtc2 t0, 0x0257 ld t0, OCTEON_CP2_GFM_POLY(a0) dmtc2 t1, 0x0258 ld t1, OCTEON_CP2_GFM_RESULT(a0) dmtc2 t2, 0x0259 ld t2, OCTEON_CP2_GFM_RESULT+8(a0) dmtc2 t0, 0x025E subu v0, t3, v0 /* prid - lowest OCTEON III PrId */ dmtc2 t1, 0x025A bltz v0, done_restore dmtc2 t2, 0x025B /* OCTEON III things*/ ld t0, OCTEON_CP2_SHA3(a0) ld t1, OCTEON_CP2_SHA3+8(a0) dmtc2 t0, 0x0051 dmtc2 t1, 0x0050 done_restore: jr ra nop END(octeon_cop2_restore) .set pop /* * void octeon_mult_save() * sp is assumed to point to a struct pt_regs * * NOTE: This is 
called in SAVE_TEMP in stackframe.h. It can * safely modify v1,k0, k1,$10-$15, and $24. It will * be overwritten with a processor specific version of the code. */ .p2align 7 .set push .set noreorder LEAF(octeon_mult_save) jr ra nop .space 30 * 4, 0 octeon_mult_save_end: EXPORT(octeon_mult_save_end) END(octeon_mult_save) LEAF(octeon_mult_save2) /* Save the multiplier state OCTEON II and earlier*/ v3mulu k0, $0, $0 v3mulu k1, $0, $0 sd k0, PT_MTP(sp) /* PT_MTP has P0 */ v3mulu k0, $0, $0 sd k1, PT_MTP+8(sp) /* PT_MTP+8 has P1 */ ori k1, $0, 1 v3mulu k1, k1, $0 sd k0, PT_MTP+16(sp) /* PT_MTP+16 has P2 */ v3mulu k0, $0, $0 sd k1, PT_MPL(sp) /* PT_MPL has MPL0 */ v3mulu k1, $0, $0 sd k0, PT_MPL+8(sp) /* PT_MPL+8 has MPL1 */ jr ra sd k1, PT_MPL+16(sp) /* PT_MPL+16 has MPL2 */ octeon_mult_save2_end: EXPORT(octeon_mult_save2_end) END(octeon_mult_save2) LEAF(octeon_mult_save3) /* Save the multiplier state OCTEON III */ v3mulu $10, $0, $0 /* read P0 */ v3mulu $11, $0, $0 /* read P1 */ v3mulu $12, $0, $0 /* read P2 */ sd $10, PT_MTP+(0*8)(sp) /* store P0 */ v3mulu $10, $0, $0 /* read P3 */ sd $11, PT_MTP+(1*8)(sp) /* store P1 */ v3mulu $11, $0, $0 /* read P4 */ sd $12, PT_MTP+(2*8)(sp) /* store P2 */ ori $13, $0, 1 v3mulu $12, $0, $0 /* read P5 */ sd $10, PT_MTP+(3*8)(sp) /* store P3 */ v3mulu $13, $13, $0 /* P4-P0 = MPL5-MPL1, $13 = MPL0 */ sd $11, PT_MTP+(4*8)(sp) /* store P4 */ v3mulu $10, $0, $0 /* read MPL1 */ sd $12, PT_MTP+(5*8)(sp) /* store P5 */ v3mulu $11, $0, $0 /* read MPL2 */ sd $13, PT_MPL+(0*8)(sp) /* store MPL0 */ v3mulu $12, $0, $0 /* read MPL3 */ sd $10, PT_MPL+(1*8)(sp) /* store MPL1 */ v3mulu $10, $0, $0 /* read MPL4 */ sd $11, PT_MPL+(2*8)(sp) /* store MPL2 */ v3mulu $11, $0, $0 /* read MPL5 */ sd $12, PT_MPL+(3*8)(sp) /* store MPL3 */ sd $10, PT_MPL+(4*8)(sp) /* store MPL4 */ jr ra sd $11, PT_MPL+(5*8)(sp) /* store MPL5 */ octeon_mult_save3_end: EXPORT(octeon_mult_save3_end) END(octeon_mult_save3) .set pop /* * void octeon_mult_restore() * sp is assumed 
to point to a struct pt_regs * * NOTE: This is called in RESTORE_TEMP in stackframe.h. */ .p2align 7 .set push .set noreorder LEAF(octeon_mult_restore) jr ra nop .space 30 * 4, 0 octeon_mult_restore_end: EXPORT(octeon_mult_restore_end) END(octeon_mult_restore) LEAF(octeon_mult_restore2) ld v0, PT_MPL(sp) /* MPL0 */ ld v1, PT_MPL+8(sp) /* MPL1 */ ld k0, PT_MPL+16(sp) /* MPL2 */ /* Restore the multiplier state */ ld k1, PT_MTP+16(sp) /* P2 */ mtm0 v0 /* MPL0 */ ld v0, PT_MTP+8(sp) /* P1 */ mtm1 v1 /* MPL1 */ ld v1, PT_MTP(sp) /* P0 */ mtm2 k0 /* MPL2 */ mtp2 k1 /* P2 */ mtp1 v0 /* P1 */ jr ra mtp0 v1 /* P0 */ octeon_mult_restore2_end: EXPORT(octeon_mult_restore2_end) END(octeon_mult_restore2) LEAF(octeon_mult_restore3) ld $12, PT_MPL+(0*8)(sp) /* read MPL0 */ ld $13, PT_MPL+(3*8)(sp) /* read MPL3 */ ld $10, PT_MPL+(1*8)(sp) /* read MPL1 */ ld $11, PT_MPL+(4*8)(sp) /* read MPL4 */ .word 0x718d0008 /* mtm0 $12, $13 restore MPL0 and MPL3 */ ld $12, PT_MPL+(2*8)(sp) /* read MPL2 */ .word 0x714b000c /* mtm1 $10, $11 restore MPL1 and MPL4 */ ld $13, PT_MPL+(5*8)(sp) /* read MPL5 */ ld $10, PT_MTP+(0*8)(sp) /* read P0 */ ld $11, PT_MTP+(3*8)(sp) /* read P3 */ .word 0x718d000d /* mtm2 $12, $13 restore MPL2 and MPL5 */ ld $12, PT_MTP+(1*8)(sp) /* read P1 */ .word 0x714b0009 /* mtp0 $10, $11 restore P0 and P3 */ ld $13, PT_MTP+(4*8)(sp) /* read P4 */ ld $10, PT_MTP+(2*8)(sp) /* read P2 */ ld $11, PT_MTP+(5*8)(sp) /* read P5 */ .word 0x718d000a /* mtp1 $12, $13 restore P1 and P4 */ jr ra .word 0x714b000b /* mtp2 $10, $11 restore P2 and P5 */ octeon_mult_restore3_end: EXPORT(octeon_mult_restore3_end) END(octeon_mult_restore3) .set pop
AirFortressIlikara/LS2K0300-linux-4.19
10,036
arch/mips/kernel/scall64-64.S
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1995, 96, 97, 98, 99, 2000, 01, 02 by Ralf Baechle * Copyright (C) 1999, 2000 Silicon Graphics, Inc. * Copyright (C) 2001 MIPS Technologies, Inc. */ #include <linux/errno.h> #include <asm/asm.h> #include <asm/asmmacro.h> #include <asm/irqflags.h> #include <asm/mipsregs.h> #include <asm/regdef.h> #include <asm/stackframe.h> #include <asm/asm-offsets.h> #include <asm/sysmips.h> #include <asm/thread_info.h> #include <asm/unistd.h> #include <asm/war.h> #ifndef CONFIG_BINFMT_ELF32 /* Neither O32 nor N32, so define handle_sys here */ #define handle_sys64 handle_sys #endif .align 5 NESTED(handle_sys64, PT_SIZE, sp) #if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32) /* * When 32-bit compatibility is configured scall_o32.S * already did this. */ .set noat SAVE_SOME TRACE_IRQS_ON_RELOAD STI .set at #endif #if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32) ld t1, PT_EPC(sp) # skip syscall on return daddiu t1, 4 # skip to next instruction sd t1, PT_EPC(sp) #endif sd a3, PT_R26(sp) # save a3 for syscall restarting dli t1, _TIF_WORK_SYSCALL_ENTRY LONG_L t0, TI_FLAGS($28) # syscall tracing enabled? and t0, t1, t0 bnez t0, syscall_trace_entry syscall_common: dsubu t2, v0, __NR_64_Linux sltiu t0, t2, __NR_64_Linux_syscalls + 1 beqz t0, illegal_syscall dsll t0, t2, 3 # offset into table dla t2, sys_call_table daddu t0, t2, t0 ld t2, (t0) # syscall routine beqz t2, illegal_syscall jalr t2 # Do The Real Thing (TM) li t0, -EMAXERRNO - 1 # error? 
sltu t0, t0, v0 sd t0, PT_R7(sp) # set error flag beqz t0, 1f ld t1, PT_R2(sp) # syscall number dnegu v0 # error sd t1, PT_R0(sp) # save it for syscall restarting 1: sd v0, PT_R2(sp) # result n64_syscall_exit: j syscall_exit_partial /* ------------------------------------------------------------------------ */ syscall_trace_entry: SAVE_STATIC move a0, sp move a1, v0 jal syscall_trace_enter bltz v0, 1f # seccomp failed? Skip syscall RESTORE_STATIC ld v0, PT_R2(sp) # Restore syscall (maybe modified) ld a0, PT_R4(sp) # Restore argument registers ld a1, PT_R5(sp) ld a2, PT_R6(sp) ld a3, PT_R7(sp) ld a4, PT_R8(sp) ld a5, PT_R9(sp) j syscall_common 1: j syscall_exit illegal_syscall: /* This also isn't a 64-bit syscall, throw an error. */ li v0, ENOSYS # error sd v0, PT_R2(sp) li t0, 1 # set error flag sd t0, PT_R7(sp) j n64_syscall_exit END(handle_sys64) .align 3 .type sys_call_table, @object EXPORT(sys_call_table) PTR sys_read /* 5000 */ PTR sys_write PTR sys_open PTR sys_close PTR sys_newstat PTR sys_newfstat /* 5005 */ PTR sys_newlstat PTR sys_poll PTR sys_lseek PTR sys_mips_mmap PTR sys_mprotect /* 5010 */ PTR sys_munmap PTR sys_brk PTR sys_rt_sigaction PTR sys_rt_sigprocmask PTR sys_ioctl /* 5015 */ PTR sys_pread64 PTR sys_pwrite64 PTR sys_readv PTR sys_writev PTR sys_access /* 5020 */ PTR sysm_pipe PTR sys_select PTR sys_sched_yield PTR sys_mremap PTR sys_msync /* 5025 */ PTR sys_mincore PTR sys_madvise PTR sys_shmget PTR sys_shmat PTR sys_shmctl /* 5030 */ PTR sys_dup PTR sys_dup2 PTR sys_pause PTR sys_nanosleep PTR sys_getitimer /* 5035 */ PTR sys_setitimer PTR sys_alarm PTR sys_getpid PTR sys_sendfile64 PTR sys_socket /* 5040 */ PTR sys_connect PTR sys_accept PTR sys_sendto PTR sys_recvfrom PTR sys_sendmsg /* 5045 */ PTR sys_recvmsg PTR sys_shutdown PTR sys_bind PTR sys_listen PTR sys_getsockname /* 5050 */ PTR sys_getpeername PTR sys_socketpair PTR sys_setsockopt PTR sys_getsockopt PTR __sys_clone /* 5055 */ PTR __sys_fork PTR sys_execve PTR sys_exit PTR 
sys_wait4 PTR sys_kill /* 5060 */ PTR sys_newuname PTR sys_semget PTR sys_semop PTR sys_semctl PTR sys_shmdt /* 5065 */ PTR sys_msgget PTR sys_msgsnd PTR sys_msgrcv PTR sys_msgctl PTR sys_fcntl /* 5070 */ PTR sys_flock PTR sys_fsync PTR sys_fdatasync PTR sys_truncate PTR sys_ftruncate /* 5075 */ PTR sys_getdents PTR sys_getcwd PTR sys_chdir PTR sys_fchdir PTR sys_rename /* 5080 */ PTR sys_mkdir PTR sys_rmdir PTR sys_creat PTR sys_link PTR sys_unlink /* 5085 */ PTR sys_symlink PTR sys_readlink PTR sys_chmod PTR sys_fchmod PTR sys_chown /* 5090 */ PTR sys_fchown PTR sys_lchown PTR sys_umask PTR sys_gettimeofday PTR sys_getrlimit /* 5095 */ PTR sys_getrusage PTR sys_sysinfo PTR sys_times PTR sys_ptrace PTR sys_getuid /* 5100 */ PTR sys_syslog PTR sys_getgid PTR sys_setuid PTR sys_setgid PTR sys_geteuid /* 5105 */ PTR sys_getegid PTR sys_setpgid PTR sys_getppid PTR sys_getpgrp PTR sys_setsid /* 5110 */ PTR sys_setreuid PTR sys_setregid PTR sys_getgroups PTR sys_setgroups PTR sys_setresuid /* 5115 */ PTR sys_getresuid PTR sys_setresgid PTR sys_getresgid PTR sys_getpgid PTR sys_setfsuid /* 5120 */ PTR sys_setfsgid PTR sys_getsid PTR sys_capget PTR sys_capset PTR sys_rt_sigpending /* 5125 */ PTR sys_rt_sigtimedwait PTR sys_rt_sigqueueinfo PTR sys_rt_sigsuspend PTR sys_sigaltstack PTR sys_utime /* 5130 */ PTR sys_mknod PTR sys_personality PTR sys_ustat PTR sys_statfs PTR sys_fstatfs /* 5135 */ PTR sys_sysfs PTR sys_getpriority PTR sys_setpriority PTR sys_sched_setparam PTR sys_sched_getparam /* 5140 */ PTR sys_sched_setscheduler PTR sys_sched_getscheduler PTR sys_sched_get_priority_max PTR sys_sched_get_priority_min PTR sys_sched_rr_get_interval /* 5145 */ PTR sys_mlock PTR sys_munlock PTR sys_mlockall PTR sys_munlockall PTR sys_vhangup /* 5150 */ PTR sys_pivot_root PTR sys_sysctl PTR sys_prctl PTR sys_adjtimex PTR sys_setrlimit /* 5155 */ PTR sys_chroot PTR sys_sync PTR sys_acct PTR sys_settimeofday PTR sys_mount /* 5160 */ PTR sys_umount PTR sys_swapon PTR sys_swapoff 
PTR sys_reboot PTR sys_sethostname /* 5165 */ PTR sys_setdomainname PTR sys_ni_syscall /* was create_module */ PTR sys_init_module PTR sys_delete_module PTR sys_ni_syscall /* 5170, was get_kernel_syms */ PTR sys_ni_syscall /* was query_module */ PTR sys_quotactl PTR sys_ni_syscall /* was nfsservctl */ PTR sys_ni_syscall /* res. for getpmsg */ PTR sys_ni_syscall /* 5175 for putpmsg */ PTR sys_ni_syscall /* res. for afs_syscall */ PTR sys_ni_syscall /* res. for security */ PTR sys_gettid PTR sys_readahead PTR sys_setxattr /* 5180 */ PTR sys_lsetxattr PTR sys_fsetxattr PTR sys_getxattr PTR sys_lgetxattr PTR sys_fgetxattr /* 5185 */ PTR sys_listxattr PTR sys_llistxattr PTR sys_flistxattr PTR sys_removexattr PTR sys_lremovexattr /* 5190 */ PTR sys_fremovexattr PTR sys_tkill PTR sys_ni_syscall PTR sys_futex PTR sys_sched_setaffinity /* 5195 */ PTR sys_sched_getaffinity PTR sys_cacheflush PTR sys_cachectl PTR __sys_sysmips PTR sys_io_setup /* 5200 */ PTR sys_io_destroy PTR sys_io_getevents PTR sys_io_submit PTR sys_io_cancel PTR sys_exit_group /* 5205 */ PTR sys_lookup_dcookie PTR sys_epoll_create PTR sys_epoll_ctl PTR sys_epoll_wait PTR sys_remap_file_pages /* 5210 */ PTR sys_rt_sigreturn PTR sys_set_tid_address PTR sys_restart_syscall PTR sys_semtimedop PTR sys_fadvise64_64 /* 5215 */ PTR sys_timer_create PTR sys_timer_settime PTR sys_timer_gettime PTR sys_timer_getoverrun PTR sys_timer_delete /* 5220 */ PTR sys_clock_settime PTR sys_clock_gettime PTR sys_clock_getres PTR sys_clock_nanosleep PTR sys_tgkill /* 5225 */ PTR sys_utimes PTR sys_mbind PTR sys_get_mempolicy PTR sys_set_mempolicy PTR sys_mq_open /* 5230 */ PTR sys_mq_unlink PTR sys_mq_timedsend PTR sys_mq_timedreceive PTR sys_mq_notify PTR sys_mq_getsetattr /* 5235 */ PTR sys_ni_syscall /* sys_vserver */ PTR sys_waitid PTR sys_ni_syscall /* available, was setaltroot */ PTR sys_add_key PTR sys_request_key /* 5240 */ PTR sys_keyctl PTR sys_set_thread_area PTR sys_inotify_init PTR sys_inotify_add_watch PTR 
sys_inotify_rm_watch /* 5245 */ PTR sys_migrate_pages PTR sys_openat PTR sys_mkdirat PTR sys_mknodat PTR sys_fchownat /* 5250 */ PTR sys_futimesat PTR sys_newfstatat PTR sys_unlinkat PTR sys_renameat PTR sys_linkat /* 5255 */ PTR sys_symlinkat PTR sys_readlinkat PTR sys_fchmodat PTR sys_faccessat PTR sys_pselect6 /* 5260 */ PTR sys_ppoll PTR sys_unshare PTR sys_splice PTR sys_sync_file_range PTR sys_tee /* 5265 */ PTR sys_vmsplice PTR sys_move_pages PTR sys_set_robust_list PTR sys_get_robust_list PTR sys_kexec_load /* 5270 */ PTR sys_getcpu PTR sys_epoll_pwait PTR sys_ioprio_set PTR sys_ioprio_get PTR sys_utimensat /* 5275 */ PTR sys_signalfd PTR sys_ni_syscall /* was timerfd */ PTR sys_eventfd PTR sys_fallocate PTR sys_timerfd_create /* 5280 */ PTR sys_timerfd_gettime PTR sys_timerfd_settime PTR sys_signalfd4 PTR sys_eventfd2 PTR sys_epoll_create1 /* 5285 */ PTR sys_dup3 PTR sys_pipe2 PTR sys_inotify_init1 PTR sys_preadv PTR sys_pwritev /* 5290 */ PTR sys_rt_tgsigqueueinfo PTR sys_perf_event_open PTR sys_accept4 PTR sys_recvmmsg PTR sys_fanotify_init /* 5295 */ PTR sys_fanotify_mark PTR sys_prlimit64 PTR sys_name_to_handle_at PTR sys_open_by_handle_at PTR sys_clock_adjtime /* 5300 */ PTR sys_syncfs PTR sys_sendmmsg PTR sys_setns PTR sys_process_vm_readv PTR sys_process_vm_writev /* 5305 */ PTR sys_kcmp PTR sys_finit_module PTR sys_getdents64 PTR sys_sched_setattr PTR sys_sched_getattr /* 5310 */ PTR sys_renameat2 PTR sys_seccomp PTR sys_getrandom PTR sys_memfd_create PTR sys_bpf /* 5315 */ PTR sys_execveat PTR sys_userfaultfd PTR sys_membarrier PTR sys_mlock2 PTR sys_copy_file_range /* 5320 */ PTR sys_preadv2 PTR sys_pwritev2 PTR sys_pkey_mprotect PTR sys_pkey_alloc PTR sys_pkey_free /* 5325 */ PTR sys_statx PTR sys_rseq PTR sys_io_pgetevents .size sys_call_table,.-sys_call_table
AirFortressIlikara/LS2K0300-linux-4.19
3,880
arch/mips/kernel/entry-ftrace-regs.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * arch/mips/kernel/entry_ftrace.S * * Copyright (C) 2021 Loongson Corp * Author: Huang Pei <huangpei@loongson.cn> */ #include <asm/export.h> #include <asm/regdef.h> #include <asm/stackframe.h> /* * ftrace_regs_caller() is the function that replaces _mcount() when ftrace * is active. * * we arrive here after a function A calls function B, and B is what we * are tracing for. When we enter, sp points to A's stack frame, B has not * yet had a chance to allocate one yet. (This is different from -pg case * , in which the B's stack is allocated)) * when ftrace initialized, it replace three nops from all function with * "lui + nop + move" * B: * lui at, %hi(ftrace_regs_caller) * nop * li t0, 0 * # B's real start * * at B's entry, when tracing enabled, replace the 'nop' with 'jalr' * * # B's entry, three nop for both in vmlinux and in kernel modules * B: * lui at, %hi(ftrace_regs_caller) * jalr at, at * move t0, zero * # B's real start * * if set t0 to 1, then calling ftrace_regs_caller with partial regs saved * * B: * lui at, %hi(ftrace_regs_caller) * jalr at, at * li t0, 1 * # B's real start * * we make ftrace_regs_caller 64KB aligned, when entring ftrace_regs_caller * AT points to the return address to B, and ra points to return address * to A, * * if patched to new funcition, then clobbered the first real instruction * * B: * lui at, %hi(new_B) * addiu at, at, %lo(new_B) * jr at * # B's real start, now clobbered with zero * nop * */ .text .set push .set noreorder .set noat .align 16 NESTED(ftrace_regs_caller, PT_SIZE, ra) PTR_ADDIU sp, sp, -PT_SIZE .globl ftrace_caller ftrace_caller: #ifdef CONFIG_64BIT PTR_S a4, PT_R8(sp) PTR_S a5, PT_R9(sp) PTR_S a6, PT_R10(sp) PTR_S a7, PT_R11(sp) #endif PTR_S a0, PT_R4(sp) PTR_S a1, PT_R5(sp) PTR_S a2, PT_R6(sp) bnez t0, 1f PTR_S a3, PT_R7(sp) PTR_S t0, PT_R12(sp) PTR_S t1, PT_R13(sp) PTR_S t2, PT_R14(sp) PTR_S t3, PT_R15(sp) PTR_S s0, PT_R16(sp) PTR_S s1, PT_R17(sp) PTR_S s2, 
PT_R18(sp) PTR_S s3, PT_R19(sp) PTR_S s4, PT_R20(sp) PTR_S s5, PT_R21(sp) PTR_S s6, PT_R22(sp) PTR_S s7, PT_R23(sp) PTR_S t8, PT_R24(sp) PTR_S t9, PT_R25(sp) PTR_S s8, PT_R30(sp) PTR_S gp, PT_R28(sp) PTR_S AT, PT_R1(sp) 1: PTR_LA t0, PT_SIZE(sp) PTR_S AT, PT_R0(sp) //R0 for expected epc PTR_S t0, PT_R29(sp) PTR_S ra, PT_R31(sp) PTR_S AT, PT_EPC(sp) //PT_EPC maybe changed by kprobe handler END(ftrace_regs_caller) ftrace_common: PTR_ADDIU a0, AT, -12 //a0 points to B's entry address move a1, ra //a1 points to return address to A PTR_L a2, function_trace_op //a2 points to function_trace op .globl ftrace_call ftrace_call: jal ftrace_stub move a3, sp //a3 point to pt_regs #ifdef CONFIG_FUNCTION_GRAPH_TRACER .globl ftrace_graph_call ftrace_graph_call: nop nop #endif ftrace_common_return: PTR_L AT, PT_R31(sp) ftrace_graph_return: PTR_L ra, PT_EPC(sp) PTR_L a0, PT_R4(sp) PTR_L a1, PT_R5(sp) PTR_L a2, PT_R6(sp) PTR_L a3, PT_R7(sp) #ifdef CONFIG_64BIT PTR_L a4, PT_R8(sp) PTR_L a5, PT_R9(sp) PTR_L a6, PT_R10(sp) PTR_L a7, PT_R11(sp) #endif PTR_ADDIU sp, sp, PT_SIZE //retore stack frame jr ra move ra, AT .globl ftrace_stub ftrace_stub: jr ra nop #ifdef CONFIG_FUNCTION_GRAPH_TRACER .globl ftrace_graph_caller ftrace_graph_caller: PTR_L a0, PT_R31(sp) PTR_L a1, PT_EPC(sp) jal prepare_ftrace_return PTR_ADDIU a2, sp, PT_SIZE b ftrace_graph_return move AT, v0 .align 2 .globl return_to_handler return_to_handler: PTR_SUBU sp, PT_SIZE PTR_S v0, PT_R2(sp) PTR_S v1, PT_R3(sp) jal ftrace_return_to_handler PTR_LA a0, PT_SIZE(sp) /* restore the real parent address: v0 -> ra */ move ra, v0 PTR_L v0, PT_R2(sp) PTR_L v1, PT_R3(sp) jr ra PTR_ADDIU sp, PT_SIZE .set at .set reorder #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
AirFortressIlikara/LS2K0300-linux-4.19
4,409
arch/mips/kernel/entry.S
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle * Copyright (C) 1999, 2000 Silicon Graphics, Inc. * Copyright (C) 2001 MIPS Technologies, Inc. */ #include <asm/asm.h> #include <asm/asmmacro.h> #include <asm/compiler.h> #include <asm/irqflags.h> #include <asm/regdef.h> #include <asm/mipsregs.h> #include <asm/stackframe.h> #include <asm/isadep.h> #include <asm/thread_info.h> #include <asm/war.h> #ifndef CONFIG_PREEMPT #define resume_kernel restore_all #else #define __ret_from_irq ret_from_exception #endif .text .align 5 #ifndef CONFIG_PREEMPT FEXPORT(ret_from_exception) local_irq_disable # preempt stop b __ret_from_irq #endif FEXPORT(ret_from_irq) LONG_S s0, TI_REGS($28) FEXPORT(__ret_from_irq) /* * We can be coming here from a syscall done in the kernel space, * e.g. a failed kernel_execve(). */ resume_userspace_check: LONG_L t0, PT_STATUS(sp) # returning to kernel mode? andi t0, t0, KU_USER beqz t0, resume_kernel resume_userspace: local_irq_disable # make sure we dont miss an # interrupt setting need_resched # between sampling and return LONG_L a2, TI_FLAGS($28) # current->work andi t0, a2, _TIF_WORK_MASK # (ignoring syscall_trace) bnez t0, work_pending j restore_all #ifdef CONFIG_PREEMPT resume_kernel: local_irq_disable lw t0, TI_PRE_COUNT($28) bnez t0, restore_all need_resched: LONG_L t0, TI_FLAGS($28) andi t1, t0, _TIF_NEED_RESCHED beqz t1, restore_all LONG_L t0, PT_STATUS(sp) # Interrupts off? 
andi t0, 1 beqz t0, restore_all jal preempt_schedule_irq b need_resched #endif FEXPORT(ret_from_kernel_thread) jal schedule_tail # a0 = struct task_struct *prev move a0, s1 jal s0 j syscall_exit FEXPORT(ret_from_fork) jal schedule_tail # a0 = struct task_struct *prev FEXPORT(syscall_exit) #ifdef CONFIG_DEBUG_RSEQ move a0, sp jal rseq_syscall #endif local_irq_disable # make sure need_resched and # signals dont change between # sampling and return LONG_L a2, TI_FLAGS($28) # current->work li t0, _TIF_ALLWORK_MASK and t0, a2, t0 bnez t0, syscall_exit_work restore_all: # restore full frame .set noat RESTORE_TEMP RESTORE_AT RESTORE_STATIC restore_partial: # restore partial frame #ifdef CONFIG_TRACE_IRQFLAGS SAVE_STATIC SAVE_AT SAVE_TEMP LONG_L v0, PT_STATUS(sp) #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) and v0, ST0_IEP #else and v0, ST0_IE #endif beqz v0, 1f jal trace_hardirqs_on b 2f 1: jal trace_hardirqs_off 2: RESTORE_TEMP RESTORE_AT RESTORE_STATIC #endif RESTORE_SOME RESTORE_SP_AND_RET .set at work_pending: andi t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS beqz t0, work_notifysig work_resched: TRACE_IRQS_OFF jal schedule local_irq_disable # make sure need_resched and # signals dont change between # sampling and return LONG_L a2, TI_FLAGS($28) andi t0, a2, _TIF_WORK_MASK # is there any work to be done # other than syscall tracing? beqz t0, restore_all andi t0, a2, _TIF_NEED_RESCHED bnez t0, work_resched work_notifysig: # deal with pending signals and # notify-resume requests move a0, sp li a1, 0 jal do_notify_resume # a2 already loaded j resume_userspace_check FEXPORT(syscall_exit_partial) #ifdef CONFIG_DEBUG_RSEQ move a0, sp jal rseq_syscall #endif local_irq_disable # make sure need_resched doesn't # change between and return LONG_L a2, TI_FLAGS($28) # current->work li t0, _TIF_ALLWORK_MASK and t0, a2 beqz t0, restore_partial SAVE_STATIC syscall_exit_work: LONG_L t0, PT_STATUS(sp) # returning to kernel mode? 
andi t0, t0, KU_USER beqz t0, resume_kernel li t0, _TIF_WORK_SYSCALL_EXIT and t0, a2 # a2 is preloaded with TI_FLAGS beqz t0, work_pending # trace bit set? local_irq_enable # could let syscall_trace_leave() # call schedule() instead move a0, sp jal syscall_trace_leave b resume_userspace #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) || \ defined(CONFIG_MIPS_MT) /* * MIPS32R2 Instruction Hazard Barrier - must be called * * For C code use the inline version named instruction_hazard(). */ LEAF(mips_ihb) .set MIPS_ISA_LEVEL_RAW jr.hb ra nop END(mips_ihb) #endif /* CONFIG_CPU_MIPSR2 or CONFIG_CPU_MIPSR6 or CONFIG_MIPS_MT */
AirFortressIlikara/LS2K0300-linux-4.19
4,801
arch/mips/kernel/relocate_kernel.S
/* * relocate_kernel.S for kexec * Created by <nschichan@corp.free.fr> on Thu Oct 12 17:49:57 2006 * * This source code is licensed under the GNU General Public License, * Version 2. See the file COPYING for more details. */ #include <asm/asm.h> #include <asm/asmmacro.h> #include <asm/cpu.h> #include <asm/regdef.h> #include <asm/mipsregs.h> #include <asm/stackframe.h> #include <asm/addrspace.h> LEAF(relocate_new_kernel) PTR_L a0, arg0 PTR_L a1, arg1 PTR_L a2, arg2 PTR_L a3, arg3 PTR_L s0, kexec_indirection_page PTR_L s1, kexec_start_address process_entry: PTR_L s2, (s0) PTR_ADDIU s0, s0, SZREG /* * In case of a kdump/crash kernel, the indirection page is not * populated as the kernel is directly copied to a reserved location */ beqz s2, done /* destination page */ and s3, s2, 0x1 beq s3, zero, 1f and s4, s2, ~0x1 /* store destination addr in s4 */ b process_entry 1: /* indirection page, update s0 */ and s3, s2, 0x2 beq s3, zero, 1f and s0, s2, ~0x2 b process_entry 1: /* done page */ and s3, s2, 0x4 beq s3, zero, 1f b done 1: /* source page */ and s3, s2, 0x8 beq s3, zero, process_entry and s2, s2, ~0x8 li s6, (1 << _PAGE_SHIFT) / SZREG copy_word: /* copy page word by word */ REG_L s5, (s2) REG_S s5, (s4) PTR_ADDIU s4, s4, SZREG PTR_ADDIU s2, s2, SZREG LONG_ADDIU s6, s6, -1 beq s6, zero, process_entry b copy_word b process_entry done: #ifdef CONFIG_SMP /* kexec_flag reset is signal to other CPUs what kernel was moved to it's location. Note - we need relocated address of kexec_flag. */ bal 1f 1: move t1,ra; PTR_LA t2,1b PTR_LA t0,kexec_flag PTR_SUB t0,t0,t2; PTR_ADD t0,t1,t0; LONG_S zero,(t0) #endif #ifdef CONFIG_CPU_CAVIUM_OCTEON /* We need to flush I-cache before jumping to new kernel. * Unfortunately, this code is cpu-specific. 
*/ .set push .set noreorder syncw syncw synci 0($0) .set pop #else sync #endif /* jump to kexec_start_address */ j s1 END(relocate_new_kernel) #ifdef CONFIG_SMP /* * Other CPUs should wait until code is relocated and * then start at entry (?) point. */ LEAF(kexec_smp_wait) PTR_L a0, s_arg0 PTR_L a1, s_arg1 PTR_L a2, s_arg2 PTR_L a3, s_arg3 PTR_L s1, kexec_start_address /* Non-relocated address works for args and kexec_start_address ( old * kernel is not overwritten). But we need relocated address of * kexec_flag. */ bal 1f 1: move t1,ra; PTR_LA t2,1b PTR_LA t0,kexec_flag PTR_SUB t0,t0,t2; PTR_ADD t0,t1,t0; 1: LONG_L s0, (t0) bne s0, zero,1b #ifdef CONFIG_CPU_CAVIUM_OCTEON .set push .set noreorder synci 0($0) .set pop #else sync #endif #ifdef CONFIG_CPU_LOONGSON3 /* s0:prid s1:initfn */ /* a0:base t1:cpuid t2:node t3:core t9:count */ mfc0 t1, CP0_EBASE andi t1, MIPS_EBASE_CPUNUM dins a0, t1, 8, 2 /* insert core id*/ dext t2, t1, 2, 2 dins a0, t2, 44, 2 /* insert node id */ mfc0 s0, CP0_PRID andi s0, s0, (PRID_IMP_MASK | PRID_REV_MASK) beq s0, (PRID_IMP_LOONGSON_64 | PRID_REV_LOONGSON3B_R1), 1f beq s0, (PRID_IMP_LOONGSON_64 | PRID_REV_LOONGSON3B_R2), 1f b 2f /* Loongson-3A1000/3A2000/3A3000/3A4000 */ 1: dins a0, t2, 14, 2 /* Loongson-3B1000/3B1500 need bit 15~14 */ 2: li t9, 0x100 /* wait for init loop */ 3: addiu t9, -1 /* limit mailbox access */ bnez t9, 3b lw s1, 0x20(a0) /* check lower 32-bit as jump indicator */ beqz s1, 2b ld s1, 0x20(a0) /* get PC (whole 64-bit) via mailbox */ ld sp, 0x28(a0) /* get SP via mailbox reg1 */ ld gp, 0x30(a0) /* get GP via mailbox reg2 */ ld a1, 0x38(a0) #endif j s1 END(kexec_smp_wait) #endif #ifdef __mips64 /* all PTR's must be aligned to 8 byte in 64-bit mode */ .align 3 #endif /* All parameters to new kernel are passed in registers a0-a3. * kexec_args[0..3] are used to prepare register values. 
*/ kexec_args: EXPORT(kexec_args) arg0: PTR 0x0 arg1: PTR 0x0 arg2: PTR 0x0 arg3: PTR 0x0 .size kexec_args,PTRSIZE*4 #ifdef CONFIG_SMP /* * Secondary CPUs may have different kernel parameters in * their registers a0-a3. secondary_kexec_args[0..3] are used * to prepare register values. */ secondary_kexec_args: EXPORT(secondary_kexec_args) s_arg0: PTR 0x0 s_arg1: PTR 0x0 s_arg2: PTR 0x0 s_arg3: PTR 0x0 .size secondary_kexec_args,PTRSIZE*4 kexec_flag: LONG 0x1 #endif kexec_start_address: EXPORT(kexec_start_address) PTR 0x0 .size kexec_start_address, PTRSIZE kexec_indirection_page: EXPORT(kexec_indirection_page) PTR 0 .size kexec_indirection_page, PTRSIZE relocate_new_kernel_end: relocate_new_kernel_size: EXPORT(relocate_new_kernel_size) PTR relocate_new_kernel_end - relocate_new_kernel .size relocate_new_kernel_size, PTRSIZE
AirFortressIlikara/LS2K0300-linux-4.19
13,409
arch/mips/kernel/bmips_5xxx_init.S
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2011-2012 by Broadcom Corporation * * Init for bmips 5000. * Used to init second core in dual core 5000's. */ #include <linux/init.h> #include <asm/asm.h> #include <asm/asmmacro.h> #include <asm/cacheops.h> #include <asm/regdef.h> #include <asm/mipsregs.h> #include <asm/stackframe.h> #include <asm/addrspace.h> #include <asm/hazards.h> #include <asm/bmips.h> #ifdef CONFIG_CPU_BMIPS5000 #define cacheop(kva, size, linesize, op) \ .set noreorder ; \ addu t1, kva, size ; \ subu t2, linesize, 1 ; \ not t2 ; \ and t0, kva, t2 ; \ addiu t1, t1, -1 ; \ and t1, t2 ; \ 9: cache op, 0(t0) ; \ bne t0, t1, 9b ; \ addu t0, linesize ; \ .set reorder ; #define IS_SHIFT 22 #define IL_SHIFT 19 #define IA_SHIFT 16 #define DS_SHIFT 13 #define DL_SHIFT 10 #define DA_SHIFT 7 #define IS_MASK 7 #define IL_MASK 7 #define IA_MASK 7 #define DS_MASK 7 #define DL_MASK 7 #define DA_MASK 7 #define ICE_MASK 0x80000000 #define DCE_MASK 0x40000000 #define CP0_BRCM_CONFIG0 $22, 0 #define CP0_BRCM_MODE $22, 1 #define CP0_CONFIG_K0_MASK 7 #define CP0_ICACHE_TAG_LO $28 #define CP0_ICACHE_DATA_LO $28, 1 #define CP0_DCACHE_TAG_LO $28, 2 #define CP0_D_SEC_CACHE_DATA_LO $28, 3 #define CP0_ICACHE_TAG_HI $29 #define CP0_ICACHE_DATA_HI $29, 1 #define CP0_DCACHE_TAG_HI $29, 2 #define CP0_BRCM_MODE_Luc_MASK (1 << 11) #define CP0_BRCM_CONFIG0_CWF_MASK (1 << 20) #define CP0_BRCM_CONFIG0_TSE_MASK (1 << 19) #define CP0_BRCM_MODE_SET_MASK (1 << 7) #define CP0_BRCM_MODE_ClkRATIO_MASK (7 << 4) #define CP0_BRCM_MODE_BrPRED_MASK (3 << 24) #define CP0_BRCM_MODE_BrPRED_SHIFT 24 #define CP0_BRCM_MODE_BrHIST_MASK (0x1f << 20) #define CP0_BRCM_MODE_BrHIST_SHIFT 20 /* ZSC L2 Cache Register Access Register Definitions */ #define BRCM_ZSC_ALL_REGS_SELECT 0x7 << 24 #define BRCM_ZSC_CONFIG_REG 0 << 3 #define BRCM_ZSC_REQ_BUFFER_REG 2 << 3 
#define BRCM_ZSC_RBUS_ADDR_MAPPING_REG0 4 << 3 #define BRCM_ZSC_RBUS_ADDR_MAPPING_REG1 6 << 3 #define BRCM_ZSC_RBUS_ADDR_MAPPING_REG2 8 << 3 #define BRCM_ZSC_SCB0_ADDR_MAPPING_REG0 0xa << 3 #define BRCM_ZSC_SCB0_ADDR_MAPPING_REG1 0xc << 3 #define BRCM_ZSC_SCB1_ADDR_MAPPING_REG0 0xe << 3 #define BRCM_ZSC_SCB1_ADDR_MAPPING_REG1 0x10 << 3 #define BRCM_ZSC_CONFIG_LMB1En 1 << (15) #define BRCM_ZSC_CONFIG_LMB0En 1 << (14) /* branch predition values */ #define BRCM_BrPRED_ALL_TAKEN (0x0) #define BRCM_BrPRED_ALL_NOT_TAKEN (0x1) #define BRCM_BrPRED_BHT_ENABLE (0x2) #define BRCM_BrPRED_PREDICT_BACKWARD (0x3) .align 2 /* * Function: size_i_cache * Arguments: None * Returns: v0 = i cache size, v1 = I cache line size * Description: compute the I-cache size and I-cache line size * Trashes: v0, v1, a0, t0 * * pseudo code: * */ LEAF(size_i_cache) .set noreorder mfc0 a0, CP0_CONFIG, 1 move t0, a0 /* * Determine sets per way: IS * * This field contains the number of sets (i.e., indices) per way of * the instruction cache: * i) 0x0: 64, ii) 0x1: 128, iii) 0x2: 256, iv) 0x3: 512, v) 0x4: 1k * vi) 0x5 - 0x7: Reserved. */ srl a0, a0, IS_SHIFT and a0, a0, IS_MASK /* sets per way = (64<<IS) */ li v0, 0x40 sllv v0, v0, a0 /* * Determine line size * * This field contains the line size of the instruction cache: * i) 0x0: No I-cache present, i) 0x3: 16 bytes, ii) 0x4: 32 bytes, iii) * 0x5: 64 bytes, iv) the rest: Reserved. */ move a0, t0 srl a0, a0, IL_SHIFT and a0, a0, IL_MASK beqz a0, no_i_cache nop /* line size = 2 ^ (IL+1) */ addi a0, a0, 1 li v1, 1 sll v1, v1, a0 /* v0 now have sets per way, multiply it by line size now * that will give the set size */ sll v0, v0, a0 /* * Determine set associativity * * This field contains the set associativity of the instruction cache. * i) 0x0: Direct mapped, ii) 0x1: 2-way, iii) 0x2: 3-way, iv) 0x3: * 4-way, v) 0x4 - 0x7: Reserved. 
*/ move a0, t0 srl a0, a0, IA_SHIFT and a0, a0, IA_MASK addi a0, a0, 0x1 /* v0 has the set size, multiply it by * set associativiy, to get the cache size */ multu v0, a0 /*multu is interlocked, so no need to insert nops */ mflo v0 b 1f nop no_i_cache: move v0, zero move v1, zero 1: jr ra nop .set reorder END(size_i_cache) /* * Function: size_d_cache * Arguments: None * Returns: v0 = d cache size, v1 = d cache line size * Description: compute the D-cache size and D-cache line size. * Trashes: v0, v1, a0, t0 * */ LEAF(size_d_cache) .set noreorder mfc0 a0, CP0_CONFIG, 1 move t0, a0 /* * Determine sets per way: IS * * This field contains the number of sets (i.e., indices) per way of * the instruction cache: * i) 0x0: 64, ii) 0x1: 128, iii) 0x2: 256, iv) 0x3: 512, v) 0x4: 1k * vi) 0x5 - 0x7: Reserved. */ srl a0, a0, DS_SHIFT and a0, a0, DS_MASK /* sets per way = (64<<IS) */ li v0, 0x40 sllv v0, v0, a0 /* * Determine line size * * This field contains the line size of the instruction cache: * i) 0x0: No I-cache present, i) 0x3: 16 bytes, ii) 0x4: 32 bytes, iii) * 0x5: 64 bytes, iv) the rest: Reserved. */ move a0, t0 srl a0, a0, DL_SHIFT and a0, a0, DL_MASK beqz a0, no_d_cache nop /* line size = 2 ^ (IL+1) */ addi a0, a0, 1 li v1, 1 sll v1, v1, a0 /* v0 now have sets per way, multiply it by line size now * that will give the set size */ sll v0, v0, a0 /* determine set associativity * * This field contains the set associativity of the instruction cache. * i) 0x0: Direct mapped, ii) 0x1: 2-way, iii) 0x2: 3-way, iv) 0x3: * 4-way, v) 0x4 - 0x7: Reserved. 
*/ move a0, t0 srl a0, a0, DA_SHIFT and a0, a0, DA_MASK addi a0, a0, 0x1 /* v0 has the set size, multiply it by * set associativiy, to get the cache size */ multu v0, a0 /*multu is interlocked, so no need to insert nops */ mflo v0 b 1f nop no_d_cache: move v0, zero move v1, zero 1: jr ra nop .set reorder END(size_d_cache) /* * Function: enable_ID * Arguments: None * Returns: None * Description: Enable I and D caches, initialize I and D-caches, also set * hardware delay for d-cache (TP0). * Trashes: t0 * */ .global enable_ID .ent enable_ID .set noreorder enable_ID: mfc0 t0, CP0_BRCM_CONFIG0 or t0, t0, (ICE_MASK | DCE_MASK) mtc0 t0, CP0_BRCM_CONFIG0 jr ra nop .end enable_ID .set reorder /* * Function: l1_init * Arguments: None * Returns: None * Description: Enable I and D caches, and initialize I and D-caches * Trashes: a0, v0, v1, t0, t1, t2, t8 * */ .globl l1_init .ent l1_init .set noreorder l1_init: /* save return address */ move t8, ra /* initialize I and D cache Data and Tag registers. */ mtc0 zero, CP0_ICACHE_TAG_LO mtc0 zero, CP0_ICACHE_TAG_HI mtc0 zero, CP0_ICACHE_DATA_LO mtc0 zero, CP0_ICACHE_DATA_HI mtc0 zero, CP0_DCACHE_TAG_LO mtc0 zero, CP0_DCACHE_TAG_HI /* Enable Caches before Clearing. If the caches are disabled * then the cache operations to clear the cache will be ignored */ jal enable_ID nop jal size_i_cache /* v0 = i-cache size, v1 = i-cache line size */ nop /* run uncached in kseg 1 */ la k0, 1f lui k1, 0x2000 or k0, k1, k0 jr k0 nop 1: /* * set K0 cache mode */ mfc0 t0, CP0_CONFIG and t0, t0, ~CP0_CONFIG_K0_MASK or t0, t0, 3 /* Write Back mode */ mtc0 t0, CP0_CONFIG /* * Initialize instruction cache. */ li a0, KSEG0 cacheop(a0, v0, v1, Index_Store_Tag_I) /* * Now we can run from I-$, kseg 0 */ la k0, 1f lui k1, 0x2000 or k0, k1, k0 xor k0, k1, k0 jr k0 nop 1: /* * Initialize data cache. 
*/ jal size_d_cache /* v0 = d-cache size, v1 = d-cache line size */ nop li a0, KSEG0 cacheop(a0, v0, v1, Index_Store_Tag_D) jr t8 nop .end l1_init .set reorder /* * Function: set_other_config * Arguments: none * Returns: None * Description: initialize other remainder configuration to defaults. * Trashes: t0, t1 * * pseudo code: * */ LEAF(set_other_config) .set noreorder /* enable Bus error for I-fetch */ mfc0 t0, CP0_CACHEERR, 0 li t1, 0x4 or t0, t1 mtc0 t0, CP0_CACHEERR, 0 /* enable Bus error for Load */ mfc0 t0, CP0_CACHEERR, 1 li t1, 0x4 or t0, t1 mtc0 t0, CP0_CACHEERR, 1 /* enable Bus Error for Store */ mfc0 t0, CP0_CACHEERR, 2 li t1, 0x4 or t0, t1 mtc0 t0, CP0_CACHEERR, 2 jr ra nop .set reorder END(set_other_config) /* * Function: set_branch_pred * Arguments: none * Returns: None * Description: * Trashes: t0, t1 * * pseudo code: * */ LEAF(set_branch_pred) .set noreorder mfc0 t0, CP0_BRCM_MODE li t1, ~(CP0_BRCM_MODE_BrPRED_MASK | CP0_BRCM_MODE_BrHIST_MASK ) and t0, t0, t1 /* enable Branch prediction */ li t1, BRCM_BrPRED_BHT_ENABLE sll t1, CP0_BRCM_MODE_BrPRED_SHIFT or t0, t0, t1 /* set history count to 8 */ li t1, 8 sll t1, CP0_BRCM_MODE_BrHIST_SHIFT or t0, t0, t1 mtc0 t0, CP0_BRCM_MODE jr ra nop .set reorder END(set_branch_pred) /* * Function: set_luc * Arguments: set link uncached. 
* Returns: None * Description: * Trashes: t0, t1 * */ LEAF(set_luc) .set noreorder mfc0 t0, CP0_BRCM_MODE li t1, ~(CP0_BRCM_MODE_Luc_MASK) and t0, t0, t1 /* set Luc */ ori t0, t0, CP0_BRCM_MODE_Luc_MASK mtc0 t0, CP0_BRCM_MODE jr ra nop .set reorder END(set_luc) /* * Function: set_cwf_tse * Arguments: set CWF and TSE bits * Returns: None * Description: * Trashes: t0, t1 * */ LEAF(set_cwf_tse) .set noreorder mfc0 t0, CP0_BRCM_CONFIG0 li t1, (CP0_BRCM_CONFIG0_CWF_MASK | CP0_BRCM_CONFIG0_TSE_MASK) or t0, t0, t1 mtc0 t0, CP0_BRCM_CONFIG0 jr ra nop .set reorder END(set_cwf_tse) /* * Function: set_clock_ratio * Arguments: set clock ratio specified by a0 * Returns: None * Description: * Trashes: v0, v1, a0, a1 * * pseudo code: * */ LEAF(set_clock_ratio) .set noreorder mfc0 t0, CP0_BRCM_MODE li t1, ~(CP0_BRCM_MODE_SET_MASK | CP0_BRCM_MODE_ClkRATIO_MASK) and t0, t0, t1 li t1, CP0_BRCM_MODE_SET_MASK or t0, t0, t1 or t0, t0, a0 mtc0 t0, CP0_BRCM_MODE jr ra nop .set reorder END(set_clock_ratio) /* * Function: set_zephyr * Arguments: None * Returns: None * Description: Set any zephyr bits * Trashes: t0 & t1 * */ LEAF(set_zephyr) .set noreorder /* enable read/write of CP0 #22 sel. 
8 */ li t0, 0x5a455048 .word 0x4088b00f /* mtc0 t0, $22, 15 */ .word 0x4008b008 /* mfc0 t0, $22, 8 */ li t1, 0x09008000 /* turn off pref, jtb */ or t0, t0, t1 .word 0x4088b008 /* mtc0 t0, $22, 8 */ sync /* disable read/write of CP0 #22 sel 8 */ li t0, 0x0 .word 0x4088b00f /* mtc0 t0, $22, 15 */ jr ra nop .set reorder END(set_zephyr) /* * Function: set_llmb * Arguments: a0=0 disable llmb, a0=1 enables llmb * Returns: None * Description: * Trashes: t0, t1, t2 * * pseudo code: * */ LEAF(set_llmb) .set noreorder li t2, 0x90000000 | BRCM_ZSC_ALL_REGS_SELECT | BRCM_ZSC_CONFIG_REG sync cache 0x7, 0x0(t2) sync mfc0 t0, CP0_D_SEC_CACHE_DATA_LO li t1, ~(BRCM_ZSC_CONFIG_LMB1En | BRCM_ZSC_CONFIG_LMB0En) and t0, t0, t1 beqz a0, svlmb nop enable_lmb: li t1, (BRCM_ZSC_CONFIG_LMB1En | BRCM_ZSC_CONFIG_LMB0En) or t0, t0, t1 svlmb: mtc0 t0, CP0_D_SEC_CACHE_DATA_LO sync cache 0xb, 0x0(t2) sync jr ra nop .set reorder END(set_llmb) /* * Function: core_init * Arguments: none * Returns: None * Description: initialize core related configuration * Trashes: v0,v1,a0,a1,t8 * * pseudo code: * */ .globl core_init .ent core_init .set noreorder core_init: move t8, ra /* set Zephyr bits. 
*/ bal set_zephyr nop #if ENABLE_FPU==1 /* initialize the Floating point unit (both TPs) */ bal init_fpu nop #endif /* set low latency memory bus */ li a0, 1 bal set_llmb nop /* set branch prediction (TP0 only) */ bal set_branch_pred nop /* set link uncached */ bal set_luc nop /* set CWF and TSE */ bal set_cwf_tse nop /* *set clock ratio by setting 1 to 'set' * and 0 to ClkRatio, (TP0 only) */ li a0, 0 bal set_clock_ratio nop /* set other configuration to defaults */ bal set_other_config nop move ra, t8 jr ra nop .set reorder .end core_init /* * Function: clear_jump_target_buffer * Arguments: None * Returns: None * Description: * Trashes: t0, t1, t2 * */ #define RESET_CALL_RETURN_STACK_THIS_THREAD (0x06<<16) #define RESET_JUMP_TARGET_BUFFER_THIS_THREAD (0x04<<16) #define JTB_CS_CNTL_MASK (0xFF<<16) .globl clear_jump_target_buffer .ent clear_jump_target_buffer .set noreorder clear_jump_target_buffer: mfc0 t0, $22, 2 nop nop li t1, ~JTB_CS_CNTL_MASK and t0, t0, t1 li t2, RESET_CALL_RETURN_STACK_THIS_THREAD or t0, t0, t2 mtc0 t0, $22, 2 nop nop and t0, t0, t1 li t2, RESET_JUMP_TARGET_BUFFER_THIS_THREAD or t0, t0, t2 mtc0 t0, $22, 2 nop nop jr ra nop .end clear_jump_target_buffer .set reorder /* * Function: bmips_cache_init * Arguments: None * Returns: None * Description: Enable I and D caches, and initialize I and D-caches * Trashes: v0, v1, t0, t1, t2, t5, t7, t8 * */ .globl bmips_5xxx_init .ent bmips_5xxx_init .set noreorder bmips_5xxx_init: /* save return address and A0 */ move t7, ra move t5, a0 jal l1_init nop jal core_init nop jal clear_jump_target_buffer nop mtc0 zero, CP0_CAUSE move a0, t5 jr t7 nop .end bmips_5xxx_init .set reorder #endif
AirFortressIlikara/LS2K0300-linux-4.19
1,596
arch/mips/kernel/r4k_switch.S
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1994, 1995, 1996, 1998, 1999, 2002, 2003 Ralf Baechle * Copyright (C) 1996 David S. Miller (davem@davemloft.net) * Copyright (C) 1994, 1995, 1996, by Andreas Busse * Copyright (C) 1999 Silicon Graphics, Inc. * Copyright (C) 2000 MIPS Technologies, Inc. * written by Carsten Langgaard, carstenl@mips.com */ #include <asm/asm.h> #include <asm/cachectl.h> #include <asm/mipsregs.h> #include <asm/asm-offsets.h> #include <asm/regdef.h> #include <asm/stackframe.h> #include <asm/thread_info.h> #include <asm/asmmacro.h> /* * task_struct *resume(task_struct *prev, task_struct *next, * struct thread_info *next_ti) */ .align 5 LEAF(resume) mfc0 t1, CP0_STATUS LONG_S t1, THREAD_STATUS(a0) cpu_save_nonscratch a0 LONG_S ra, THREAD_REG31(a0) #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) PTR_LA t8, __stack_chk_guard LONG_L t9, TASK_STACK_CANARY(a1) LONG_S t9, 0(t8) #endif /* * The order of restoring the registers takes care of the race * updating $28, $29 and kernelsp without disabling ints. */ move $28, a2 cpu_restore_nonscratch a1 PTR_ADDU t0, $28, _THREAD_SIZE - 32 set_saved_sp t0, t1, t2 mfc0 t1, CP0_STATUS /* Do we really need this? */ li a3, 0xff01 and t1, a3 LONG_L a2, THREAD_STATUS(a1) nor a3, $0, a3 and a2, a3 or a2, t1 #ifdef CONFIG_CPU_LOONGSON3 or a2, ST0_MM #endif mtc0 a2, CP0_STATUS move v0, a0 jr ra END(resume)
AirFortressIlikara/LS2K0300-linux-4.19
4,964
arch/mips/kernel/cps-vec-ns16550.S
/* * Copyright (C) 2015 Imagination Technologies * Author: Paul Burton <paul.burton@mips.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <asm/addrspace.h> #include <asm/asm.h> #include <asm/asm-offsets.h> #include <asm/mipsregs.h> #include <asm/regdef.h> #include <linux/serial_reg.h> #define UART_TX_OFS (UART_TX << CONFIG_MIPS_CPS_NS16550_SHIFT) #define UART_LSR_OFS (UART_LSR << CONFIG_MIPS_CPS_NS16550_SHIFT) /** * _mips_cps_putc() - write a character to the UART * @a0: ASCII character to write * @t9: UART base address */ LEAF(_mips_cps_putc) 1: lw t0, UART_LSR_OFS(t9) andi t0, t0, UART_LSR_TEMT beqz t0, 1b sb a0, UART_TX_OFS(t9) jr ra END(_mips_cps_putc) /** * _mips_cps_puts() - write a string to the UART * @a0: pointer to NULL-terminated ASCII string * @t9: UART base address * * Write a null-terminated ASCII string to the UART. */ NESTED(_mips_cps_puts, 0, ra) move s7, ra move s6, a0 1: lb a0, 0(s6) beqz a0, 2f jal _mips_cps_putc PTR_ADDIU s6, s6, 1 b 1b 2: jr s7 END(_mips_cps_puts) /** * _mips_cps_putx4 - write a 4b hex value to the UART * @a0: the 4b value to write to the UART * @t9: UART base address * * Write a single hexadecimal character to the UART. */ NESTED(_mips_cps_putx4, 0, ra) andi a0, a0, 0xf li t0, '0' blt a0, 10, 1f li t0, 'a' addiu a0, a0, -10 1: addu a0, a0, t0 b _mips_cps_putc END(_mips_cps_putx4) /** * _mips_cps_putx8 - write an 8b hex value to the UART * @a0: the 8b value to write to the UART * @t9: UART base address * * Write an 8 bit value (ie. 2 hexadecimal characters) to the UART. 
*/ NESTED(_mips_cps_putx8, 0, ra) move s3, ra move s2, a0 srl a0, a0, 4 jal _mips_cps_putx4 move a0, s2 move ra, s3 b _mips_cps_putx4 END(_mips_cps_putx8) /** * _mips_cps_putx16 - write a 16b hex value to the UART * @a0: the 16b value to write to the UART * @t9: UART base address * * Write a 16 bit value (ie. 4 hexadecimal characters) to the UART. */ NESTED(_mips_cps_putx16, 0, ra) move s5, ra move s4, a0 srl a0, a0, 8 jal _mips_cps_putx8 move a0, s4 move ra, s5 b _mips_cps_putx8 END(_mips_cps_putx16) /** * _mips_cps_putx32 - write a 32b hex value to the UART * @a0: the 32b value to write to the UART * @t9: UART base address * * Write a 32 bit value (ie. 8 hexadecimal characters) to the UART. */ NESTED(_mips_cps_putx32, 0, ra) move s7, ra move s6, a0 srl a0, a0, 16 jal _mips_cps_putx16 move a0, s6 move ra, s7 b _mips_cps_putx16 END(_mips_cps_putx32) #ifdef CONFIG_64BIT /** * _mips_cps_putx64 - write a 64b hex value to the UART * @a0: the 64b value to write to the UART * @t9: UART base address * * Write a 64 bit value (ie. 16 hexadecimal characters) to the UART. */ NESTED(_mips_cps_putx64, 0, ra) move sp, ra move s8, a0 dsrl32 a0, a0, 0 jal _mips_cps_putx32 move a0, s8 move ra, sp b _mips_cps_putx32 END(_mips_cps_putx64) #define _mips_cps_putxlong _mips_cps_putx64 #else /* !CONFIG_64BIT */ #define _mips_cps_putxlong _mips_cps_putx32 #endif /* !CONFIG_64BIT */ /** * mips_cps_bev_dump() - dump relevant exception state to UART * @a0: pointer to NULL-terminated ASCII string naming the exception * * Write information that may be useful in debugging an exception to the * UART configured by CONFIG_MIPS_CPS_NS16550_*. 
As this BEV exception * will only be run if something goes horribly wrong very early during * the bringup of a core and it is very likely to be unsafe to perform * memory accesses at that point (cache state indeterminate, EVA may not * be configured, coherence may be disabled) let alone have a stack, * this is all written in assembly using only registers & unmapped * uncached access to the UART registers. */ LEAF(mips_cps_bev_dump) move s0, ra move s1, a0 li t9, CKSEG1ADDR(CONFIG_MIPS_CPS_NS16550_BASE) PTR_LA a0, str_newline jal _mips_cps_puts PTR_LA a0, str_bev jal _mips_cps_puts move a0, s1 jal _mips_cps_puts PTR_LA a0, str_newline jal _mips_cps_puts PTR_LA a0, str_newline jal _mips_cps_puts #define DUMP_COP0_REG(reg, name, sz, _mfc0) \ PTR_LA a0, 8f; \ jal _mips_cps_puts; \ _mfc0 a0, reg; \ jal _mips_cps_putx##sz; \ PTR_LA a0, str_newline; \ jal _mips_cps_puts; \ TEXT(name) DUMP_COP0_REG(CP0_CAUSE, "Cause: 0x", 32, mfc0) DUMP_COP0_REG(CP0_STATUS, "Status: 0x", 32, mfc0) DUMP_COP0_REG(CP0_EBASE, "EBase: 0x", long, MFC0) DUMP_COP0_REG(CP0_BADVADDR, "BadVAddr: 0x", long, MFC0) DUMP_COP0_REG(CP0_BADINSTR, "BadInstr: 0x", 32, mfc0) PTR_LA a0, str_newline jal _mips_cps_puts jr s0 END(mips_cps_bev_dump) .pushsection .data str_bev: .asciiz "BEV Exception: " str_newline: .asciiz "\r\n" .popsection
AirFortressIlikara/LS2K0300-linux-4.19
10,582
arch/mips/kernel/scall64-n32.S
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1995, 96, 97, 98, 99, 2000, 01 by Ralf Baechle * Copyright (C) 1999, 2000 Silicon Graphics, Inc. * Copyright (C) 2001 MIPS Technologies, Inc. */ #include <linux/errno.h> #include <asm/asm.h> #include <asm/asmmacro.h> #include <asm/irqflags.h> #include <asm/mipsregs.h> #include <asm/regdef.h> #include <asm/stackframe.h> #include <asm/thread_info.h> #include <asm/unistd.h> #ifndef CONFIG_MIPS32_O32 /* No O32, so define handle_sys here */ #define handle_sysn32 handle_sys #endif .align 5 NESTED(handle_sysn32, PT_SIZE, sp) #ifndef CONFIG_MIPS32_O32 .set noat SAVE_SOME TRACE_IRQS_ON_RELOAD STI .set at #endif dsubu t0, v0, __NR_N32_Linux # check syscall number sltiu t0, t0, __NR_N32_Linux_syscalls + 1 #ifndef CONFIG_MIPS32_O32 ld t1, PT_EPC(sp) # skip syscall on return daddiu t1, 4 # skip to next instruction sd t1, PT_EPC(sp) #endif beqz t0, not_n32_scall sd a3, PT_R26(sp) # save a3 for syscall restarting li t1, _TIF_WORK_SYSCALL_ENTRY LONG_L t0, TI_FLAGS($28) # syscall tracing enabled? and t0, t1, t0 bnez t0, n32_syscall_trace_entry syscall_common: dsll t0, v0, 3 # offset into table ld t2, (sysn32_call_table - (__NR_N32_Linux * 8))(t0) jalr t2 # Do The Real Thing (TM) li t0, -EMAXERRNO - 1 # error? sltu t0, t0, v0 sd t0, PT_R7(sp) # set error flag beqz t0, 1f ld t1, PT_R2(sp) # syscall number dnegu v0 # error sd t1, PT_R0(sp) # save it for syscall restarting 1: sd v0, PT_R2(sp) # result j syscall_exit_partial /* ------------------------------------------------------------------------ */ n32_syscall_trace_entry: SAVE_STATIC move a0, sp move a1, v0 jal syscall_trace_enter bltz v0, 1f # seccomp failed? 
Skip syscall RESTORE_STATIC ld v0, PT_R2(sp) # Restore syscall (maybe modified) ld a0, PT_R4(sp) # Restore argument registers ld a1, PT_R5(sp) ld a2, PT_R6(sp) ld a3, PT_R7(sp) ld a4, PT_R8(sp) ld a5, PT_R9(sp) dsubu t2, v0, __NR_N32_Linux # check (new) syscall number sltiu t0, t2, __NR_N32_Linux_syscalls + 1 beqz t0, not_n32_scall j syscall_common 1: j syscall_exit not_n32_scall: /* This is not an n32 compatibility syscall, pass it on to the n64 syscall handlers. */ j handle_sys64 END(handle_sysn32) .type sysn32_call_table, @object EXPORT(sysn32_call_table) PTR sys_read /* 6000 */ PTR sys_write PTR sys_open PTR sys_close PTR sys_newstat PTR sys_newfstat /* 6005 */ PTR sys_newlstat PTR sys_poll PTR sys_lseek PTR sys_mips_mmap PTR sys_mprotect /* 6010 */ PTR sys_munmap PTR sys_brk PTR compat_sys_rt_sigaction PTR compat_sys_rt_sigprocmask PTR compat_sys_ioctl /* 6015 */ PTR sys_pread64 PTR sys_pwrite64 PTR compat_sys_readv PTR compat_sys_writev PTR sys_access /* 6020 */ PTR sysm_pipe PTR compat_sys_select PTR sys_sched_yield PTR sys_mremap PTR sys_msync /* 6025 */ PTR sys_mincore PTR sys_madvise PTR sys_shmget PTR sys_shmat PTR compat_sys_shmctl /* 6030 */ PTR sys_dup PTR sys_dup2 PTR sys_pause PTR compat_sys_nanosleep PTR compat_sys_getitimer /* 6035 */ PTR compat_sys_setitimer PTR sys_alarm PTR sys_getpid PTR compat_sys_sendfile PTR sys_socket /* 6040 */ PTR sys_connect PTR sys_accept PTR sys_sendto PTR compat_sys_recvfrom PTR compat_sys_sendmsg /* 6045 */ PTR compat_sys_recvmsg PTR sys_shutdown PTR sys_bind PTR sys_listen PTR sys_getsockname /* 6050 */ PTR sys_getpeername PTR sys_socketpair PTR compat_sys_setsockopt PTR compat_sys_getsockopt PTR __sys_clone /* 6055 */ PTR __sys_fork PTR compat_sys_execve PTR sys_exit PTR compat_sys_wait4 PTR sys_kill /* 6060 */ PTR sys_newuname PTR sys_semget PTR sys_semop PTR compat_sys_semctl PTR sys_shmdt /* 6065 */ PTR sys_msgget PTR compat_sys_msgsnd PTR compat_sys_msgrcv PTR compat_sys_msgctl PTR compat_sys_fcntl /* 6070 */ 
PTR sys_flock PTR sys_fsync PTR sys_fdatasync PTR sys_truncate PTR sys_ftruncate /* 6075 */ PTR compat_sys_getdents PTR sys_getcwd PTR sys_chdir PTR sys_fchdir PTR sys_rename /* 6080 */ PTR sys_mkdir PTR sys_rmdir PTR sys_creat PTR sys_link PTR sys_unlink /* 6085 */ PTR sys_symlink PTR sys_readlink PTR sys_chmod PTR sys_fchmod PTR sys_chown /* 6090 */ PTR sys_fchown PTR sys_lchown PTR sys_umask PTR compat_sys_gettimeofday PTR compat_sys_getrlimit /* 6095 */ PTR compat_sys_getrusage PTR compat_sys_sysinfo PTR compat_sys_times PTR compat_sys_ptrace PTR sys_getuid /* 6100 */ PTR sys_syslog PTR sys_getgid PTR sys_setuid PTR sys_setgid PTR sys_geteuid /* 6105 */ PTR sys_getegid PTR sys_setpgid PTR sys_getppid PTR sys_getpgrp PTR sys_setsid /* 6110 */ PTR sys_setreuid PTR sys_setregid PTR sys_getgroups PTR sys_setgroups PTR sys_setresuid /* 6115 */ PTR sys_getresuid PTR sys_setresgid PTR sys_getresgid PTR sys_getpgid PTR sys_setfsuid /* 6120 */ PTR sys_setfsgid PTR sys_getsid PTR sys_capget PTR sys_capset PTR compat_sys_rt_sigpending /* 6125 */ PTR compat_sys_rt_sigtimedwait PTR compat_sys_rt_sigqueueinfo PTR compat_sys_rt_sigsuspend PTR compat_sys_sigaltstack PTR compat_sys_utime /* 6130 */ PTR sys_mknod PTR sys_32_personality PTR compat_sys_ustat PTR compat_sys_statfs PTR compat_sys_fstatfs /* 6135 */ PTR sys_sysfs PTR sys_getpriority PTR sys_setpriority PTR sys_sched_setparam PTR sys_sched_getparam /* 6140 */ PTR sys_sched_setscheduler PTR sys_sched_getscheduler PTR sys_sched_get_priority_max PTR sys_sched_get_priority_min PTR compat_sys_sched_rr_get_interval /* 6145 */ PTR sys_mlock PTR sys_munlock PTR sys_mlockall PTR sys_munlockall PTR sys_vhangup /* 6150 */ PTR sys_pivot_root PTR compat_sys_sysctl PTR sys_prctl PTR compat_sys_adjtimex PTR compat_sys_setrlimit /* 6155 */ PTR sys_chroot PTR sys_sync PTR sys_acct PTR compat_sys_settimeofday PTR compat_sys_mount /* 6160 */ PTR sys_umount PTR sys_swapon PTR sys_swapoff PTR sys_reboot PTR sys_sethostname /* 6165 */ PTR 
sys_setdomainname PTR sys_ni_syscall /* was create_module */ PTR sys_init_module PTR sys_delete_module PTR sys_ni_syscall /* 6170, was get_kernel_syms */ PTR sys_ni_syscall /* was query_module */ PTR sys_quotactl PTR sys_ni_syscall /* was nfsservctl */ PTR sys_ni_syscall /* res. for getpmsg */ PTR sys_ni_syscall /* 6175 for putpmsg */ PTR sys_ni_syscall /* res. for afs_syscall */ PTR sys_ni_syscall /* res. for security */ PTR sys_gettid PTR sys_readahead PTR sys_setxattr /* 6180 */ PTR sys_lsetxattr PTR sys_fsetxattr PTR sys_getxattr PTR sys_lgetxattr PTR sys_fgetxattr /* 6185 */ PTR sys_listxattr PTR sys_llistxattr PTR sys_flistxattr PTR sys_removexattr PTR sys_lremovexattr /* 6190 */ PTR sys_fremovexattr PTR sys_tkill PTR sys_ni_syscall PTR compat_sys_futex PTR compat_sys_sched_setaffinity /* 6195 */ PTR compat_sys_sched_getaffinity PTR sys_cacheflush PTR sys_cachectl PTR __sys_sysmips PTR compat_sys_io_setup /* 6200 */ PTR sys_io_destroy PTR compat_sys_io_getevents PTR compat_sys_io_submit PTR sys_io_cancel PTR sys_exit_group /* 6205 */ PTR sys_lookup_dcookie PTR sys_epoll_create PTR sys_epoll_ctl PTR sys_epoll_wait PTR sys_remap_file_pages /* 6210 */ PTR sysn32_rt_sigreturn PTR compat_sys_fcntl64 PTR sys_set_tid_address PTR sys_restart_syscall PTR compat_sys_semtimedop /* 6215 */ PTR sys_fadvise64_64 PTR compat_sys_statfs64 PTR compat_sys_fstatfs64 PTR sys_sendfile64 PTR compat_sys_timer_create /* 6220 */ PTR compat_sys_timer_settime PTR compat_sys_timer_gettime PTR sys_timer_getoverrun PTR sys_timer_delete PTR compat_sys_clock_settime /* 6225 */ PTR compat_sys_clock_gettime PTR compat_sys_clock_getres PTR compat_sys_clock_nanosleep PTR sys_tgkill PTR compat_sys_utimes /* 6230 */ PTR compat_sys_mbind PTR compat_sys_get_mempolicy PTR compat_sys_set_mempolicy PTR compat_sys_mq_open PTR sys_mq_unlink /* 6235 */ PTR compat_sys_mq_timedsend PTR compat_sys_mq_timedreceive PTR compat_sys_mq_notify PTR compat_sys_mq_getsetattr PTR sys_ni_syscall /* 6240, sys_vserver */ 
PTR compat_sys_waitid PTR sys_ni_syscall /* available, was setaltroot */ PTR sys_add_key PTR sys_request_key PTR compat_sys_keyctl /* 6245 */ PTR sys_set_thread_area PTR sys_inotify_init PTR sys_inotify_add_watch PTR sys_inotify_rm_watch PTR compat_sys_migrate_pages /* 6250 */ PTR sys_openat PTR sys_mkdirat PTR sys_mknodat PTR sys_fchownat PTR compat_sys_futimesat /* 6255 */ PTR sys_newfstatat PTR sys_unlinkat PTR sys_renameat PTR sys_linkat PTR sys_symlinkat /* 6260 */ PTR sys_readlinkat PTR sys_fchmodat PTR sys_faccessat PTR compat_sys_pselect6 PTR compat_sys_ppoll /* 6265 */ PTR sys_unshare PTR sys_splice PTR sys_sync_file_range PTR sys_tee PTR compat_sys_vmsplice /* 6270 */ PTR compat_sys_move_pages PTR compat_sys_set_robust_list PTR compat_sys_get_robust_list PTR compat_sys_kexec_load PTR sys_getcpu /* 6275 */ PTR compat_sys_epoll_pwait PTR sys_ioprio_set PTR sys_ioprio_get PTR compat_sys_utimensat PTR compat_sys_signalfd /* 6280 */ PTR sys_ni_syscall /* was timerfd */ PTR sys_eventfd PTR sys_fallocate PTR sys_timerfd_create PTR compat_sys_timerfd_gettime /* 6285 */ PTR compat_sys_timerfd_settime PTR compat_sys_signalfd4 PTR sys_eventfd2 PTR sys_epoll_create1 PTR sys_dup3 /* 6290 */ PTR sys_pipe2 PTR sys_inotify_init1 PTR compat_sys_preadv PTR compat_sys_pwritev PTR compat_sys_rt_tgsigqueueinfo /* 6295 */ PTR sys_perf_event_open PTR sys_accept4 PTR compat_sys_recvmmsg PTR sys_getdents64 PTR sys_fanotify_init /* 6300 */ PTR sys_fanotify_mark PTR sys_prlimit64 PTR sys_name_to_handle_at PTR sys_open_by_handle_at PTR compat_sys_clock_adjtime /* 6305 */ PTR sys_syncfs PTR compat_sys_sendmmsg PTR sys_setns PTR compat_sys_process_vm_readv PTR compat_sys_process_vm_writev /* 6310 */ PTR sys_kcmp PTR sys_finit_module PTR sys_sched_setattr PTR sys_sched_getattr PTR sys_renameat2 /* 6315 */ PTR sys_seccomp PTR sys_getrandom PTR sys_memfd_create PTR sys_bpf PTR compat_sys_execveat /* 6320 */ PTR sys_userfaultfd PTR sys_membarrier PTR sys_mlock2 PTR sys_copy_file_range PTR 
compat_sys_preadv2 /* 6325 */ PTR compat_sys_pwritev2 PTR sys_pkey_mprotect PTR sys_pkey_alloc PTR sys_pkey_free PTR sys_statx /* 6330 */ PTR sys_rseq PTR compat_sys_io_pgetevents .size sysn32_call_table,.-sysn32_call_table
AirFortressIlikara/LS2K0300-linux-4.19
1,495
arch/mips/kernel/r2300_switch.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * r2300_switch.S: R2300 specific task switching code. * * Copyright (C) 1994, 1995, 1996, 1999 by Ralf Baechle * Copyright (C) 1994, 1995, 1996 by Andreas Busse * * Multi-cpu abstraction and macros for easier reading: * Copyright (C) 1996 David S. Miller (davem@davemloft.net) * * Further modifications to make this work: * Copyright (c) 1998-2000 Harald Koerfgen */ #include <asm/asm.h> #include <asm/cachectl.h> #include <asm/export.h> #include <asm/fpregdef.h> #include <asm/mipsregs.h> #include <asm/asm-offsets.h> #include <asm/regdef.h> #include <asm/stackframe.h> #include <asm/thread_info.h> #include <asm/asmmacro.h> .set mips1 .align 5 /* * task_struct *resume(task_struct *prev, task_struct *next, * struct thread_info *next_ti) */ LEAF(resume) mfc0 t1, CP0_STATUS sw t1, THREAD_STATUS(a0) cpu_save_nonscratch a0 sw ra, THREAD_REG31(a0) #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) PTR_LA t8, __stack_chk_guard LONG_L t9, TASK_STACK_CANARY(a1) LONG_S t9, 0(t8) #endif /* * The order of restoring the registers takes care of the race * updating $28, $29 and kernelsp without disabling ints. */ move $28, a2 cpu_restore_nonscratch a1 addiu t1, $28, _THREAD_SIZE - 32 sw t1, kernelsp mfc0 t1, CP0_STATUS /* Do we really need this? */ li a3, 0xff01 and t1, a3 lw a2, THREAD_STATUS(a1) nor a3, $0, a3 and a2, a3 or a2, t1 mtc0 a2, CP0_STATUS move v0, a0 jr ra END(resume)
AirFortressIlikara/LS2K0300-linux-4.19
14,255
arch/mips/kernel/scall64-o32.S
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1995 - 2000, 2001 by Ralf Baechle * Copyright (C) 1999, 2000 Silicon Graphics, Inc. * Copyright (C) 2001 MIPS Technologies, Inc. * Copyright (C) 2004 Thiemo Seufer * * Hairy, the userspace application uses a different argument passing * convention than the kernel, so we have to translate things from o32 * to ABI64 calling convention. 64-bit syscalls are also processed * here for now. */ #include <linux/errno.h> #include <asm/asm.h> #include <asm/asmmacro.h> #include <asm/irqflags.h> #include <asm/mipsregs.h> #include <asm/regdef.h> #include <asm/stackframe.h> #include <asm/thread_info.h> #include <asm/unistd.h> #include <asm/sysmips.h> .align 5 NESTED(handle_sys, PT_SIZE, sp) .set noat SAVE_SOME TRACE_IRQS_ON_RELOAD STI .set at ld t1, PT_EPC(sp) # skip syscall on return dsubu t0, v0, __NR_O32_Linux # check syscall number sltiu t0, t0, __NR_O32_Linux_syscalls + 1 daddiu t1, 4 # skip to next instruction sd t1, PT_EPC(sp) beqz t0, not_o32_scall #if 0 SAVE_ALL move a1, v0 PRINT("Scall %ld\n") RESTORE_ALL #endif /* We don't want to stumble over broken sign extensions from userland. O32 does never use the upper half. */ sll a0, a0, 0 sll a1, a1, 0 sll a2, a2, 0 sll a3, a3, 0 sd a3, PT_R26(sp) # save a3 for syscall restarting /* * More than four arguments. Try to deal with it by copying the * stack arguments from the user stack to the kernel stack. * This Sucks (TM). * * We intentionally keep the kernel stack a little below the top of * userspace so we don't have to do a slower byte accurate check here. 
*/ ld t0, PT_R29(sp) # get old user stack pointer daddu t1, t0, 32 bltz t1, bad_stack load_a4: lw a4, 16(t0) # argument #5 from usp load_a5: lw a5, 20(t0) # argument #6 from usp load_a6: lw a6, 24(t0) # argument #7 from usp load_a7: lw a7, 28(t0) # argument #8 from usp loads_done: .section __ex_table,"a" PTR load_a4, bad_stack_a4 PTR load_a5, bad_stack_a5 PTR load_a6, bad_stack_a6 PTR load_a7, bad_stack_a7 .previous li t1, _TIF_WORK_SYSCALL_ENTRY LONG_L t0, TI_FLAGS($28) # syscall tracing enabled? and t0, t1, t0 bnez t0, trace_a_syscall syscall_common: dsll t0, v0, 3 # offset into table ld t2, (sys32_call_table - (__NR_O32_Linux * 8))(t0) jalr t2 # Do The Real Thing (TM) li t0, -EMAXERRNO - 1 # error? sltu t0, t0, v0 sd t0, PT_R7(sp) # set error flag beqz t0, 1f ld t1, PT_R2(sp) # syscall number dnegu v0 # error sd t1, PT_R0(sp) # save it for syscall restarting 1: sd v0, PT_R2(sp) # result o32_syscall_exit: j syscall_exit_partial /* ------------------------------------------------------------------------ */ trace_a_syscall: SAVE_STATIC sd a4, PT_R8(sp) # Save argument registers sd a5, PT_R9(sp) sd a6, PT_R10(sp) sd a7, PT_R11(sp) # For indirect syscalls move a0, sp /* * absolute syscall number is in v0 unless we called syscall(__NR_###) * where the real syscall number is in a0 * note: NR_syscall is the first O32 syscall but the macro is * only defined when compiling with -mabi=32 (CONFIG_32BIT) * therefore __NR_O32_Linux is used (4000) */ .set push .set reorder subu t1, v0, __NR_O32_Linux move a1, v0 bnez t1, 1f /* __NR_syscall at offset 0 */ ld a1, PT_R4(sp) /* Arg1 for __NR_syscall case */ .set pop 1: jal syscall_trace_enter bltz v0, 1f # seccomp failed? 
Skip syscall RESTORE_STATIC ld v0, PT_R2(sp) # Restore syscall (maybe modified) ld a0, PT_R4(sp) # Restore argument registers ld a1, PT_R5(sp) ld a2, PT_R6(sp) ld a3, PT_R7(sp) ld a4, PT_R8(sp) ld a5, PT_R9(sp) ld a6, PT_R10(sp) ld a7, PT_R11(sp) # For indirect syscalls dsubu t0, v0, __NR_O32_Linux # check (new) syscall number sltiu t0, t0, __NR_O32_Linux_syscalls + 1 beqz t0, not_o32_scall j syscall_common 1: j syscall_exit /* ------------------------------------------------------------------------ */ /* * The stackpointer for a call with more than 4 arguments is bad. */ bad_stack: li v0, EFAULT sd v0, PT_R2(sp) li t0, 1 # set error flag sd t0, PT_R7(sp) j o32_syscall_exit bad_stack_a4: li a4, 0 b load_a5 bad_stack_a5: li a5, 0 b load_a6 bad_stack_a6: li a6, 0 b load_a7 bad_stack_a7: li a7, 0 b loads_done not_o32_scall: /* * This is not an o32 compatibility syscall, pass it on * to the 64-bit syscall handlers. */ #ifdef CONFIG_MIPS32_N32 j handle_sysn32 #else j handle_sys64 #endif END(handle_sys) LEAF(sys32_syscall) subu t0, a0, __NR_O32_Linux # check syscall number sltiu v0, t0, __NR_O32_Linux_syscalls + 1 beqz t0, einval # do not recurse dsll t1, t0, 3 beqz v0, einval ld t2, sys32_call_table(t1) # syscall routine move a0, a1 # shift argument registers move a1, a2 move a2, a3 move a3, a4 move a4, a5 move a5, a6 move a6, a7 jr t2 /* Unreached */ einval: li v0, -ENOSYS jr ra END(sys32_syscall) .align 3 .type sys32_call_table,@object EXPORT(sys32_call_table) PTR sys32_syscall /* 4000 */ PTR sys_exit PTR __sys_fork PTR sys_read PTR sys_write PTR compat_sys_open /* 4005 */ PTR sys_close PTR sys_waitpid PTR sys_creat PTR sys_link PTR sys_unlink /* 4010 */ PTR compat_sys_execve PTR sys_chdir PTR compat_sys_time PTR sys_mknod PTR sys_chmod /* 4015 */ PTR sys_lchown PTR sys_ni_syscall PTR sys_ni_syscall /* was sys_stat */ PTR sys_lseek PTR sys_getpid /* 4020 */ PTR compat_sys_mount PTR sys_oldumount PTR sys_setuid PTR sys_getuid PTR compat_sys_stime /* 4025 */ PTR 
compat_sys_ptrace PTR sys_alarm PTR sys_ni_syscall /* was sys_fstat */ PTR sys_pause PTR compat_sys_utime /* 4030 */ PTR sys_ni_syscall PTR sys_ni_syscall PTR sys_access PTR sys_nice PTR sys_ni_syscall /* 4035 */ PTR sys_sync PTR sys_kill PTR sys_rename PTR sys_mkdir PTR sys_rmdir /* 4040 */ PTR sys_dup PTR sysm_pipe PTR compat_sys_times PTR sys_ni_syscall PTR sys_brk /* 4045 */ PTR sys_setgid PTR sys_getgid PTR sys_ni_syscall /* was signal 2 */ PTR sys_geteuid PTR sys_getegid /* 4050 */ PTR sys_acct PTR sys_umount PTR sys_ni_syscall PTR compat_sys_ioctl PTR compat_sys_fcntl /* 4055 */ PTR sys_ni_syscall PTR sys_setpgid PTR sys_ni_syscall PTR sys_olduname PTR sys_umask /* 4060 */ PTR sys_chroot PTR compat_sys_ustat PTR sys_dup2 PTR sys_getppid PTR sys_getpgrp /* 4065 */ PTR sys_setsid PTR sys_32_sigaction PTR sys_sgetmask PTR sys_ssetmask PTR sys_setreuid /* 4070 */ PTR sys_setregid PTR sys32_sigsuspend PTR compat_sys_sigpending PTR sys_sethostname PTR compat_sys_setrlimit /* 4075 */ PTR compat_sys_getrlimit PTR compat_sys_getrusage PTR compat_sys_gettimeofday PTR compat_sys_settimeofday PTR sys_getgroups /* 4080 */ PTR sys_setgroups PTR sys_ni_syscall /* old_select */ PTR sys_symlink PTR sys_ni_syscall /* was sys_lstat */ PTR sys_readlink /* 4085 */ PTR sys_uselib PTR sys_swapon PTR sys_reboot PTR compat_sys_old_readdir PTR sys_mips_mmap /* 4090 */ PTR sys_munmap PTR compat_sys_truncate PTR compat_sys_ftruncate PTR sys_fchmod PTR sys_fchown /* 4095 */ PTR sys_getpriority PTR sys_setpriority PTR sys_ni_syscall PTR compat_sys_statfs PTR compat_sys_fstatfs /* 4100 */ PTR sys_ni_syscall /* sys_ioperm */ PTR compat_sys_socketcall PTR sys_syslog PTR compat_sys_setitimer PTR compat_sys_getitimer /* 4105 */ PTR compat_sys_newstat PTR compat_sys_newlstat PTR compat_sys_newfstat PTR sys_uname PTR sys_ni_syscall /* sys_ioperm *//* 4110 */ PTR sys_vhangup PTR sys_ni_syscall /* was sys_idle */ PTR sys_ni_syscall /* sys_vm86 */ PTR compat_sys_wait4 PTR sys_swapoff /* 4115 */ 
PTR compat_sys_sysinfo PTR compat_sys_ipc PTR sys_fsync PTR sys32_sigreturn PTR __sys_clone /* 4120 */ PTR sys_setdomainname PTR sys_newuname PTR sys_ni_syscall /* sys_modify_ldt */ PTR compat_sys_adjtimex PTR sys_mprotect /* 4125 */ PTR compat_sys_sigprocmask PTR sys_ni_syscall /* was creat_module */ PTR sys_init_module PTR sys_delete_module PTR sys_ni_syscall /* 4130, get_kernel_syms */ PTR sys_quotactl PTR sys_getpgid PTR sys_fchdir PTR sys_bdflush PTR sys_sysfs /* 4135 */ PTR sys_32_personality PTR sys_ni_syscall /* for afs_syscall */ PTR sys_setfsuid PTR sys_setfsgid PTR sys_32_llseek /* 4140 */ PTR compat_sys_getdents PTR compat_sys_select PTR sys_flock PTR sys_msync PTR compat_sys_readv /* 4145 */ PTR compat_sys_writev PTR sys_cacheflush PTR sys_cachectl PTR __sys_sysmips PTR sys_ni_syscall /* 4150 */ PTR sys_getsid PTR sys_fdatasync PTR compat_sys_sysctl PTR sys_mlock PTR sys_munlock /* 4155 */ PTR sys_mlockall PTR sys_munlockall PTR sys_sched_setparam PTR sys_sched_getparam PTR sys_sched_setscheduler /* 4160 */ PTR sys_sched_getscheduler PTR sys_sched_yield PTR sys_sched_get_priority_max PTR sys_sched_get_priority_min PTR compat_sys_sched_rr_get_interval /* 4165 */ PTR compat_sys_nanosleep PTR sys_mremap PTR sys_accept PTR sys_bind PTR sys_connect /* 4170 */ PTR sys_getpeername PTR sys_getsockname PTR compat_sys_getsockopt PTR sys_listen PTR compat_sys_recv /* 4175 */ PTR compat_sys_recvfrom PTR compat_sys_recvmsg PTR sys_send PTR compat_sys_sendmsg PTR sys_sendto /* 4180 */ PTR compat_sys_setsockopt PTR sys_shutdown PTR sys_socket PTR sys_socketpair PTR sys_setresuid /* 4185 */ PTR sys_getresuid PTR sys_ni_syscall /* was query_module */ PTR sys_poll PTR sys_ni_syscall /* was nfsservctl */ PTR sys_setresgid /* 4190 */ PTR sys_getresgid PTR sys_prctl PTR sys32_rt_sigreturn PTR compat_sys_rt_sigaction PTR compat_sys_rt_sigprocmask /* 4195 */ PTR compat_sys_rt_sigpending PTR compat_sys_rt_sigtimedwait PTR compat_sys_rt_sigqueueinfo PTR 
compat_sys_rt_sigsuspend PTR sys_32_pread /* 4200 */ PTR sys_32_pwrite PTR sys_chown PTR sys_getcwd PTR sys_capget PTR sys_capset /* 4205 */ PTR compat_sys_sigaltstack PTR compat_sys_sendfile PTR sys_ni_syscall PTR sys_ni_syscall PTR sys_mips_mmap2 /* 4210 */ PTR sys_32_truncate64 PTR sys_32_ftruncate64 PTR sys_newstat PTR sys_newlstat PTR sys_newfstat /* 4215 */ PTR sys_pivot_root PTR sys_mincore PTR sys_madvise PTR sys_getdents64 PTR compat_sys_fcntl64 /* 4220 */ PTR sys_ni_syscall PTR sys_gettid PTR sys32_readahead PTR sys_setxattr PTR sys_lsetxattr /* 4225 */ PTR sys_fsetxattr PTR sys_getxattr PTR sys_lgetxattr PTR sys_fgetxattr PTR sys_listxattr /* 4230 */ PTR sys_llistxattr PTR sys_flistxattr PTR sys_removexattr PTR sys_lremovexattr PTR sys_fremovexattr /* 4235 */ PTR sys_tkill PTR sys_sendfile64 PTR compat_sys_futex PTR compat_sys_sched_setaffinity PTR compat_sys_sched_getaffinity /* 4240 */ PTR compat_sys_io_setup PTR sys_io_destroy PTR compat_sys_io_getevents PTR compat_sys_io_submit PTR sys_io_cancel /* 4245 */ PTR sys_exit_group PTR compat_sys_lookup_dcookie PTR sys_epoll_create PTR sys_epoll_ctl PTR sys_epoll_wait /* 4250 */ PTR sys_remap_file_pages PTR sys_set_tid_address PTR sys_restart_syscall PTR sys32_fadvise64_64 PTR compat_sys_statfs64 /* 4255 */ PTR compat_sys_fstatfs64 PTR compat_sys_timer_create PTR compat_sys_timer_settime PTR compat_sys_timer_gettime PTR sys_timer_getoverrun /* 4260 */ PTR sys_timer_delete PTR compat_sys_clock_settime PTR compat_sys_clock_gettime PTR compat_sys_clock_getres PTR compat_sys_clock_nanosleep /* 4265 */ PTR sys_tgkill PTR compat_sys_utimes PTR compat_sys_mbind PTR compat_sys_get_mempolicy PTR compat_sys_set_mempolicy /* 4270 */ PTR compat_sys_mq_open PTR sys_mq_unlink PTR compat_sys_mq_timedsend PTR compat_sys_mq_timedreceive PTR compat_sys_mq_notify /* 4275 */ PTR compat_sys_mq_getsetattr PTR sys_ni_syscall /* sys_vserver */ PTR compat_sys_waitid PTR sys_ni_syscall /* available, was setaltroot */ PTR sys_add_key 
/* 4280 */ PTR sys_request_key PTR compat_sys_keyctl PTR sys_set_thread_area PTR sys_inotify_init PTR sys_inotify_add_watch /* 4285 */ PTR sys_inotify_rm_watch PTR compat_sys_migrate_pages PTR compat_sys_openat PTR sys_mkdirat PTR sys_mknodat /* 4290 */ PTR sys_fchownat PTR compat_sys_futimesat PTR sys_newfstatat PTR sys_unlinkat PTR sys_renameat /* 4295 */ PTR sys_linkat PTR sys_symlinkat PTR sys_readlinkat PTR sys_fchmodat PTR sys_faccessat /* 4300 */ PTR compat_sys_pselect6 PTR compat_sys_ppoll PTR sys_unshare PTR sys_splice PTR sys32_sync_file_range /* 4305 */ PTR sys_tee PTR compat_sys_vmsplice PTR compat_sys_move_pages PTR compat_sys_set_robust_list PTR compat_sys_get_robust_list /* 4310 */ PTR compat_sys_kexec_load PTR sys_getcpu PTR compat_sys_epoll_pwait PTR sys_ioprio_set PTR sys_ioprio_get /* 4315 */ PTR compat_sys_utimensat PTR compat_sys_signalfd PTR sys_ni_syscall /* was timerfd */ PTR sys_eventfd PTR sys32_fallocate /* 4320 */ PTR sys_timerfd_create PTR compat_sys_timerfd_gettime PTR compat_sys_timerfd_settime PTR compat_sys_signalfd4 PTR sys_eventfd2 /* 4325 */ PTR sys_epoll_create1 PTR sys_dup3 PTR sys_pipe2 PTR sys_inotify_init1 PTR compat_sys_preadv /* 4330 */ PTR compat_sys_pwritev PTR compat_sys_rt_tgsigqueueinfo PTR sys_perf_event_open PTR sys_accept4 PTR compat_sys_recvmmsg /* 4335 */ PTR sys_fanotify_init PTR compat_sys_fanotify_mark PTR sys_prlimit64 PTR sys_name_to_handle_at PTR compat_sys_open_by_handle_at /* 4340 */ PTR compat_sys_clock_adjtime PTR sys_syncfs PTR compat_sys_sendmmsg PTR sys_setns PTR compat_sys_process_vm_readv /* 4345 */ PTR compat_sys_process_vm_writev PTR sys_kcmp PTR sys_finit_module PTR sys_sched_setattr PTR sys_sched_getattr /* 4350 */ PTR sys_renameat2 PTR sys_seccomp PTR sys_getrandom PTR sys_memfd_create PTR sys_bpf /* 4355 */ PTR compat_sys_execveat PTR sys_userfaultfd PTR sys_membarrier PTR sys_mlock2 PTR sys_copy_file_range /* 4360 */ PTR compat_sys_preadv2 PTR compat_sys_pwritev2 PTR sys_pkey_mprotect PTR 
sys_pkey_alloc PTR sys_pkey_free /* 4365 */ PTR sys_statx PTR sys_rseq PTR compat_sys_io_pgetevents .size sys32_call_table,.-sys32_call_table
AirFortressIlikara/LS2K0300-linux-4.19
4,466
arch/mips/kernel/mcount.S
/* * MIPS specific _mcount support * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive for * more details. * * Copyright (C) 2009 Lemote Inc. & DSLab, Lanzhou University, China * Copyright (C) 2010 DSLab, Lanzhou University, China * Author: Wu Zhangjin <wuzhangjin@gmail.com> */ #include <asm/export.h> #include <asm/regdef.h> #include <asm/stackframe.h> #include <asm/ftrace.h> .text .set noreorder .set noat .macro MCOUNT_SAVE_REGS PTR_SUBU sp, PT_SIZE PTR_S ra, PT_R31(sp) PTR_S AT, PT_R1(sp) PTR_S a0, PT_R4(sp) PTR_S a1, PT_R5(sp) PTR_S a2, PT_R6(sp) PTR_S a3, PT_R7(sp) #ifdef CONFIG_64BIT PTR_S a4, PT_R8(sp) PTR_S a5, PT_R9(sp) PTR_S a6, PT_R10(sp) PTR_S a7, PT_R11(sp) #endif .endm .macro MCOUNT_RESTORE_REGS PTR_L ra, PT_R31(sp) PTR_L AT, PT_R1(sp) PTR_L a0, PT_R4(sp) PTR_L a1, PT_R5(sp) PTR_L a2, PT_R6(sp) PTR_L a3, PT_R7(sp) #ifdef CONFIG_64BIT PTR_L a4, PT_R8(sp) PTR_L a5, PT_R9(sp) PTR_L a6, PT_R10(sp) PTR_L a7, PT_R11(sp) #endif PTR_ADDIU sp, PT_SIZE .endm .macro RETURN_BACK jr ra move ra, AT .endm /* * The -mmcount-ra-address option of gcc 4.5 uses register $12 to pass * the location of the parent's return address. 
*/ #define MCOUNT_RA_ADDRESS_REG $12 #ifdef CONFIG_DYNAMIC_FTRACE NESTED(ftrace_caller, PT_SIZE, ra) .globl _mcount _mcount: EXPORT_SYMBOL(_mcount) b ftrace_stub #ifdef CONFIG_32BIT addiu sp,sp,8 #else nop #endif /* When tracing is activated, it calls ftrace_caller+8 (aka here) */ MCOUNT_SAVE_REGS #ifdef KBUILD_MCOUNT_RA_ADDRESS PTR_S MCOUNT_RA_ADDRESS_REG, PT_R12(sp) #endif PTR_SUBU a0, ra, 8 /* arg1: self address */ PTR_LA t1, _stext sltu t2, a0, t1 /* t2 = (a0 < _stext) */ PTR_LA t1, _etext sltu t3, t1, a0 /* t3 = (a0 > _etext) */ or t1, t2, t3 beqz t1, ftrace_call nop #if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT) PTR_SUBU a0, a0, 16 /* arg1: adjust to module's recorded callsite */ #else PTR_SUBU a0, a0, 12 #endif .globl ftrace_call ftrace_call: nop /* a placeholder for the call to a real tracing function */ move a1, AT /* arg2: parent's return address */ #ifdef CONFIG_FUNCTION_GRAPH_TRACER .globl ftrace_graph_call ftrace_graph_call: nop nop #endif MCOUNT_RESTORE_REGS .globl ftrace_stub ftrace_stub: RETURN_BACK END(ftrace_caller) #else /* ! CONFIG_DYNAMIC_FTRACE */ NESTED(_mcount, PT_SIZE, ra) EXPORT_SYMBOL(_mcount) PTR_LA t1, ftrace_stub PTR_L t2, ftrace_trace_function /* Prepare t2 for (1) */ beq t1, t2, fgraph_trace nop MCOUNT_SAVE_REGS move a0, ra /* arg1: self return address */ jalr t2 /* (1) call *ftrace_trace_function */ move a1, AT /* arg2: parent's return address */ MCOUNT_RESTORE_REGS fgraph_trace: #ifdef CONFIG_FUNCTION_GRAPH_TRACER PTR_LA t1, ftrace_stub PTR_L t3, ftrace_graph_return bne t1, t3, ftrace_graph_caller nop PTR_LA t1, ftrace_graph_entry_stub PTR_L t3, ftrace_graph_entry bne t1, t3, ftrace_graph_caller nop #endif #ifdef CONFIG_32BIT addiu sp, sp, 8 #endif .globl ftrace_stub ftrace_stub: RETURN_BACK END(_mcount) #endif /* ! 
CONFIG_DYNAMIC_FTRACE */ #ifdef CONFIG_FUNCTION_GRAPH_TRACER NESTED(ftrace_graph_caller, PT_SIZE, ra) #ifndef CONFIG_DYNAMIC_FTRACE MCOUNT_SAVE_REGS #endif /* arg1: Get the location of the parent's return address */ #ifdef KBUILD_MCOUNT_RA_ADDRESS #ifdef CONFIG_DYNAMIC_FTRACE PTR_L a0, PT_R12(sp) #else move a0, MCOUNT_RA_ADDRESS_REG #endif bnez a0, 1f /* non-leaf func: stored in MCOUNT_RA_ADDRESS_REG */ nop #endif PTR_LA a0, PT_R1(sp) /* leaf func: the location in current stack */ 1: /* arg2: Get self return address */ #ifdef CONFIG_DYNAMIC_FTRACE PTR_L a1, PT_R31(sp) #else move a1, ra #endif /* arg3: Get frame pointer of current stack */ #ifdef CONFIG_64BIT PTR_LA a2, PT_SIZE(sp) #else PTR_LA a2, (PT_SIZE+8)(sp) #endif jal prepare_ftrace_return nop MCOUNT_RESTORE_REGS #ifndef CONFIG_DYNAMIC_FTRACE #ifdef CONFIG_32BIT addiu sp, sp, 8 #endif #endif RETURN_BACK END(ftrace_graph_caller) .align 2 .globl return_to_handler return_to_handler: PTR_SUBU sp, PT_SIZE PTR_S v0, PT_R2(sp) jal ftrace_return_to_handler PTR_S v1, PT_R3(sp) /* restore the real parent address: v0 -> ra */ move ra, v0 PTR_L v0, PT_R2(sp) PTR_L v1, PT_R3(sp) jr ra PTR_ADDIU sp, PT_SIZE #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ .set at .set reorder
AirFortressIlikara/LS2K0300-linux-4.19
5,236
arch/mips/kernel/vmlinux.lds.S
/* SPDX-License-Identifier: GPL-2.0 */ #include <asm/asm-offsets.h> #include <asm/thread_info.h> #define PAGE_SIZE _PAGE_SIZE /* * Put .bss..swapper_pg_dir as the first thing in .bss. This will * ensure that it has .bss alignment (64K). */ #define BSS_FIRST_SECTIONS *(.bss..swapper_pg_dir) #include <asm-generic/vmlinux.lds.h> #undef mips #define mips mips OUTPUT_ARCH(mips) ENTRY(kernel_entry) PHDRS { text PT_LOAD FLAGS(7); /* RWX */ #ifndef CONFIG_CAVIUM_OCTEON_SOC note PT_NOTE FLAGS(4); /* R__ */ #endif /* CAVIUM_OCTEON_SOC */ } #ifdef CONFIG_32BIT #ifdef CONFIG_CPU_LITTLE_ENDIAN jiffies = jiffies_64; #else jiffies = jiffies_64 + 4; #endif #else jiffies = jiffies_64; #endif SECTIONS { #ifdef CONFIG_BOOT_ELF64 /* Read-only sections, merged into text segment: */ /* . = 0xc000000000000000; */ /* This is the value for an Origin kernel, taken from an IRIX kernel. */ /* . = 0xc00000000001c000; */ /* Set the vaddr for the text segment to a value * >= 0xa800 0000 0001 9000 if no symmon is going to configured * >= 0xa800 0000 0030 0000 otherwise */ /* . = 0xa800000000300000; */ . = 0xffffffff80300000; #endif . = LINKER_LOAD_ADDRESS; /* read-only */ _text = .; /* Text and read-only data */ .text : { TEXT_TEXT SCHED_TEXT CPUIDLE_TEXT LOCK_TEXT KPROBES_TEXT IRQENTRY_TEXT SOFTIRQENTRY_TEXT *(.text.*) *(.fixup) *(.gnu.warning) } :text = 0 _etext = .; /* End of text section */ /* * struct alt_inst entries. From the header (alternative.h): * "Alternative instructions for different CPU types or capabilities" * Think locking instructions on spinlocks. */ . 
= ALIGN(4); .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) { __alt_instructions = .; *(.altinstructions) __alt_instructions_end = .; } EXCEPTION_TABLE(16) /* Exception table for data bus errors */ __dbe_table : { __start___dbe_table = .; *(__dbe_table) __stop___dbe_table = .; } #ifdef CONFIG_CAVIUM_OCTEON_SOC #define NOTES_HEADER #else /* CONFIG_CAVIUM_OCTEON_SOC */ #define NOTES_HEADER :note #endif /* CONFIG_CAVIUM_OCTEON_SOC */ NOTES :text NOTES_HEADER .dummy : { *(.dummy) } :text _sdata = .; /* Start of data section */ RODATA /* writeable */ .data : { /* Data */ . = . + DATAOFFSET; /* for CONFIG_MAPPED_KERNEL */ INIT_TASK_DATA(THREAD_SIZE) NOSAVE_DATA PAGE_ALIGNED_DATA(PAGE_SIZE) CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT) READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT) DATA_DATA CONSTRUCTORS } BUG_TABLE _gp = . + 0x8000; .lit8 : { *(.lit8) } .lit4 : { *(.lit4) } /* We want the small data sections together, so single-instruction offsets can access them all, and initialized data all before uninitialized, so we can shorten the on-disk segment size. */ .sdata : { *(.sdata) } _edata = .; /* End of data section */ /* will be freed after init */ . = ALIGN(PAGE_SIZE); /* Init code and data */ __init_begin = .; INIT_TEXT_SECTION(PAGE_SIZE) INIT_DATA_SECTION(16) . = ALIGN(4); .mips.machines.init : AT(ADDR(.mips.machines.init) - LOAD_OFFSET) { __mips_machines_start = .; *(.mips.machines.init) __mips_machines_end = .; } /* .exit.text is discarded at runtime, not link time, to deal with * references from .rodata */ .exit.text : { EXIT_TEXT } .exit.data : { EXIT_DATA } #ifdef CONFIG_SMP PERCPU_SECTION(1 << CONFIG_MIPS_L1_CACHE_SHIFT) #endif #ifdef CONFIG_MIPS_ELF_APPENDED_DTB .appended_dtb : AT(ADDR(.appended_dtb) - LOAD_OFFSET) { *(.appended_dtb) KEEP(*(.appended_dtb)) } #endif #ifdef CONFIG_RELOCATABLE . 
= ALIGN(4); .data.reloc : { _relocation_start = .; /* * Space for relocation table * This needs to be filled so that the * relocs tool can overwrite the content. * An invalid value is left at the start of the * section to abort relocation if the table * has not been filled in. */ LONG(0xFFFFFFFF); FILL(0); . += CONFIG_RELOCATION_TABLE_SIZE - 4; _relocation_end = .; } #endif #ifdef CONFIG_MIPS_RAW_APPENDED_DTB __appended_dtb = .; /* leave space for appended DTB */ . += 0x100000; #endif /* * Align to 64K in attempt to eliminate holes before the * .bss..swapper_pg_dir section at the start of .bss. This * also satisfies PAGE_SIZE alignment as the largest page size * allowed is 64K. */ . = ALIGN(0x10000); __init_end = .; /* freed after init ends here */ /* * Force .bss to 64K alignment so that .bss..swapper_pg_dir * gets that alignment. .sbss should be empty, so there will be * no holes after __init_end. */ BSS_SECTION(0, 0x10000, 8) _end = . ; /* These mark the ABI of the kernel for debuggers. */ .mdebug.abi32 : { KEEP(*(.mdebug.abi32)) } .mdebug.abi64 : { KEEP(*(.mdebug.abi64)) } /* This is the MIPS specific mdebug section. */ .mdebug : { *(.mdebug) } STABS_DEBUG DWARF_DEBUG /* These must appear regardless of . */ .gptab.sdata : { *(.gptab.data) *(.gptab.sdata) } .gptab.sbss : { *(.gptab.bss) *(.gptab.sbss) } /* Sections to be discarded */ DISCARDS /DISCARD/ : { /* ABI crap starts here */ *(.MIPS.abiflags) *(.MIPS.options) *(.options) *(.pdr) *(.reginfo) *(.eh_frame) } }
AirFortressIlikara/LS2K0300-linux-4.19
14,421
arch/mips/kernel/genex.S
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle * Copyright (C) 1999, 2000 Silicon Graphics, Inc. * Copyright (C) 2002, 2007 Maciej W. Rozycki * Copyright (C) 2001, 2012 MIPS Technologies, Inc. All rights reserved. */ #include <linux/init.h> #include <asm/asm.h> #include <asm/asmmacro.h> #include <asm/cacheops.h> #include <asm/irqflags.h> #include <asm/regdef.h> #include <asm/fpregdef.h> #include <asm/mipsregs.h> #include <asm/stackframe.h> #include <asm/war.h> #include <asm/thread_info.h> __INIT /* * General exception vector for all other CPUs. * * Be careful when changing this, it has to be at most 128 bytes * to fit into space reserved for the exception handler. */ NESTED(except_vec3_generic, 0, sp) .set push .set noat #if R5432_CP0_INTERRUPT_WAR mfc0 k0, CP0_INDEX #endif mfc0 k1, CP0_CAUSE andi k1, k1, 0x7c #ifdef CONFIG_64BIT dsll k1, k1, 1 #endif PTR_L k0, exception_handlers(k1) jr k0 .set pop END(except_vec3_generic) /* * General exception handler for CPUs with virtual coherency exception. * * Be careful when changing this, it has to be at most 256 (as a special * exception) bytes to fit into space reserved for the exception handler. */ NESTED(except_vec3_r4000, 0, sp) .set push .set arch=r4000 .set noat mfc0 k1, CP0_CAUSE li k0, 31<<2 andi k1, k1, 0x7c .set push .set noreorder .set nomacro beq k1, k0, handle_vced li k0, 14<<2 beq k1, k0, handle_vcei #ifdef CONFIG_64BIT dsll k1, k1, 1 #endif .set pop PTR_L k0, exception_handlers(k1) jr k0 /* * Big shit, we now may have two dirty primary cache lines for the same * physical address. We can safely invalidate the line pointed to by * c0_badvaddr because after return from this exception handler the * load / store will be re-executed. */ handle_vced: MFC0 k0, CP0_BADVADDR li k1, -4 # Is this ... and k0, k1 # ... really needed? 
mtc0 zero, CP0_TAGLO cache Index_Store_Tag_D, (k0) cache Hit_Writeback_Inv_SD, (k0) #ifdef CONFIG_PROC_FS PTR_LA k0, vced_count lw k1, (k0) addiu k1, 1 sw k1, (k0) #endif eret handle_vcei: MFC0 k0, CP0_BADVADDR cache Hit_Writeback_Inv_SD, (k0) # also cleans pi #ifdef CONFIG_PROC_FS PTR_LA k0, vcei_count lw k1, (k0) addiu k1, 1 sw k1, (k0) #endif eret .set pop END(except_vec3_r4000) __FINIT .align 5 /* 32 byte rollback region */ LEAF(__r4k_wait) .set push .set noreorder /* start of rollback region */ LONG_L t0, TI_FLAGS($28) nop andi t0, _TIF_NEED_RESCHED bnez t0, 1f nop nop nop #ifdef CONFIG_CPU_MICROMIPS nop nop nop nop #endif .set MIPS_ISA_ARCH_LEVEL_RAW wait /* end of rollback region (the region size must be power of two) */ 1: jr ra nop .set pop END(__r4k_wait) .macro BUILD_ROLLBACK_PROLOGUE handler FEXPORT(rollback_\handler) .set push .set noat MFC0 k0, CP0_EPC PTR_LA k1, __r4k_wait ori k0, 0x1f /* 32 byte rollback region */ xori k0, 0x1f bne k0, k1, \handler MTC0 k0, CP0_EPC .set pop .endm .align 5 BUILD_ROLLBACK_PROLOGUE handle_int NESTED(handle_int, PT_SIZE, sp) .cfi_signal_frame #ifdef CONFIG_TRACE_IRQFLAGS /* * Check to see if the interrupted code has just disabled * interrupts and ignore this interrupt for now if so. * * local_irq_disable() disables interrupts and then calls * trace_hardirqs_off() to track the state. If an interrupt is taken * after interrupts are disabled but before the state is updated * it will appear to restore_all that it is incorrectly returning with * interrupts disabled */ .set push .set noat mfc0 k0, CP0_STATUS #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) and k0, ST0_IEP bnez k0, 1f mfc0 k0, CP0_EPC .set noreorder j k0 rfe #else and k0, ST0_IE bnez k0, 1f eret #endif 1: .set pop #endif SAVE_ALL docfi=1 CLI TRACE_IRQS_OFF LONG_L s0, TI_REGS($28) LONG_S sp, TI_REGS($28) /* * SAVE_ALL ensures we are using a valid kernel stack for the thread. * Check if we are already using the IRQ stack. 
*/ move s1, sp # Preserve the sp /* Get IRQ stack for this CPU */ ASM_CPUID_MFC0 k0, ASM_SMP_CPUID_REG #if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32) lui k1, %hi(irq_stack) #else lui k1, %highest(irq_stack) daddiu k1, %higher(irq_stack) dsll k1, 16 daddiu k1, %hi(irq_stack) dsll k1, 16 #endif LONG_SRL k0, SMP_CPUID_PTRSHIFT LONG_ADDU k1, k0 LONG_L t0, %lo(irq_stack)(k1) # Check if already on IRQ stack PTR_LI t1, ~(_THREAD_SIZE-1) and t1, t1, sp beq t0, t1, 2f /* Switch to IRQ stack */ li t1, _IRQ_STACK_START PTR_ADD sp, t0, t1 /* Save task's sp on IRQ stack so that unwinding can follow it */ LONG_S s1, 0(sp) 2: jal plat_irq_dispatch /* Restore sp */ move sp, s1 j ret_from_irq #ifdef CONFIG_CPU_MICROMIPS nop #endif END(handle_int) __INIT /* * Special interrupt vector for MIPS64 ISA & embedded MIPS processors. * This is a dedicated interrupt exception vector which reduces the * interrupt processing overhead. The jump instruction will be replaced * at the initialization time. * * Be careful when changing this, it has to be at most 128 bytes * to fit into space reserved for the exception handler. */ NESTED(except_vec4, 0, sp) 1: j 1b /* Dummy, will be replaced */ END(except_vec4) /* * EJTAG debug exception handler. * The EJTAG debug exception entry point is 0xbfc00480, which * normally is in the boot PROM, so the boot PROM must do an * unconditional jump to this vector. */ NESTED(except_vec_ejtag_debug, 0, sp) j ejtag_debug_handler #ifdef CONFIG_CPU_MICROMIPS nop #endif END(except_vec_ejtag_debug) __FINIT /* * Vectored interrupt handler. 
* This prototype is copied to ebase + n*IntCtl.VS and patched * to invoke the handler */ BUILD_ROLLBACK_PROLOGUE except_vec_vi NESTED(except_vec_vi, 0, sp) SAVE_SOME docfi=1 SAVE_AT docfi=1 .set push .set noreorder PTR_LA v1, except_vec_vi_handler FEXPORT(except_vec_vi_lui) lui v0, 0 /* Patched */ jr v1 FEXPORT(except_vec_vi_ori) ori v0, 0 /* Patched */ .set pop END(except_vec_vi) EXPORT(except_vec_vi_end) /* * Common Vectored Interrupt code * Complete the register saves and invoke the handler which is passed in $v0 */ NESTED(except_vec_vi_handler, 0, sp) SAVE_TEMP SAVE_STATIC CLI #ifdef CONFIG_TRACE_IRQFLAGS move s0, v0 TRACE_IRQS_OFF move v0, s0 #endif LONG_L s0, TI_REGS($28) LONG_S sp, TI_REGS($28) /* * SAVE_ALL ensures we are using a valid kernel stack for the thread. * Check if we are already using the IRQ stack. */ move s1, sp # Preserve the sp /* Get IRQ stack for this CPU */ ASM_CPUID_MFC0 k0, ASM_SMP_CPUID_REG #if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32) lui k1, %hi(irq_stack) #else lui k1, %highest(irq_stack) daddiu k1, %higher(irq_stack) dsll k1, 16 daddiu k1, %hi(irq_stack) dsll k1, 16 #endif LONG_SRL k0, SMP_CPUID_PTRSHIFT LONG_ADDU k1, k0 LONG_L t0, %lo(irq_stack)(k1) # Check if already on IRQ stack PTR_LI t1, ~(_THREAD_SIZE-1) and t1, t1, sp beq t0, t1, 2f /* Switch to IRQ stack */ li t1, _IRQ_STACK_START PTR_ADD sp, t0, t1 /* Save task's sp on IRQ stack so that unwinding can follow it */ LONG_S s1, 0(sp) 2: jalr v0 /* Restore sp */ move sp, s1 j ret_from_irq END(except_vec_vi_handler) /* * EJTAG debug exception handler. */ NESTED(ejtag_debug_handler, PT_SIZE, sp) .set push .set noat MTC0 k0, CP0_DESAVE mfc0 k0, CP0_DEBUG sll k0, k0, 30 # Check for SDBBP. 
bgez k0, ejtag_return #ifdef CONFIG_SMP 1: PTR_LA k0, ejtag_debug_buffer_spinlock ll k0, 0(k0) bnez k0, 1b PTR_LA k0, ejtag_debug_buffer_spinlock sc k0, 0(k0) beqz k0, 1b # ifdef CONFIG_WEAK_REORDERING_BEYOND_LLSC sync # else nop # endif PTR_LA k0, ejtag_debug_buffer LONG_S k1, 0(k0) ASM_CPUID_MFC0 k1, ASM_SMP_CPUID_REG PTR_SRL k1, SMP_CPUID_PTRSHIFT PTR_SLL k1, LONGLOG PTR_LA k0, ejtag_debug_buffer_per_cpu PTR_ADDU k0, k1 PTR_LA k1, ejtag_debug_buffer LONG_L k1, 0(k1) LONG_S k1, 0(k0) PTR_LA k0, ejtag_debug_buffer_spinlock sw zero, 0(k0) #else PTR_LA k0, ejtag_debug_buffer LONG_S k1, 0(k0) #endif SAVE_ALL move a0, sp jal ejtag_exception_handler RESTORE_ALL #ifdef CONFIG_SMP ASM_CPUID_MFC0 k1, ASM_SMP_CPUID_REG PTR_SRL k1, SMP_CPUID_PTRSHIFT PTR_SLL k1, LONGLOG PTR_LA k0, ejtag_debug_buffer_per_cpu PTR_ADDU k0, k1 LONG_L k1, 0(k0) #else PTR_LA k0, ejtag_debug_buffer LONG_L k1, 0(k0) #endif ejtag_return: back_to_back_c0_hazard MFC0 k0, CP0_DESAVE .set mips32 deret .set pop END(ejtag_debug_handler) /* * This buffer is reserved for the use of the EJTAG debug * handler. */ .data EXPORT(ejtag_debug_buffer) .fill LONGSIZE #ifdef CONFIG_SMP EXPORT(ejtag_debug_buffer_spinlock) .fill LONGSIZE EXPORT(ejtag_debug_buffer_per_cpu) .fill LONGSIZE * NR_CPUS #endif .previous __INIT /* * NMI debug exception handler for MIPS reference boards. * The NMI debug exception entry point is 0xbfc00000, which * normally is in the boot PROM, so the boot PROM must do a * unconditional jump to this vector. 
*/ NESTED(except_vec_nmi, 0, sp) j nmi_handler #ifdef CONFIG_CPU_MICROMIPS nop #endif END(except_vec_nmi) __FINIT NESTED(nmi_handler, PT_SIZE, sp) .cfi_signal_frame .set push .set noat /* * Clear ERL - restore segment mapping * Clear BEV - required for page fault exception handler to work */ mfc0 k0, CP0_STATUS ori k0, k0, ST0_EXL li k1, ~(ST0_BEV | ST0_ERL) and k0, k0, k1 mtc0 k0, CP0_STATUS _ehb SAVE_ALL move a0, sp jal nmi_exception_handler /* nmi_exception_handler never returns */ .set pop END(nmi_handler) .macro __build_clear_none .endm .macro __build_clear_sti TRACE_IRQS_ON STI .endm .macro __build_clear_cli CLI TRACE_IRQS_OFF .endm .macro __build_clear_fpe CLI TRACE_IRQS_OFF .set push /* gas fails to assemble cfc1 for some archs (octeon).*/ \ .set mips1 SET_HARDFLOAT cfc1 a1, fcr31 .set pop .endm .macro __build_clear_msa_fpe CLI TRACE_IRQS_OFF _cfcmsa a1, MSA_CSR .endm .macro __build_clear_ade MFC0 t0, CP0_BADVADDR PTR_S t0, PT_BVADDR(sp) KMODE .endm .macro __build_clear_gsex MFC0 a1, CP0_GSCAUSE TRACE_IRQS_ON STI .endm .macro __BUILD_silent exception .endm /* Gas tries to parse the PRINT argument as a string containing string escapes and emits bogus warnings if it believes to recognize an unknown escape code. So make the arguments start with an n and gas will believe \n is ok ... 
*/ .macro __BUILD_verbose nexception LONG_L a1, PT_EPC(sp) #ifdef CONFIG_32BIT PRINT("Got \nexception at %08lx\012") #endif #ifdef CONFIG_64BIT PRINT("Got \nexception at %016lx\012") #endif .endm .macro __BUILD_count exception LONG_L t0,exception_count_\exception LONG_ADDIU t0, 1 LONG_S t0,exception_count_\exception .comm exception_count\exception, 8, 8 .endm .macro __BUILD_HANDLER exception handler clear verbose ext .align 5 NESTED(handle_\exception, PT_SIZE, sp) .cfi_signal_frame .set noat SAVE_ALL FEXPORT(handle_\exception\ext) __build_clear_\clear .set at __BUILD_\verbose \exception move a0, sp jal do_\handler j ret_from_exception END(handle_\exception) .endm .macro BUILD_HANDLER exception handler clear verbose __BUILD_HANDLER \exception \handler \clear \verbose _int .endm BUILD_HANDLER adel ade ade silent /* #4 */ BUILD_HANDLER ades ade ade silent /* #5 */ BUILD_HANDLER ibe be cli silent /* #6 */ BUILD_HANDLER dbe be cli silent /* #7 */ BUILD_HANDLER bp bp sti silent /* #9 */ BUILD_HANDLER ri ri sti silent /* #10 */ BUILD_HANDLER cpu cpu sti silent /* #11 */ BUILD_HANDLER ov ov sti silent /* #12 */ BUILD_HANDLER tr tr sti silent /* #13 */ BUILD_HANDLER msa_fpe msa_fpe msa_fpe silent /* #14 */ BUILD_HANDLER fpe fpe fpe silent /* #15 */ #ifdef CONFIG_CPU_LOONGSON3 BUILD_HANDLER gsex gsex gsex silent /* #16 */ #else BUILD_HANDLER ftlb ftlb none silent /* #16 */ #endif BUILD_HANDLER msa msa sti silent /* #21 */ BUILD_HANDLER mdmx mdmx sti silent /* #22 */ #ifdef CONFIG_HARDWARE_WATCHPOINTS /* * For watch, interrupts will be enabled after the watch * registers are read. 
*/ BUILD_HANDLER watch watch cli silent /* #23 */ #else BUILD_HANDLER watch watch sti verbose /* #23 */ #endif BUILD_HANDLER mcheck mcheck cli verbose /* #24 */ BUILD_HANDLER mt mt sti silent /* #25 */ BUILD_HANDLER dsp dsp sti silent /* #26 */ BUILD_HANDLER reserved reserved sti verbose /* others */ .align 5 LEAF(handle_ri_rdhwr_tlbp) .set push .set noat .set noreorder /* check if TLB contains a entry for EPC */ MFC0 k1, CP0_ENTRYHI andi k1, MIPS_ENTRYHI_ASID | MIPS_ENTRYHI_ASIDX MFC0 k0, CP0_EPC PTR_SRL k0, _PAGE_SHIFT + 1 PTR_SLL k0, _PAGE_SHIFT + 1 or k1, k0 MTC0 k1, CP0_ENTRYHI mtc0_tlbw_hazard tlbp tlb_probe_hazard mfc0 k1, CP0_INDEX .set pop bltz k1, handle_ri /* slow path */ /* fall thru */ END(handle_ri_rdhwr_tlbp) LEAF(handle_ri_rdhwr) .set push .set noat .set noreorder /* MIPS32: 0x7c03e83b: rdhwr v1,$29 */ /* microMIPS: 0x007d6b3c: rdhwr v1,$29 */ MFC0 k1, CP0_EPC #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2) and k0, k1, 1 beqz k0, 1f xor k1, k0 lhu k0, (k1) lhu k1, 2(k1) ins k1, k0, 16, 16 lui k0, 0x007d b docheck ori k0, 0x6b3c 1: lui k0, 0x7c03 lw k1, (k1) ori k0, 0xe83b #else andi k0, k1, 1 bnez k0, handle_ri lui k0, 0x7c03 lw k1, (k1) ori k0, 0xe83b #endif .set reorder docheck: bne k0, k1, handle_ri /* if not ours */ isrdhwr: /* The insn is rdhwr. No need to check CAUSE.BD here. */ get_saved_sp /* k1 := current_thread_info */ .set noreorder MFC0 k0, CP0_EPC #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) ori k1, _THREAD_MASK xori k1, _THREAD_MASK LONG_L v1, TI_TP_VALUE(k1) LONG_ADDIU k0, 4 jr k0 rfe #else #ifndef CONFIG_CPU_DADDI_WORKAROUNDS LONG_ADDIU k0, 4 /* stall on $k0 */ #else .set at=v1 LONG_ADDIU k0, 4 .set noat #endif MTC0 k0, CP0_EPC /* I hope three instructions between MTC0 and ERET are enough... 
*/ ori k1, _THREAD_MASK xori k1, _THREAD_MASK LONG_L v1, TI_TP_VALUE(k1) .set arch=r4000 eret .set mips0 #endif .set pop END(handle_ri_rdhwr) #ifdef CONFIG_64BIT /* A temporary overflow handler used by check_daddi(). */ __INIT BUILD_HANDLER daddi_ov daddi_ov none silent /* #12 */ #endif
AirFortressIlikara/LS2K0300-linux-4.19
12,426
arch/mips/kernel/cps-vec.S
/* * Copyright (C) 2013 Imagination Technologies * Author: Paul Burton <paul.burton@mips.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <asm/addrspace.h> #include <asm/asm.h> #include <asm/asm-offsets.h> #include <asm/asmmacro.h> #include <asm/cacheops.h> #include <asm/eva.h> #include <asm/mipsregs.h> #include <asm/mipsmtregs.h> #include <asm/pm.h> #define GCR_CPC_BASE_OFS 0x0088 #define GCR_CL_COHERENCE_OFS 0x2008 #define GCR_CL_ID_OFS 0x2028 #define CPC_CL_VC_STOP_OFS 0x2020 #define CPC_CL_VC_RUN_OFS 0x2028 .extern mips_cm_base .set noreorder #ifdef CONFIG_64BIT # define STATUS_BITDEPS ST0_KX #else # define STATUS_BITDEPS 0 #endif #ifdef CONFIG_MIPS_CPS_NS16550 #define DUMP_EXCEP(name) \ PTR_LA a0, 8f; \ jal mips_cps_bev_dump; \ nop; \ TEXT(name) #else /* !CONFIG_MIPS_CPS_NS16550 */ #define DUMP_EXCEP(name) #endif /* !CONFIG_MIPS_CPS_NS16550 */ /* * Set dest to non-zero if the core supports the MT ASE, else zero. If * MT is not supported then branch to nomt. */ .macro has_mt dest, nomt mfc0 \dest, CP0_CONFIG, 1 bgez \dest, \nomt mfc0 \dest, CP0_CONFIG, 2 bgez \dest, \nomt mfc0 \dest, CP0_CONFIG, 3 andi \dest, \dest, MIPS_CONF3_MT beqz \dest, \nomt nop .endm /* * Set dest to non-zero if the core supports MIPSr6 multithreading * (ie. VPs), else zero. If MIPSr6 multithreading is not supported then * branch to nomt. 
*/ .macro has_vp dest, nomt mfc0 \dest, CP0_CONFIG, 1 bgez \dest, \nomt mfc0 \dest, CP0_CONFIG, 2 bgez \dest, \nomt mfc0 \dest, CP0_CONFIG, 3 bgez \dest, \nomt mfc0 \dest, CP0_CONFIG, 4 bgez \dest, \nomt mfc0 \dest, CP0_CONFIG, 5 andi \dest, \dest, MIPS_CONF5_VP beqz \dest, \nomt nop .endm /* Calculate an uncached address for the CM GCRs */ .macro cmgcrb dest .set push .set noat MFC0 $1, CP0_CMGCRBASE PTR_SLL $1, $1, 4 PTR_LI \dest, UNCAC_BASE PTR_ADDU \dest, \dest, $1 .set pop .endm .section .text.cps-vec .balign 0x1000 LEAF(mips_cps_core_entry) /* * These first 4 bytes will be patched by cps_smp_setup to load the * CCA to use into register s0. */ .word 0 /* Check whether we're here due to an NMI */ mfc0 k0, CP0_STATUS and k0, k0, ST0_NMI beqz k0, not_nmi nop /* This is an NMI */ PTR_LA k0, nmi_handler jr k0 nop not_nmi: /* Setup Cause */ li t0, CAUSEF_IV mtc0 t0, CP0_CAUSE /* Setup Status */ li t0, ST0_CU1 | ST0_CU0 | ST0_BEV | STATUS_BITDEPS mtc0 t0, CP0_STATUS /* Skip cache & coherence setup if we're already coherent */ cmgcrb v1 lw s7, GCR_CL_COHERENCE_OFS(v1) bnez s7, 1f nop /* Initialize the L1 caches */ jal mips_cps_cache_init nop /* Enter the coherent domain */ li t0, 0xff sw t0, GCR_CL_COHERENCE_OFS(v1) ehb /* Set Kseg0 CCA to that in s0 */ 1: mfc0 t0, CP0_CONFIG ori t0, 0x7 xori t0, 0x7 or t0, t0, s0 mtc0 t0, CP0_CONFIG ehb /* Jump to kseg0 */ PTR_LA t0, 1f jr t0 nop /* * We're up, cached & coherent. Perform any EVA initialization necessary * before we access memory. */ 1: eva_init /* Retrieve boot configuration pointers */ jal mips_cps_get_bootcfg nop /* Skip core-level init if we started up coherent */ bnez s7, 1f nop /* Perform any further required core-level initialisation */ jal mips_cps_core_init nop /* * Boot any other VPEs within this core that should be online, and * deactivate this VPE if it should be offline. */ move a1, t9 jal mips_cps_boot_vpes move a0, v0 /* Off we go! 
*/ 1: PTR_L t1, VPEBOOTCFG_PC(v1) PTR_L gp, VPEBOOTCFG_GP(v1) PTR_L sp, VPEBOOTCFG_SP(v1) jr t1 nop END(mips_cps_core_entry) .org 0x200 LEAF(excep_tlbfill) DUMP_EXCEP("TLB Fill") b . nop END(excep_tlbfill) .org 0x280 LEAF(excep_xtlbfill) DUMP_EXCEP("XTLB Fill") b . nop END(excep_xtlbfill) .org 0x300 LEAF(excep_cache) DUMP_EXCEP("Cache") b . nop END(excep_cache) .org 0x380 LEAF(excep_genex) DUMP_EXCEP("General") b . nop END(excep_genex) .org 0x400 LEAF(excep_intex) DUMP_EXCEP("Interrupt") b . nop END(excep_intex) .org 0x480 LEAF(excep_ejtag) PTR_LA k0, ejtag_debug_handler jr k0 nop END(excep_ejtag) LEAF(mips_cps_core_init) #ifdef CONFIG_MIPS_MT_SMP /* Check that the core implements the MT ASE */ has_mt t0, 3f .set push .set MIPS_ISA_LEVEL_RAW .set mt /* Only allow 1 TC per VPE to execute... */ dmt /* ...and for the moment only 1 VPE */ dvpe PTR_LA t1, 1f jr.hb t1 nop /* Enter VPE configuration state */ 1: mfc0 t0, CP0_MVPCONTROL ori t0, t0, MVPCONTROL_VPC mtc0 t0, CP0_MVPCONTROL /* Retrieve the number of VPEs within the core */ mfc0 t0, CP0_MVPCONF0 srl t0, t0, MVPCONF0_PVPE_SHIFT andi t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT) addiu ta3, t0, 1 /* If there's only 1, we're done */ beqz t0, 2f nop /* Loop through each VPE within this core */ li ta1, 1 1: /* Operate on the appropriate TC */ mtc0 ta1, CP0_VPECONTROL ehb /* Bind TC to VPE (1:1 TC:VPE mapping) */ mttc0 ta1, CP0_TCBIND /* Set exclusive TC, non-active, master */ li t0, VPECONF0_MVP sll t1, ta1, VPECONF0_XTC_SHIFT or t0, t0, t1 mttc0 t0, CP0_VPECONF0 /* Set TC non-active, non-allocatable */ mttc0 zero, CP0_TCSTATUS /* Set TC halted */ li t0, TCHALT_H mttc0 t0, CP0_TCHALT /* Next VPE */ addiu ta1, ta1, 1 slt t0, ta1, ta3 bnez t0, 1b nop /* Leave VPE configuration state */ 2: mfc0 t0, CP0_MVPCONTROL xori t0, t0, MVPCONTROL_VPC mtc0 t0, CP0_MVPCONTROL 3: .set pop #endif jr ra nop END(mips_cps_core_init) /** * mips_cps_get_bootcfg() - retrieve boot configuration pointers * * Returns: pointer to struct 
core_boot_config in v0, pointer to * struct vpe_boot_config in v1, VPE ID in t9 */ LEAF(mips_cps_get_bootcfg) /* Calculate a pointer to this cores struct core_boot_config */ cmgcrb t0 lw t0, GCR_CL_ID_OFS(t0) li t1, COREBOOTCFG_SIZE mul t0, t0, t1 PTR_LA t1, mips_cps_core_bootcfg PTR_L t1, 0(t1) PTR_ADDU v0, t0, t1 /* Calculate this VPEs ID. If the core doesn't support MT use 0 */ li t9, 0 #if defined(CONFIG_CPU_MIPSR6) has_vp ta2, 1f /* * Assume non-contiguous numbering. Perhaps some day we'll need * to handle contiguous VP numbering, but no such systems yet * exist. */ mfc0 t9, CP0_GLOBALNUMBER andi t9, t9, MIPS_GLOBALNUMBER_VP #elif defined(CONFIG_MIPS_MT_SMP) has_mt ta2, 1f /* Find the number of VPEs present in the core */ mfc0 t1, CP0_MVPCONF0 srl t1, t1, MVPCONF0_PVPE_SHIFT andi t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT addiu t1, t1, 1 /* Calculate a mask for the VPE ID from EBase.CPUNum */ clz t1, t1 li t2, 31 subu t1, t2, t1 li t2, 1 sll t1, t2, t1 addiu t1, t1, -1 /* Retrieve the VPE ID from EBase.CPUNum */ mfc0 t9, $15, 1 and t9, t9, t1 #endif 1: /* Calculate a pointer to this VPEs struct vpe_boot_config */ li t1, VPEBOOTCFG_SIZE mul v1, t9, t1 PTR_L ta3, COREBOOTCFG_VPECONFIG(v0) PTR_ADDU v1, v1, ta3 jr ra nop END(mips_cps_get_bootcfg) LEAF(mips_cps_boot_vpes) lw ta2, COREBOOTCFG_VPEMASK(a0) PTR_L ta3, COREBOOTCFG_VPECONFIG(a0) #if defined(CONFIG_CPU_MIPSR6) has_vp t0, 5f /* Find base address of CPC */ cmgcrb t3 PTR_L t1, GCR_CPC_BASE_OFS(t3) PTR_LI t2, ~0x7fff and t1, t1, t2 PTR_LI t2, UNCAC_BASE PTR_ADD t1, t1, t2 /* Start any other VPs that ought to be running */ PTR_S ta2, CPC_CL_VC_RUN_OFS(t1) /* Ensure this VP stops running if it shouldn't be */ not ta2 PTR_S ta2, CPC_CL_VC_STOP_OFS(t1) ehb #elif defined(CONFIG_MIPS_MT) /* If the core doesn't support MT then return */ has_mt t0, 5f /* Enter VPE configuration state */ .set push .set MIPS_ISA_LEVEL_RAW .set mt dvpe .set pop PTR_LA t1, 1f jr.hb t1 nop 1: mfc0 t1, CP0_MVPCONTROL ori t1, t1, 
MVPCONTROL_VPC mtc0 t1, CP0_MVPCONTROL ehb /* Loop through each VPE */ move t8, ta2 li ta1, 0 /* Check whether the VPE should be running. If not, skip it */ 1: andi t0, ta2, 1 beqz t0, 2f nop /* Operate on the appropriate TC */ mfc0 t0, CP0_VPECONTROL ori t0, t0, VPECONTROL_TARGTC xori t0, t0, VPECONTROL_TARGTC or t0, t0, ta1 mtc0 t0, CP0_VPECONTROL ehb .set push .set MIPS_ISA_LEVEL_RAW .set mt /* Skip the VPE if its TC is not halted */ mftc0 t0, CP0_TCHALT beqz t0, 2f nop /* Calculate a pointer to the VPEs struct vpe_boot_config */ li t0, VPEBOOTCFG_SIZE mul t0, t0, ta1 addu t0, t0, ta3 /* Set the TC restart PC */ lw t1, VPEBOOTCFG_PC(t0) mttc0 t1, CP0_TCRESTART /* Set the TC stack pointer */ lw t1, VPEBOOTCFG_SP(t0) mttgpr t1, sp /* Set the TC global pointer */ lw t1, VPEBOOTCFG_GP(t0) mttgpr t1, gp /* Copy config from this VPE */ mfc0 t0, CP0_CONFIG mttc0 t0, CP0_CONFIG /* * Copy the EVA config from this VPE if the CPU supports it. * CONFIG3 must exist to be running MT startup - just read it. 
*/ mfc0 t0, CP0_CONFIG, 3 and t0, t0, MIPS_CONF3_SC beqz t0, 3f nop mfc0 t0, CP0_SEGCTL0 mttc0 t0, CP0_SEGCTL0 mfc0 t0, CP0_SEGCTL1 mttc0 t0, CP0_SEGCTL1 mfc0 t0, CP0_SEGCTL2 mttc0 t0, CP0_SEGCTL2 3: /* Ensure no software interrupts are pending */ mttc0 zero, CP0_CAUSE mttc0 zero, CP0_STATUS /* Set TC active, not interrupt exempt */ mftc0 t0, CP0_TCSTATUS li t1, ~TCSTATUS_IXMT and t0, t0, t1 ori t0, t0, TCSTATUS_A mttc0 t0, CP0_TCSTATUS /* Clear the TC halt bit */ mttc0 zero, CP0_TCHALT /* Set VPE active */ mftc0 t0, CP0_VPECONF0 ori t0, t0, VPECONF0_VPA mttc0 t0, CP0_VPECONF0 /* Next VPE */ 2: srl ta2, ta2, 1 addiu ta1, ta1, 1 bnez ta2, 1b nop /* Leave VPE configuration state */ mfc0 t1, CP0_MVPCONTROL xori t1, t1, MVPCONTROL_VPC mtc0 t1, CP0_MVPCONTROL ehb evpe .set pop /* Check whether this VPE is meant to be running */ li t0, 1 sll t0, t0, a1 and t0, t0, t8 bnez t0, 2f nop /* This VPE should be offline, halt the TC */ li t0, TCHALT_H mtc0 t0, CP0_TCHALT PTR_LA t0, 1f 1: jr.hb t0 nop 2: #endif /* CONFIG_MIPS_MT_SMP */ /* Return */ 5: jr ra nop END(mips_cps_boot_vpes) LEAF(mips_cps_cache_init) /* * Clear the bits used to index the caches. Note that the architecture * dictates that writing to any of TagLo or TagHi selects 0 or 2 should * be valid for all MIPS32 CPUs, even those for which said writes are * unnecessary. 
*/ mtc0 zero, CP0_TAGLO, 0 mtc0 zero, CP0_TAGHI, 0 mtc0 zero, CP0_TAGLO, 2 mtc0 zero, CP0_TAGHI, 2 ehb /* Primary cache configuration is indicated by Config1 */ mfc0 v0, CP0_CONFIG, 1 /* Detect I-cache line size */ _EXT t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ beqz t0, icache_done li t1, 2 sllv t0, t1, t0 /* Detect I-cache size */ _EXT t1, v0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ xori t2, t1, 0x7 beqz t2, 1f li t3, 32 addiu t1, t1, 1 sllv t1, t3, t1 1: /* At this point t1 == I-cache sets per way */ _EXT t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ addiu t2, t2, 1 mul t1, t1, t0 mul t1, t1, t2 li a0, CKSEG0 PTR_ADD a1, a0, t1 1: cache Index_Store_Tag_I, 0(a0) PTR_ADD a0, a0, t0 bne a0, a1, 1b nop icache_done: /* Detect D-cache line size */ _EXT t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ beqz t0, dcache_done li t1, 2 sllv t0, t1, t0 /* Detect D-cache size */ _EXT t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ xori t2, t1, 0x7 beqz t2, 1f li t3, 32 addiu t1, t1, 1 sllv t1, t3, t1 1: /* At this point t1 == D-cache sets per way */ _EXT t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ addiu t2, t2, 1 mul t1, t1, t0 mul t1, t1, t2 li a0, CKSEG0 PTR_ADDU a1, a0, t1 PTR_SUBU a1, a1, t0 1: cache Index_Store_Tag_D, 0(a0) bne a0, a1, 1b PTR_ADD a0, a0, t0 dcache_done: jr ra nop END(mips_cps_cache_init) #if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM) /* Calculate a pointer to this CPUs struct mips_static_suspend_state */ .macro psstate dest .set push .set noat lw $1, TI_CPU(gp) sll $1, $1, LONGLOG PTR_LA \dest, __per_cpu_offset addu $1, $1, \dest lw $1, 0($1) PTR_LA \dest, cps_cpu_state addu \dest, \dest, $1 .set pop .endm LEAF(mips_cps_pm_save) /* Save CPU state */ SUSPEND_SAVE_REGS psstate t1 SUSPEND_SAVE_STATIC jr v0 nop END(mips_cps_pm_save) LEAF(mips_cps_pm_restore) /* Restore CPU state */ psstate t1 RESUME_RESTORE_STATIC RESUME_RESTORE_REGS_RETURN END(mips_cps_pm_restore) #endif /* CONFIG_MIPS_CPS_PM && CONFIG_CPU_PM */
AirFortressIlikara/LS2K0300-linux-4.19
13,500
arch/mips/kernel/scall32-o32.S
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1995-99, 2000- 02, 06 Ralf Baechle <ralf@linux-mips.org> * Copyright (C) 2001 MIPS Technologies, Inc. * Copyright (C) 2004 Thiemo Seufer * Copyright (C) 2014 Imagination Technologies Ltd. */ #include <linux/errno.h> #include <asm/asm.h> #include <asm/asmmacro.h> #include <asm/irqflags.h> #include <asm/mipsregs.h> #include <asm/regdef.h> #include <asm/stackframe.h> #include <asm/isadep.h> #include <asm/sysmips.h> #include <asm/thread_info.h> #include <asm/unistd.h> #include <asm/war.h> #include <asm/asm-offsets.h> /* Highest syscall used of any syscall flavour */ #define MAX_SYSCALL_NO __NR_O32_Linux + __NR_O32_Linux_syscalls .align 5 NESTED(handle_sys, PT_SIZE, sp) .set noat SAVE_SOME TRACE_IRQS_ON_RELOAD STI .set at lw t1, PT_EPC(sp) # skip syscall on return addiu t1, 4 # skip to next instruction sw t1, PT_EPC(sp) sw a3, PT_R26(sp) # save a3 for syscall restarting /* * More than four arguments. Try to deal with it by copying the * stack arguments from the user stack to the kernel stack. * This Sucks (TM). */ lw t0, PT_R29(sp) # get old user stack pointer /* * We intentionally keep the kernel stack a little below the top of * userspace so we don't have to do a slower byte accurate check here. */ lw t5, TI_ADDR_LIMIT($28) addu t4, t0, 32 and t5, t4 bltz t5, bad_stack # -> sp is bad /* * Ok, copy the args from the luser stack to the kernel stack. 
*/ .set push .set noreorder .set nomacro load_a4: user_lw(t5, 16(t0)) # argument #5 from usp load_a5: user_lw(t6, 20(t0)) # argument #6 from usp load_a6: user_lw(t7, 24(t0)) # argument #7 from usp load_a7: user_lw(t8, 28(t0)) # argument #8 from usp loads_done: sw t5, 16(sp) # argument #5 to ksp sw t6, 20(sp) # argument #6 to ksp sw t7, 24(sp) # argument #7 to ksp sw t8, 28(sp) # argument #8 to ksp .set pop .section __ex_table,"a" PTR load_a4, bad_stack_a4 PTR load_a5, bad_stack_a5 PTR load_a6, bad_stack_a6 PTR load_a7, bad_stack_a7 .previous lw t0, TI_FLAGS($28) # syscall tracing enabled? li t1, _TIF_WORK_SYSCALL_ENTRY and t0, t1 bnez t0, syscall_trace_entry # -> yes syscall_common: subu v0, v0, __NR_O32_Linux # check syscall number sltiu t0, v0, __NR_O32_Linux_syscalls + 1 beqz t0, illegal_syscall sll t0, v0, 2 la t1, sys_call_table addu t1, t0 lw t2, (t1) # syscall routine beqz t2, illegal_syscall jalr t2 # Do The Real Thing (TM) li t0, -EMAXERRNO - 1 # error? sltu t0, t0, v0 sw t0, PT_R7(sp) # set error flag beqz t0, 1f lw t1, PT_R2(sp) # syscall number negu v0 # error sw t1, PT_R0(sp) # save it for syscall restarting 1: sw v0, PT_R2(sp) # result o32_syscall_exit: j syscall_exit_partial /* ------------------------------------------------------------------------ */ syscall_trace_entry: SAVE_STATIC move a0, sp /* * syscall number is in v0 unless we called syscall(__NR_###) * where the real syscall number is in a0 */ move a1, v0 subu t2, v0, __NR_O32_Linux bnez t2, 1f /* __NR_syscall at offset 0 */ lw a1, PT_R4(sp) 1: jal syscall_trace_enter bltz v0, 1f # seccomp failed? Skip syscall RESTORE_STATIC lw v0, PT_R2(sp) # Restore syscall (maybe modified) lw a0, PT_R4(sp) # Restore argument registers lw a1, PT_R5(sp) lw a2, PT_R6(sp) lw a3, PT_R7(sp) j syscall_common 1: j syscall_exit /* ------------------------------------------------------------------------ */ /* * Our open-coded access area sanity test for the stack pointer * failed. 
We probably should handle this case a bit more drastic. */ bad_stack: li v0, EFAULT sw v0, PT_R2(sp) li t0, 1 # set error flag sw t0, PT_R7(sp) j o32_syscall_exit bad_stack_a4: li t5, 0 b load_a5 bad_stack_a5: li t6, 0 b load_a6 bad_stack_a6: li t7, 0 b load_a7 bad_stack_a7: li t8, 0 b loads_done /* * The system call does not exist in this kernel */ illegal_syscall: li v0, ENOSYS # error sw v0, PT_R2(sp) li t0, 1 # set error flag sw t0, PT_R7(sp) j o32_syscall_exit END(handle_sys) LEAF(sys_syscall) subu t0, a0, __NR_O32_Linux # check syscall number sltiu v0, t0, __NR_O32_Linux_syscalls + 1 beqz t0, einval # do not recurse sll t1, t0, 2 beqz v0, einval lw t2, sys_call_table(t1) # syscall routine move a0, a1 # shift argument registers move a1, a2 move a2, a3 lw a3, 16(sp) lw t4, 20(sp) lw t5, 24(sp) lw t6, 28(sp) sw t4, 16(sp) sw t5, 20(sp) sw t6, 24(sp) jr t2 /* Unreached */ einval: li v0, -ENOSYS jr ra END(sys_syscall) .align 2 .type sys_call_table, @object EXPORT(sys_call_table) PTR sys_syscall /* 4000 */ PTR sys_exit PTR __sys_fork PTR sys_read PTR sys_write PTR sys_open /* 4005 */ PTR sys_close PTR sys_waitpid PTR sys_creat PTR sys_link PTR sys_unlink /* 4010 */ PTR sys_execve PTR sys_chdir PTR sys_time PTR sys_mknod PTR sys_chmod /* 4015 */ PTR sys_lchown PTR sys_ni_syscall PTR sys_ni_syscall /* was sys_stat */ PTR sys_lseek PTR sys_getpid /* 4020 */ PTR sys_mount PTR sys_oldumount PTR sys_setuid PTR sys_getuid PTR sys_stime /* 4025 */ PTR sys_ptrace PTR sys_alarm PTR sys_ni_syscall /* was sys_fstat */ PTR sys_pause PTR sys_utime /* 4030 */ PTR sys_ni_syscall PTR sys_ni_syscall PTR sys_access PTR sys_nice PTR sys_ni_syscall /* 4035 */ PTR sys_sync PTR sys_kill PTR sys_rename PTR sys_mkdir PTR sys_rmdir /* 4040 */ PTR sys_dup PTR sysm_pipe PTR sys_times PTR sys_ni_syscall PTR sys_brk /* 4045 */ PTR sys_setgid PTR sys_getgid PTR sys_ni_syscall /* was signal(2) */ PTR sys_geteuid PTR sys_getegid /* 4050 */ PTR sys_acct PTR sys_umount PTR sys_ni_syscall PTR 
sys_ioctl PTR sys_fcntl /* 4055 */ PTR sys_ni_syscall PTR sys_setpgid PTR sys_ni_syscall PTR sys_olduname PTR sys_umask /* 4060 */ PTR sys_chroot PTR sys_ustat PTR sys_dup2 PTR sys_getppid PTR sys_getpgrp /* 4065 */ PTR sys_setsid PTR sys_sigaction PTR sys_sgetmask PTR sys_ssetmask PTR sys_setreuid /* 4070 */ PTR sys_setregid PTR sys_sigsuspend PTR sys_sigpending PTR sys_sethostname PTR sys_setrlimit /* 4075 */ PTR sys_getrlimit PTR sys_getrusage PTR sys_gettimeofday PTR sys_settimeofday PTR sys_getgroups /* 4080 */ PTR sys_setgroups PTR sys_ni_syscall /* old_select */ PTR sys_symlink PTR sys_ni_syscall /* was sys_lstat */ PTR sys_readlink /* 4085 */ PTR sys_uselib PTR sys_swapon PTR sys_reboot PTR sys_old_readdir PTR sys_mips_mmap /* 4090 */ PTR sys_munmap PTR sys_truncate PTR sys_ftruncate PTR sys_fchmod PTR sys_fchown /* 4095 */ PTR sys_getpriority PTR sys_setpriority PTR sys_ni_syscall PTR sys_statfs PTR sys_fstatfs /* 4100 */ PTR sys_ni_syscall /* was ioperm(2) */ PTR sys_socketcall PTR sys_syslog PTR sys_setitimer PTR sys_getitimer /* 4105 */ PTR sys_newstat PTR sys_newlstat PTR sys_newfstat PTR sys_uname PTR sys_ni_syscall /* 4110 was iopl(2) */ PTR sys_vhangup PTR sys_ni_syscall /* was sys_idle() */ PTR sys_ni_syscall /* was sys_vm86 */ PTR sys_wait4 PTR sys_swapoff /* 4115 */ PTR sys_sysinfo PTR sys_ipc PTR sys_fsync PTR sys_sigreturn PTR __sys_clone /* 4120 */ PTR sys_setdomainname PTR sys_newuname PTR sys_ni_syscall /* sys_modify_ldt */ PTR sys_adjtimex PTR sys_mprotect /* 4125 */ PTR sys_sigprocmask PTR sys_ni_syscall /* was create_module */ PTR sys_init_module PTR sys_delete_module PTR sys_ni_syscall /* 4130 was get_kernel_syms */ PTR sys_quotactl PTR sys_getpgid PTR sys_fchdir PTR sys_bdflush PTR sys_sysfs /* 4135 */ PTR sys_personality PTR sys_ni_syscall /* for afs_syscall */ PTR sys_setfsuid PTR sys_setfsgid PTR sys_llseek /* 4140 */ PTR sys_getdents PTR sys_select PTR sys_flock PTR sys_msync PTR sys_readv /* 4145 */ PTR sys_writev PTR 
sys_cacheflush PTR sys_cachectl PTR __sys_sysmips PTR sys_ni_syscall /* 4150 */ PTR sys_getsid PTR sys_fdatasync PTR sys_sysctl PTR sys_mlock PTR sys_munlock /* 4155 */ PTR sys_mlockall PTR sys_munlockall PTR sys_sched_setparam PTR sys_sched_getparam PTR sys_sched_setscheduler /* 4160 */ PTR sys_sched_getscheduler PTR sys_sched_yield PTR sys_sched_get_priority_max PTR sys_sched_get_priority_min PTR sys_sched_rr_get_interval /* 4165 */ PTR sys_nanosleep PTR sys_mremap PTR sys_accept PTR sys_bind PTR sys_connect /* 4170 */ PTR sys_getpeername PTR sys_getsockname PTR sys_getsockopt PTR sys_listen PTR sys_recv /* 4175 */ PTR sys_recvfrom PTR sys_recvmsg PTR sys_send PTR sys_sendmsg PTR sys_sendto /* 4180 */ PTR sys_setsockopt PTR sys_shutdown PTR sys_socket PTR sys_socketpair PTR sys_setresuid /* 4185 */ PTR sys_getresuid PTR sys_ni_syscall /* was sys_query_module */ PTR sys_poll PTR sys_ni_syscall /* was nfsservctl */ PTR sys_setresgid /* 4190 */ PTR sys_getresgid PTR sys_prctl PTR sys_rt_sigreturn PTR sys_rt_sigaction PTR sys_rt_sigprocmask /* 4195 */ PTR sys_rt_sigpending PTR sys_rt_sigtimedwait PTR sys_rt_sigqueueinfo PTR sys_rt_sigsuspend PTR sys_pread64 /* 4200 */ PTR sys_pwrite64 PTR sys_chown PTR sys_getcwd PTR sys_capget PTR sys_capset /* 4205 */ PTR sys_sigaltstack PTR sys_sendfile PTR sys_ni_syscall PTR sys_ni_syscall PTR sys_mips_mmap2 /* 4210 */ PTR sys_truncate64 PTR sys_ftruncate64 PTR sys_stat64 PTR sys_lstat64 PTR sys_fstat64 /* 4215 */ PTR sys_pivot_root PTR sys_mincore PTR sys_madvise PTR sys_getdents64 PTR sys_fcntl64 /* 4220 */ PTR sys_ni_syscall PTR sys_gettid PTR sys_readahead PTR sys_setxattr PTR sys_lsetxattr /* 4225 */ PTR sys_fsetxattr PTR sys_getxattr PTR sys_lgetxattr PTR sys_fgetxattr PTR sys_listxattr /* 4230 */ PTR sys_llistxattr PTR sys_flistxattr PTR sys_removexattr PTR sys_lremovexattr PTR sys_fremovexattr /* 4235 */ PTR sys_tkill PTR sys_sendfile64 PTR sys_futex #ifdef CONFIG_MIPS_MT_FPAFF /* * For FPU affinity scheduling on MIPS MT 
processors, we need to * intercept sys_sched_xxxaffinity() calls until we get a proper hook * in kernel/sched/core.c. Considered only temporary we only support * these hooks for the 32-bit kernel - there is no MIPS64 MT processor * atm. */ PTR mipsmt_sys_sched_setaffinity PTR mipsmt_sys_sched_getaffinity #else PTR sys_sched_setaffinity PTR sys_sched_getaffinity /* 4240 */ #endif /* CONFIG_MIPS_MT_FPAFF */ PTR sys_io_setup PTR sys_io_destroy PTR sys_io_getevents PTR sys_io_submit PTR sys_io_cancel /* 4245 */ PTR sys_exit_group PTR sys_lookup_dcookie PTR sys_epoll_create PTR sys_epoll_ctl PTR sys_epoll_wait /* 4250 */ PTR sys_remap_file_pages PTR sys_set_tid_address PTR sys_restart_syscall PTR sys_fadvise64_64 PTR sys_statfs64 /* 4255 */ PTR sys_fstatfs64 PTR sys_timer_create PTR sys_timer_settime PTR sys_timer_gettime PTR sys_timer_getoverrun /* 4260 */ PTR sys_timer_delete PTR sys_clock_settime PTR sys_clock_gettime PTR sys_clock_getres PTR sys_clock_nanosleep /* 4265 */ PTR sys_tgkill PTR sys_utimes PTR sys_mbind PTR sys_get_mempolicy PTR sys_set_mempolicy /* 4270 */ PTR sys_mq_open PTR sys_mq_unlink PTR sys_mq_timedsend PTR sys_mq_timedreceive PTR sys_mq_notify /* 4275 */ PTR sys_mq_getsetattr PTR sys_ni_syscall /* sys_vserver */ PTR sys_waitid PTR sys_ni_syscall /* available, was setaltroot */ PTR sys_add_key /* 4280 */ PTR sys_request_key PTR sys_keyctl PTR sys_set_thread_area PTR sys_inotify_init PTR sys_inotify_add_watch /* 4285 */ PTR sys_inotify_rm_watch PTR sys_migrate_pages PTR sys_openat PTR sys_mkdirat PTR sys_mknodat /* 4290 */ PTR sys_fchownat PTR sys_futimesat PTR sys_fstatat64 PTR sys_unlinkat PTR sys_renameat /* 4295 */ PTR sys_linkat PTR sys_symlinkat PTR sys_readlinkat PTR sys_fchmodat PTR sys_faccessat /* 4300 */ PTR sys_pselect6 PTR sys_ppoll PTR sys_unshare PTR sys_splice PTR sys_sync_file_range /* 4305 */ PTR sys_tee PTR sys_vmsplice PTR sys_move_pages PTR sys_set_robust_list PTR sys_get_robust_list /* 4310 */ PTR sys_kexec_load PTR 
sys_getcpu PTR sys_epoll_pwait PTR sys_ioprio_set PTR sys_ioprio_get /* 4315 */ PTR sys_utimensat PTR sys_signalfd PTR sys_ni_syscall /* was timerfd */ PTR sys_eventfd PTR sys_fallocate /* 4320 */ PTR sys_timerfd_create PTR sys_timerfd_gettime PTR sys_timerfd_settime PTR sys_signalfd4 PTR sys_eventfd2 /* 4325 */ PTR sys_epoll_create1 PTR sys_dup3 PTR sys_pipe2 PTR sys_inotify_init1 PTR sys_preadv /* 4330 */ PTR sys_pwritev PTR sys_rt_tgsigqueueinfo PTR sys_perf_event_open PTR sys_accept4 PTR sys_recvmmsg /* 4335 */ PTR sys_fanotify_init PTR sys_fanotify_mark PTR sys_prlimit64 PTR sys_name_to_handle_at PTR sys_open_by_handle_at /* 4340 */ PTR sys_clock_adjtime PTR sys_syncfs PTR sys_sendmmsg PTR sys_setns PTR sys_process_vm_readv /* 4345 */ PTR sys_process_vm_writev PTR sys_kcmp PTR sys_finit_module PTR sys_sched_setattr PTR sys_sched_getattr /* 4350 */ PTR sys_renameat2 PTR sys_seccomp PTR sys_getrandom PTR sys_memfd_create PTR sys_bpf /* 4355 */ PTR sys_execveat PTR sys_userfaultfd PTR sys_membarrier PTR sys_mlock2 PTR sys_copy_file_range /* 4360 */ PTR sys_preadv2 PTR sys_pwritev2 PTR sys_pkey_mprotect PTR sys_pkey_alloc PTR sys_pkey_free /* 4365 */ PTR sys_statx PTR sys_rseq PTR sys_io_pgetevents
AirFortressIlikara/LS2K0300-linux-4.19
12,534
arch/mips/cavium-octeon/octeon-memcpy.S
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Unified implementation of memcpy, memmove and the __copy_user backend. * * Copyright (C) 1998, 99, 2000, 01, 2002 Ralf Baechle (ralf@gnu.org) * Copyright (C) 1999, 2000, 01, 2002 Silicon Graphics, Inc. * Copyright (C) 2002 Broadcom, Inc. * memcpy/copy_user author: Mark Vandevoorde * * Mnemonic names for arguments to memcpy/__copy_user */ #include <asm/asm.h> #include <asm/asm-offsets.h> #include <asm/export.h> #include <asm/regdef.h> #define dst a0 #define src a1 #define len a2 /* * Spec * * memcpy copies len bytes from src to dst and sets v0 to dst. * It assumes that * - src and dst don't overlap * - src is readable * - dst is writable * memcpy uses the standard calling convention * * __copy_user copies up to len bytes from src to dst and sets a2 (len) to * the number of uncopied bytes due to an exception caused by a read or write. * __copy_user assumes that src and dst don't overlap, and that the call is * implementing one of the following: * copy_to_user * - src is readable (no exceptions when reading src) * copy_from_user * - dst is writable (no exceptions when writing dst) * __copy_user uses a non-standard calling convention; see * arch/mips/include/asm/uaccess.h * * When an exception happens on a load, the handler must # ensure that all of the destination buffer is overwritten to prevent * leaking information to user mode programs. */ /* * Implementation */ /* * The exception handler for loads requires that: * 1- AT contain the address of the byte just past the end of the source * of the copy, * 2- src_entry <= src < AT, and * 3- (dst - src) == (dst_entry - src_entry), * The _entry suffix denotes values when __copy_user was called. 
* * (1) is set up up by uaccess.h and maintained by not writing AT in copy_user * (2) is met by incrementing src by the number of bytes copied * (3) is met by not doing loads between a pair of increments of dst and src * * The exception handlers for stores adjust len (if necessary) and return. * These handlers do not need to overwrite any data. * * For __rmemcpy and memmove an exception is always a kernel bug, therefore * they're not protected. */ #define EXC(inst_reg,addr,handler) \ 9: inst_reg, addr; \ .section __ex_table,"a"; \ PTR 9b, handler; \ .previous /* * Only on the 64-bit kernel we can made use of 64-bit registers. */ #define LOAD ld #define LOADL ldl #define LOADR ldr #define STOREL sdl #define STORER sdr #define STORE sd #define ADD daddu #define SUB dsubu #define SRL dsrl #define SRA dsra #define SLL dsll #define SLLV dsllv #define SRLV dsrlv #define NBYTES 8 #define LOG_NBYTES 3 /* * As we are sharing code base with the mips32 tree (which use the o32 ABI * register definitions). We need to redefine the register definitions from * the n64 ABI register naming to the o32 ABI register naming. */ #undef t0 #undef t1 #undef t2 #undef t3 #define t0 $8 #define t1 $9 #define t2 $10 #define t3 $11 #define t4 $12 #define t5 $13 #define t6 $14 #define t7 $15 #ifdef CONFIG_CPU_LITTLE_ENDIAN #define LDFIRST LOADR #define LDREST LOADL #define STFIRST STORER #define STREST STOREL #define SHIFT_DISCARD SLLV #else #define LDFIRST LOADL #define LDREST LOADR #define STFIRST STOREL #define STREST STORER #define SHIFT_DISCARD SRLV #endif #define FIRST(unit) ((unit)*NBYTES) #define REST(unit) (FIRST(unit)+NBYTES-1) #define UNIT(unit) FIRST(unit) #define ADDRMASK (NBYTES-1) .text .set noreorder .set noat /* * A combined memcpy/__copy_user * __copy_user sets len to 0 for success; else to an upper bound of * the number of uncopied bytes. * memcpy sets v0 to dst. 
*/ .align 5 LEAF(memcpy) /* a0=dst a1=src a2=len */ EXPORT_SYMBOL(memcpy) move v0, dst /* return value */ __memcpy: FEXPORT(__copy_user) EXPORT_SYMBOL(__copy_user) /* * Note: dst & src may be unaligned, len may be 0 * Temps */ # # Octeon doesn't care if the destination is unaligned. The hardware # can fix it faster than we can special case the assembly. # pref 0, 0(src) sltu t0, len, NBYTES # Check if < 1 word bnez t0, copy_bytes_checklen and t0, src, ADDRMASK # Check if src unaligned bnez t0, src_unaligned sltu t0, len, 4*NBYTES # Check if < 4 words bnez t0, less_than_4units sltu t0, len, 8*NBYTES # Check if < 8 words bnez t0, less_than_8units sltu t0, len, 16*NBYTES # Check if < 16 words bnez t0, cleanup_both_aligned sltu t0, len, 128+1 # Check if len < 129 bnez t0, 1f # Skip prefetch if len is too short sltu t0, len, 256+1 # Check if len < 257 bnez t0, 1f # Skip prefetch if len is too short pref 0, 128(src) # We must not prefetch invalid addresses # # This is where we loop if there is more than 128 bytes left 2: pref 0, 256(src) # We must not prefetch invalid addresses # # This is where we loop if we can't prefetch anymore 1: EXC( LOAD t0, UNIT(0)(src), l_exc) EXC( LOAD t1, UNIT(1)(src), l_exc_copy) EXC( LOAD t2, UNIT(2)(src), l_exc_copy) EXC( LOAD t3, UNIT(3)(src), l_exc_copy) SUB len, len, 16*NBYTES EXC( STORE t0, UNIT(0)(dst), s_exc_p16u) EXC( STORE t1, UNIT(1)(dst), s_exc_p15u) EXC( STORE t2, UNIT(2)(dst), s_exc_p14u) EXC( STORE t3, UNIT(3)(dst), s_exc_p13u) EXC( LOAD t0, UNIT(4)(src), l_exc_copy) EXC( LOAD t1, UNIT(5)(src), l_exc_copy) EXC( LOAD t2, UNIT(6)(src), l_exc_copy) EXC( LOAD t3, UNIT(7)(src), l_exc_copy) EXC( STORE t0, UNIT(4)(dst), s_exc_p12u) EXC( STORE t1, UNIT(5)(dst), s_exc_p11u) EXC( STORE t2, UNIT(6)(dst), s_exc_p10u) ADD src, src, 16*NBYTES EXC( STORE t3, UNIT(7)(dst), s_exc_p9u) ADD dst, dst, 16*NBYTES EXC( LOAD t0, UNIT(-8)(src), l_exc_copy_rewind16) EXC( LOAD t1, UNIT(-7)(src), l_exc_copy_rewind16) EXC( LOAD t2, UNIT(-6)(src), 
l_exc_copy_rewind16) EXC( LOAD t3, UNIT(-5)(src), l_exc_copy_rewind16) EXC( STORE t0, UNIT(-8)(dst), s_exc_p8u) EXC( STORE t1, UNIT(-7)(dst), s_exc_p7u) EXC( STORE t2, UNIT(-6)(dst), s_exc_p6u) EXC( STORE t3, UNIT(-5)(dst), s_exc_p5u) EXC( LOAD t0, UNIT(-4)(src), l_exc_copy_rewind16) EXC( LOAD t1, UNIT(-3)(src), l_exc_copy_rewind16) EXC( LOAD t2, UNIT(-2)(src), l_exc_copy_rewind16) EXC( LOAD t3, UNIT(-1)(src), l_exc_copy_rewind16) EXC( STORE t0, UNIT(-4)(dst), s_exc_p4u) EXC( STORE t1, UNIT(-3)(dst), s_exc_p3u) EXC( STORE t2, UNIT(-2)(dst), s_exc_p2u) EXC( STORE t3, UNIT(-1)(dst), s_exc_p1u) sltu t0, len, 256+1 # See if we can prefetch more beqz t0, 2b sltu t0, len, 128 # See if we can loop more time beqz t0, 1b nop # # Jump here if there are less than 16*NBYTES left. # cleanup_both_aligned: beqz len, done sltu t0, len, 8*NBYTES bnez t0, less_than_8units nop EXC( LOAD t0, UNIT(0)(src), l_exc) EXC( LOAD t1, UNIT(1)(src), l_exc_copy) EXC( LOAD t2, UNIT(2)(src), l_exc_copy) EXC( LOAD t3, UNIT(3)(src), l_exc_copy) SUB len, len, 8*NBYTES EXC( STORE t0, UNIT(0)(dst), s_exc_p8u) EXC( STORE t1, UNIT(1)(dst), s_exc_p7u) EXC( STORE t2, UNIT(2)(dst), s_exc_p6u) EXC( STORE t3, UNIT(3)(dst), s_exc_p5u) EXC( LOAD t0, UNIT(4)(src), l_exc_copy) EXC( LOAD t1, UNIT(5)(src), l_exc_copy) EXC( LOAD t2, UNIT(6)(src), l_exc_copy) EXC( LOAD t3, UNIT(7)(src), l_exc_copy) EXC( STORE t0, UNIT(4)(dst), s_exc_p4u) EXC( STORE t1, UNIT(5)(dst), s_exc_p3u) EXC( STORE t2, UNIT(6)(dst), s_exc_p2u) EXC( STORE t3, UNIT(7)(dst), s_exc_p1u) ADD src, src, 8*NBYTES beqz len, done ADD dst, dst, 8*NBYTES # # Jump here if there are less than 8*NBYTES left. 
# less_than_8units: sltu t0, len, 4*NBYTES bnez t0, less_than_4units nop EXC( LOAD t0, UNIT(0)(src), l_exc) EXC( LOAD t1, UNIT(1)(src), l_exc_copy) EXC( LOAD t2, UNIT(2)(src), l_exc_copy) EXC( LOAD t3, UNIT(3)(src), l_exc_copy) SUB len, len, 4*NBYTES EXC( STORE t0, UNIT(0)(dst), s_exc_p4u) EXC( STORE t1, UNIT(1)(dst), s_exc_p3u) EXC( STORE t2, UNIT(2)(dst), s_exc_p2u) EXC( STORE t3, UNIT(3)(dst), s_exc_p1u) ADD src, src, 4*NBYTES beqz len, done ADD dst, dst, 4*NBYTES # # Jump here if there are less than 4*NBYTES left. This means # we may need to copy up to 3 NBYTES words. # less_than_4units: sltu t0, len, 1*NBYTES bnez t0, copy_bytes_checklen nop # # 1) Copy NBYTES, then check length again # EXC( LOAD t0, 0(src), l_exc) SUB len, len, NBYTES sltu t1, len, 8 EXC( STORE t0, 0(dst), s_exc_p1u) ADD src, src, NBYTES bnez t1, copy_bytes_checklen ADD dst, dst, NBYTES # # 2) Copy NBYTES, then check length again # EXC( LOAD t0, 0(src), l_exc) SUB len, len, NBYTES sltu t1, len, 8 EXC( STORE t0, 0(dst), s_exc_p1u) ADD src, src, NBYTES bnez t1, copy_bytes_checklen ADD dst, dst, NBYTES # # 3) Copy NBYTES, then check length again # EXC( LOAD t0, 0(src), l_exc) SUB len, len, NBYTES ADD src, src, NBYTES ADD dst, dst, NBYTES b copy_bytes_checklen EXC( STORE t0, -8(dst), s_exc_p1u) src_unaligned: #define rem t8 SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter beqz t0, cleanup_src_unaligned and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES 1: /* * Avoid consecutive LD*'s to the same register since some mips * implementations can't issue them in the same cycle. * It's OK to load FIRST(N+1) before REST(N) because the two addresses * are to the same unit (unless src is aligned, but it's not). 
*/ EXC( LDFIRST t0, FIRST(0)(src), l_exc) EXC( LDFIRST t1, FIRST(1)(src), l_exc_copy) SUB len, len, 4*NBYTES EXC( LDREST t0, REST(0)(src), l_exc_copy) EXC( LDREST t1, REST(1)(src), l_exc_copy) EXC( LDFIRST t2, FIRST(2)(src), l_exc_copy) EXC( LDFIRST t3, FIRST(3)(src), l_exc_copy) EXC( LDREST t2, REST(2)(src), l_exc_copy) EXC( LDREST t3, REST(3)(src), l_exc_copy) ADD src, src, 4*NBYTES EXC( STORE t0, UNIT(0)(dst), s_exc_p4u) EXC( STORE t1, UNIT(1)(dst), s_exc_p3u) EXC( STORE t2, UNIT(2)(dst), s_exc_p2u) EXC( STORE t3, UNIT(3)(dst), s_exc_p1u) bne len, rem, 1b ADD dst, dst, 4*NBYTES cleanup_src_unaligned: beqz len, done and rem, len, NBYTES-1 # rem = len % NBYTES beq rem, len, copy_bytes nop 1: EXC( LDFIRST t0, FIRST(0)(src), l_exc) EXC( LDREST t0, REST(0)(src), l_exc_copy) SUB len, len, NBYTES EXC( STORE t0, 0(dst), s_exc_p1u) ADD src, src, NBYTES bne len, rem, 1b ADD dst, dst, NBYTES copy_bytes_checklen: beqz len, done nop copy_bytes: /* 0 < len < NBYTES */ #define COPY_BYTE(N) \ EXC( lb t0, N(src), l_exc); \ SUB len, len, 1; \ beqz len, done; \ EXC( sb t0, N(dst), s_exc_p1) COPY_BYTE(0) COPY_BYTE(1) COPY_BYTE(2) COPY_BYTE(3) COPY_BYTE(4) COPY_BYTE(5) EXC( lb t0, NBYTES-2(src), l_exc) SUB len, len, 1 jr ra EXC( sb t0, NBYTES-2(dst), s_exc_p1) done: jr ra nop END(memcpy) l_exc_copy_rewind16: /* Rewind src and dst by 16*NBYTES for l_exc_copy */ SUB src, src, 16*NBYTES SUB dst, dst, 16*NBYTES l_exc_copy: /* * Copy bytes from src until faulting load address (or until a * lb faults) * * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28) * may be more than a byte beyond the last address. * Hence, the lb below may get an exception. 
* * Assumes src < THREAD_BUADDR($28) */ LOAD t0, TI_TASK($28) LOAD t0, THREAD_BUADDR(t0) 1: EXC( lb t1, 0(src), l_exc) ADD src, src, 1 sb t1, 0(dst) # can't fault -- we're copy_from_user bne src, t0, 1b ADD dst, dst, 1 l_exc: LOAD t0, TI_TASK($28) LOAD t0, THREAD_BUADDR(t0) # t0 is just past last good address SUB len, AT, t0 # len number of uncopied bytes jr ra nop #define SEXC(n) \ s_exc_p ## n ## u: \ jr ra; \ ADD len, len, n*NBYTES SEXC(16) SEXC(15) SEXC(14) SEXC(13) SEXC(12) SEXC(11) SEXC(10) SEXC(9) SEXC(8) SEXC(7) SEXC(6) SEXC(5) SEXC(4) SEXC(3) SEXC(2) SEXC(1) s_exc_p1: jr ra ADD len, len, 1 s_exc: jr ra nop .align 5 LEAF(memmove) EXPORT_SYMBOL(memmove) ADD t0, a0, a2 ADD t1, a1, a2 sltu t0, a1, t0 # dst + len <= src -> memcpy sltu t1, a0, t1 # dst >= src + len -> memcpy and t0, t1 beqz t0, __memcpy move v0, a0 /* return value */ beqz a2, r_out END(memmove) /* fall through to __rmemcpy */ LEAF(__rmemcpy) /* a0=dst a1=src a2=len */ sltu t0, a1, a0 beqz t0, r_end_bytes_up # src >= dst nop ADD a0, a2 # dst = dst + len ADD a1, a2 # src = src + len r_end_bytes: lb t0, -1(a1) SUB a2, a2, 0x1 sb t0, -1(a0) SUB a1, a1, 0x1 bnez a2, r_end_bytes SUB a0, a0, 0x1 r_out: jr ra move a2, zero r_end_bytes_up: lb t0, (a1) SUB a2, a2, 0x1 sb t0, (a0) ADD a1, a1, 0x1 bnez a2, r_end_bytes_up ADD a0, a0, 0x1 jr ra move a2, zero END(__rmemcpy)
AirFortressIlikara/LS2K0300-linux-4.19
1,847
arch/mips/lib/strnlen_user.S
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (c) 1996, 1998, 1999, 2004 by Ralf Baechle * Copyright (c) 1999 Silicon Graphics, Inc. */ #include <asm/asm.h> #include <asm/asm-offsets.h> #include <asm/export.h> #include <asm/regdef.h> #define EX(insn,reg,addr,handler) \ 9: insn reg, addr; \ .section __ex_table,"a"; \ PTR 9b, handler; \ .previous /* * Return the size of a string including the ending NUL character up to a * maximum of a1 or 0 in case of error. * * Note: for performance reasons we deliberately accept that a user may * make strlen_user and strnlen_user access the first few KSEG0 * bytes. There's nothing secret there. On 64-bit accessing beyond * the maximum is a tad hairier ... */ .macro __BUILD_STRNLEN_ASM func LEAF(__strnlen_\func\()_asm) LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok? and v0, a0 bnez v0, .Lfault\@ move v0, a0 PTR_ADDU a1, a0 # stop pointer 1: #ifdef CONFIG_CPU_DADDI_WORKAROUNDS .set noat li AT, 1 #endif beq v0, a1, 1f # limit reached? .ifeqs "\func", "kernel" EX(lb, t0, (v0), .Lfault\@) .else EX(lbe, t0, (v0), .Lfault\@) .endif .set noreorder bnez t0, 1b 1: #ifndef CONFIG_CPU_DADDI_WORKAROUNDS PTR_ADDIU v0, 1 #else PTR_ADDU v0, AT .set at #endif .set reorder PTR_SUBU v0, a0 jr ra END(__strnlen_\func\()_asm) .Lfault\@: move v0, zero jr ra .endm #ifndef CONFIG_EVA /* Set aliases */ .global __strnlen_user_asm .set __strnlen_user_asm, __strnlen_kernel_asm EXPORT_SYMBOL(__strnlen_user_asm) #endif __BUILD_STRNLEN_ASM kernel EXPORT_SYMBOL(__strnlen_kernel_asm) #ifdef CONFIG_EVA .set push .set eva __BUILD_STRNLEN_ASM user .set pop EXPORT_SYMBOL(__strnlen_user_asm) #endif
AirFortressIlikara/LS2K0300-linux-4.19
17,760
arch/mips/lib/memcpy.S
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Unified implementation of memcpy, memmove and the __copy_user backend. * * Copyright (C) 1998, 99, 2000, 01, 2002 Ralf Baechle (ralf@gnu.org) * Copyright (C) 1999, 2000, 01, 2002 Silicon Graphics, Inc. * Copyright (C) 2002 Broadcom, Inc. * memcpy/copy_user author: Mark Vandevoorde * Copyright (C) 2007 Maciej W. Rozycki * Copyright (C) 2014 Imagination Technologies Ltd. * * Mnemonic names for arguments to memcpy/__copy_user */ /* * Hack to resolve longstanding prefetch issue * * Prefetching may be fatal on some systems if we're prefetching beyond the * end of memory on some systems. It's also a seriously bad idea on non * dma-coherent systems. */ #ifdef CONFIG_DMA_NONCOHERENT #undef CONFIG_CPU_HAS_PREFETCH #endif #ifdef CONFIG_MIPS_MALTA #undef CONFIG_CPU_HAS_PREFETCH #endif #ifdef CONFIG_CPU_MIPSR6 #undef CONFIG_CPU_HAS_PREFETCH #endif #include <asm/asm.h> #include <asm/asm-offsets.h> #include <asm/export.h> #include <asm/regdef.h> #define dst a0 #define src a1 #define len a2 /* * Spec * * memcpy copies len bytes from src to dst and sets v0 to dst. * It assumes that * - src and dst don't overlap * - src is readable * - dst is writable * memcpy uses the standard calling convention * * __copy_user copies up to len bytes from src to dst and sets a2 (len) to * the number of uncopied bytes due to an exception caused by a read or write. 
* __copy_user assumes that src and dst don't overlap, and that the call is * implementing one of the following: * copy_to_user * - src is readable (no exceptions when reading src) * copy_from_user * - dst is writable (no exceptions when writing dst) * __copy_user uses a non-standard calling convention; see * include/asm-mips/uaccess.h * * When an exception happens on a load, the handler must # ensure that all of the destination buffer is overwritten to prevent * leaking information to user mode programs. */ /* * Implementation */ /* * The exception handler for loads requires that: * 1- AT contain the address of the byte just past the end of the source * of the copy, * 2- src_entry <= src < AT, and * 3- (dst - src) == (dst_entry - src_entry), * The _entry suffix denotes values when __copy_user was called. * * (1) is set up up by uaccess.h and maintained by not writing AT in copy_user * (2) is met by incrementing src by the number of bytes copied * (3) is met by not doing loads between a pair of increments of dst and src * * The exception handlers for stores adjust len (if necessary) and return. * These handlers do not need to overwrite any data. * * For __rmemcpy and memmove an exception is always a kernel bug, therefore * they're not protected. */ /* Instruction type */ #define LD_INSN 1 #define ST_INSN 2 /* Pretech type */ #define SRC_PREFETCH 1 #define DST_PREFETCH 2 #define LEGACY_MODE 1 #define EVA_MODE 2 #define USEROP 1 #define KERNELOP 2 /* * Wrapper to add an entry in the exception table * in case the insn causes a memory exception. 
* Arguments: * insn : Load/store instruction * type : Instruction type * reg : Register * addr : Address * handler : Exception handler */ #define EXC(insn, type, reg, addr, handler) \ .if \mode == LEGACY_MODE; \ 9: insn reg, addr; \ .section __ex_table,"a"; \ PTR 9b, handler; \ .previous; \ /* This is assembled in EVA mode */ \ .else; \ /* If loading from user or storing to user */ \ .if ((\from == USEROP) && (type == LD_INSN)) || \ ((\to == USEROP) && (type == ST_INSN)); \ 9: __BUILD_EVA_INSN(insn##e, reg, addr); \ .section __ex_table,"a"; \ PTR 9b, handler; \ .previous; \ .else; \ /* \ * Still in EVA, but no need for \ * exception handler or EVA insn \ */ \ insn reg, addr; \ .endif; \ .endif /* * Only on the 64-bit kernel we can made use of 64-bit registers. */ #ifdef CONFIG_64BIT #define USE_DOUBLE #endif #ifdef USE_DOUBLE #define LOADK ld /* No exception */ #define LOAD(reg, addr, handler) EXC(ld, LD_INSN, reg, addr, handler) #define LOADL(reg, addr, handler) EXC(ldl, LD_INSN, reg, addr, handler) #define LOADR(reg, addr, handler) EXC(ldr, LD_INSN, reg, addr, handler) #define STOREL(reg, addr, handler) EXC(sdl, ST_INSN, reg, addr, handler) #define STORER(reg, addr, handler) EXC(sdr, ST_INSN, reg, addr, handler) #define STORE(reg, addr, handler) EXC(sd, ST_INSN, reg, addr, handler) #define ADD daddu #define SUB dsubu #define SRL dsrl #define SRA dsra #define SLL dsll #define SLLV dsllv #define SRLV dsrlv #define NBYTES 8 #define LOG_NBYTES 3 /* * As we are sharing code base with the mips32 tree (which use the o32 ABI * register definitions). We need to redefine the register definitions from * the n64 ABI register naming to the o32 ABI register naming. 
*/ #undef t0 #undef t1 #undef t2 #undef t3 #define t0 $8 #define t1 $9 #define t2 $10 #define t3 $11 #define t4 $12 #define t5 $13 #define t6 $14 #define t7 $15 #else #define LOADK lw /* No exception */ #define LOAD(reg, addr, handler) EXC(lw, LD_INSN, reg, addr, handler) #define LOADL(reg, addr, handler) EXC(lwl, LD_INSN, reg, addr, handler) #define LOADR(reg, addr, handler) EXC(lwr, LD_INSN, reg, addr, handler) #define STOREL(reg, addr, handler) EXC(swl, ST_INSN, reg, addr, handler) #define STORER(reg, addr, handler) EXC(swr, ST_INSN, reg, addr, handler) #define STORE(reg, addr, handler) EXC(sw, ST_INSN, reg, addr, handler) #define ADD addu #define SUB subu #define SRL srl #define SLL sll #define SRA sra #define SLLV sllv #define SRLV srlv #define NBYTES 4 #define LOG_NBYTES 2 #endif /* USE_DOUBLE */ #define LOADB(reg, addr, handler) EXC(lb, LD_INSN, reg, addr, handler) #define STOREB(reg, addr, handler) EXC(sb, ST_INSN, reg, addr, handler) #define _PREF(hint, addr, type) \ .if \mode == LEGACY_MODE; \ PREF(hint, addr); \ .else; \ .if ((\from == USEROP) && (type == SRC_PREFETCH)) || \ ((\to == USEROP) && (type == DST_PREFETCH)); \ /* \ * PREFE has only 9 bits for the offset \ * compared to PREF which has 16, so it may \ * need to use the $at register but this \ * register should remain intact because it's \ * used later on. Therefore use $v1. 
\ */ \ .set at=v1; \ PREFE(hint, addr); \ .set noat; \ .else; \ PREF(hint, addr); \ .endif; \ .endif #define PREFS(hint, addr) _PREF(hint, addr, SRC_PREFETCH) #define PREFD(hint, addr) _PREF(hint, addr, DST_PREFETCH) #ifdef CONFIG_CPU_LITTLE_ENDIAN #define LDFIRST LOADR #define LDREST LOADL #define STFIRST STORER #define STREST STOREL #define SHIFT_DISCARD SLLV #else #define LDFIRST LOADL #define LDREST LOADR #define STFIRST STOREL #define STREST STORER #define SHIFT_DISCARD SRLV #endif #define FIRST(unit) ((unit)*NBYTES) #define REST(unit) (FIRST(unit)+NBYTES-1) #define UNIT(unit) FIRST(unit) #define ADDRMASK (NBYTES-1) .text .set noreorder #ifndef CONFIG_CPU_DADDI_WORKAROUNDS .set noat #else .set at=v1 #endif .align 5 /* * Macro to build the __copy_user common code * Arguments: * mode : LEGACY_MODE or EVA_MODE * from : Source operand. USEROP or KERNELOP * to : Destination operand. USEROP or KERNELOP */ .macro __BUILD_COPY_USER mode, from, to /* initialize __memcpy if this the first time we execute this macro */ .ifnotdef __memcpy .set __memcpy, 1 .hidden __memcpy /* make sure it does not leak */ .endif /* * Note: dst & src may be unaligned, len may be 0 * Temps */ #define rem t8 R10KCBARRIER(0(ra)) /* * The "issue break"s below are very approximate. * Issue delays for dcache fills will perturb the schedule, as will * load queue full replay traps, etc. * * If len < NBYTES use byte operations. 
*/ PREFS( 0, 0(src) ) PREFD( 1, 0(dst) ) sltu t2, len, NBYTES and t1, dst, ADDRMASK PREFS( 0, 1*32(src) ) PREFD( 1, 1*32(dst) ) bnez t2, .Lcopy_bytes_checklen\@ and t0, src, ADDRMASK PREFS( 0, 2*32(src) ) PREFD( 1, 2*32(dst) ) #ifndef CONFIG_CPU_MIPSR6 bnez t1, .Ldst_unaligned\@ nop bnez t0, .Lsrc_unaligned_dst_aligned\@ #else or t0, t0, t1 bnez t0, .Lcopy_unaligned_bytes\@ #endif /* * use delay slot for fall-through * src and dst are aligned; need to compute rem */ .Lboth_aligned\@: SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter beqz t0, .Lcleanup_both_aligned\@ # len < 8*NBYTES and rem, len, (8*NBYTES-1) # rem = len % (8*NBYTES) PREFS( 0, 3*32(src) ) PREFD( 1, 3*32(dst) ) .align 4 1: R10KCBARRIER(0(ra)) LOAD(t0, UNIT(0)(src), .Ll_exc\@) LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@) LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@) LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@) SUB len, len, 8*NBYTES LOAD(t4, UNIT(4)(src), .Ll_exc_copy\@) LOAD(t7, UNIT(5)(src), .Ll_exc_copy\@) STORE(t0, UNIT(0)(dst), .Ls_exc_p8u\@) STORE(t1, UNIT(1)(dst), .Ls_exc_p7u\@) LOAD(t0, UNIT(6)(src), .Ll_exc_copy\@) LOAD(t1, UNIT(7)(src), .Ll_exc_copy\@) ADD src, src, 8*NBYTES ADD dst, dst, 8*NBYTES STORE(t2, UNIT(-6)(dst), .Ls_exc_p6u\@) STORE(t3, UNIT(-5)(dst), .Ls_exc_p5u\@) STORE(t4, UNIT(-4)(dst), .Ls_exc_p4u\@) STORE(t7, UNIT(-3)(dst), .Ls_exc_p3u\@) STORE(t0, UNIT(-2)(dst), .Ls_exc_p2u\@) STORE(t1, UNIT(-1)(dst), .Ls_exc_p1u\@) PREFS( 0, 8*32(src) ) PREFD( 1, 8*32(dst) ) bne len, rem, 1b nop /* * len == rem == the number of bytes left to copy < 8*NBYTES */ .Lcleanup_both_aligned\@: beqz len, .Ldone\@ sltu t0, len, 4*NBYTES bnez t0, .Lless_than_4units\@ and rem, len, (NBYTES-1) # rem = len % NBYTES /* * len >= 4*NBYTES */ LOAD( t0, UNIT(0)(src), .Ll_exc\@) LOAD( t1, UNIT(1)(src), .Ll_exc_copy\@) LOAD( t2, UNIT(2)(src), .Ll_exc_copy\@) LOAD( t3, UNIT(3)(src), .Ll_exc_copy\@) SUB len, len, 4*NBYTES ADD src, src, 4*NBYTES R10KCBARRIER(0(ra)) STORE(t0, UNIT(0)(dst), .Ls_exc_p4u\@) STORE(t1, UNIT(1)(dst), 
.Ls_exc_p3u\@) STORE(t2, UNIT(2)(dst), .Ls_exc_p2u\@) STORE(t3, UNIT(3)(dst), .Ls_exc_p1u\@) .set reorder /* DADDI_WAR */ ADD dst, dst, 4*NBYTES beqz len, .Ldone\@ .set noreorder .Lless_than_4units\@: /* * rem = len % NBYTES */ beq rem, len, .Lcopy_bytes\@ nop 1: R10KCBARRIER(0(ra)) LOAD(t0, 0(src), .Ll_exc\@) ADD src, src, NBYTES SUB len, len, NBYTES STORE(t0, 0(dst), .Ls_exc_p1u\@) .set reorder /* DADDI_WAR */ ADD dst, dst, NBYTES bne rem, len, 1b .set noreorder #ifndef CONFIG_CPU_MIPSR6 /* * src and dst are aligned, need to copy rem bytes (rem < NBYTES) * A loop would do only a byte at a time with possible branch * mispredicts. Can't do an explicit LOAD dst,mask,or,STORE * because can't assume read-access to dst. Instead, use * STREST dst, which doesn't require read access to dst. * * This code should perform better than a simple loop on modern, * wide-issue mips processors because the code has fewer branches and * more instruction-level parallelism. */ #define bits t2 beqz len, .Ldone\@ ADD t1, dst, len # t1 is just past last byte of dst li bits, 8*NBYTES SLL rem, len, 3 # rem = number of bits to keep LOAD(t0, 0(src), .Ll_exc\@) SUB bits, bits, rem # bits = number of bits to discard SHIFT_DISCARD t0, t0, bits STREST(t0, -1(t1), .Ls_exc\@) jr ra move len, zero .Ldst_unaligned\@: /* * dst is unaligned * t0 = src & ADDRMASK * t1 = dst & ADDRMASK; T1 > 0 * len >= NBYTES * * Copy enough bytes to align dst * Set match = (src and dst have same alignment) */ #define match rem LDFIRST(t3, FIRST(0)(src), .Ll_exc\@) ADD t2, zero, NBYTES LDREST(t3, REST(0)(src), .Ll_exc_copy\@) SUB t2, t2, t1 # t2 = number of bytes copied xor match, t0, t1 R10KCBARRIER(0(ra)) STFIRST(t3, FIRST(0)(dst), .Ls_exc\@) beq len, t2, .Ldone\@ SUB len, len, t2 ADD dst, dst, t2 beqz match, .Lboth_aligned\@ ADD src, src, t2 .Lsrc_unaligned_dst_aligned\@: SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter PREFS( 0, 3*32(src) ) beqz t0, .Lcleanup_src_unaligned\@ and rem, len, (4*NBYTES-1) # rem = len % 
4*NBYTES PREFD( 1, 3*32(dst) ) 1: /* * Avoid consecutive LD*'s to the same register since some mips * implementations can't issue them in the same cycle. * It's OK to load FIRST(N+1) before REST(N) because the two addresses * are to the same unit (unless src is aligned, but it's not). */ R10KCBARRIER(0(ra)) LDFIRST(t0, FIRST(0)(src), .Ll_exc\@) LDFIRST(t1, FIRST(1)(src), .Ll_exc_copy\@) SUB len, len, 4*NBYTES LDREST(t0, REST(0)(src), .Ll_exc_copy\@) LDREST(t1, REST(1)(src), .Ll_exc_copy\@) LDFIRST(t2, FIRST(2)(src), .Ll_exc_copy\@) LDFIRST(t3, FIRST(3)(src), .Ll_exc_copy\@) LDREST(t2, REST(2)(src), .Ll_exc_copy\@) LDREST(t3, REST(3)(src), .Ll_exc_copy\@) PREFS( 0, 9*32(src) ) # 0 is PREF_LOAD (not streamed) ADD src, src, 4*NBYTES #ifdef CONFIG_CPU_SB1 nop # improves slotting #endif STORE(t0, UNIT(0)(dst), .Ls_exc_p4u\@) STORE(t1, UNIT(1)(dst), .Ls_exc_p3u\@) STORE(t2, UNIT(2)(dst), .Ls_exc_p2u\@) STORE(t3, UNIT(3)(dst), .Ls_exc_p1u\@) PREFD( 1, 9*32(dst) ) # 1 is PREF_STORE (not streamed) .set reorder /* DADDI_WAR */ ADD dst, dst, 4*NBYTES bne len, rem, 1b .set noreorder .Lcleanup_src_unaligned\@: beqz len, .Ldone\@ and rem, len, NBYTES-1 # rem = len % NBYTES beq rem, len, .Lcopy_bytes\@ nop 1: R10KCBARRIER(0(ra)) LDFIRST(t0, FIRST(0)(src), .Ll_exc\@) LDREST(t0, REST(0)(src), .Ll_exc_copy\@) ADD src, src, NBYTES SUB len, len, NBYTES STORE(t0, 0(dst), .Ls_exc_p1u\@) .set reorder /* DADDI_WAR */ ADD dst, dst, NBYTES bne len, rem, 1b .set noreorder #endif /* !CONFIG_CPU_MIPSR6 */ .Lcopy_bytes_checklen\@: beqz len, .Ldone\@ nop .Lcopy_bytes\@: /* 0 < len < NBYTES */ R10KCBARRIER(0(ra)) #define COPY_BYTE(N) \ LOADB(t0, N(src), .Ll_exc\@); \ SUB len, len, 1; \ beqz len, .Ldone\@; \ STOREB(t0, N(dst), .Ls_exc_p1\@) COPY_BYTE(0) COPY_BYTE(1) #ifdef USE_DOUBLE COPY_BYTE(2) COPY_BYTE(3) COPY_BYTE(4) COPY_BYTE(5) #endif LOADB(t0, NBYTES-2(src), .Ll_exc\@) SUB len, len, 1 jr ra STOREB(t0, NBYTES-2(dst), .Ls_exc_p1\@) .Ldone\@: jr ra nop #ifdef CONFIG_CPU_MIPSR6 
.Lcopy_unaligned_bytes\@: 1: COPY_BYTE(0) COPY_BYTE(1) COPY_BYTE(2) COPY_BYTE(3) COPY_BYTE(4) COPY_BYTE(5) COPY_BYTE(6) COPY_BYTE(7) ADD src, src, 8 b 1b ADD dst, dst, 8 #endif /* CONFIG_CPU_MIPSR6 */ .if __memcpy == 1 END(memcpy) .set __memcpy, 0 .hidden __memcpy .endif .Ll_exc_copy\@: /* * Copy bytes from src until faulting load address (or until a * lb faults) * * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28) * may be more than a byte beyond the last address. * Hence, the lb below may get an exception. * * Assumes src < THREAD_BUADDR($28) */ LOADK t0, TI_TASK($28) nop LOADK t0, THREAD_BUADDR(t0) 1: LOADB(t1, 0(src), .Ll_exc\@) ADD src, src, 1 sb t1, 0(dst) # can't fault -- we're copy_from_user .set reorder /* DADDI_WAR */ ADD dst, dst, 1 bne src, t0, 1b .set noreorder .Ll_exc\@: LOADK t0, TI_TASK($28) nop LOADK t0, THREAD_BUADDR(t0) # t0 is just past last good address nop SUB len, AT, t0 # len number of uncopied bytes jr ra nop #define SEXC(n) \ .set reorder; /* DADDI_WAR */ \ .Ls_exc_p ## n ## u\@: \ ADD len, len, n*NBYTES; \ jr ra; \ .set noreorder SEXC(8) SEXC(7) SEXC(6) SEXC(5) SEXC(4) SEXC(3) SEXC(2) SEXC(1) .Ls_exc_p1\@: .set reorder /* DADDI_WAR */ ADD len, len, 1 jr ra .set noreorder .Ls_exc\@: jr ra nop .endm .align 5 LEAF(memmove) EXPORT_SYMBOL(memmove) ADD t0, a0, a2 ADD t1, a1, a2 sltu t0, a1, t0 # dst + len <= src -> memcpy sltu t1, a0, t1 # dst >= src + len -> memcpy and t0, t1 beqz t0, .L__memcpy move v0, a0 /* return value */ beqz a2, .Lr_out END(memmove) /* fall through to __rmemcpy */ LEAF(__rmemcpy) /* a0=dst a1=src a2=len */ sltu t0, a1, a0 beqz t0, .Lr_end_bytes_up # src >= dst nop ADD a0, a2 # dst = dst + len ADD a1, a2 # src = src + len .Lr_end_bytes: R10KCBARRIER(0(ra)) lb t0, -1(a1) SUB a2, a2, 0x1 sb t0, -1(a0) SUB a1, a1, 0x1 .set reorder /* DADDI_WAR */ SUB a0, a0, 0x1 bnez a2, .Lr_end_bytes .set noreorder .Lr_out: jr ra move a2, zero .Lr_end_bytes_up: R10KCBARRIER(0(ra)) lb t0, (a1) SUB a2, a2, 0x1 sb t0, (a0) ADD a1, 
a1, 0x1 .set reorder /* DADDI_WAR */ ADD a0, a0, 0x1 bnez a2, .Lr_end_bytes_up .set noreorder jr ra move a2, zero END(__rmemcpy) /* * A combined memcpy/__copy_user * __copy_user sets len to 0 for success; else to an upper bound of * the number of uncopied bytes. * memcpy sets v0 to dst. */ .align 5 LEAF(memcpy) /* a0=dst a1=src a2=len */ EXPORT_SYMBOL(memcpy) move v0, dst /* return value */ .L__memcpy: FEXPORT(__copy_user) EXPORT_SYMBOL(__copy_user) /* Legacy Mode, user <-> user */ __BUILD_COPY_USER LEGACY_MODE USEROP USEROP #ifdef CONFIG_EVA /* * For EVA we need distinct symbols for reading and writing to user space. * This is because we need to use specific EVA instructions to perform the * virtual <-> physical translation when a virtual address is actually in user * space */ /* * __copy_from_user (EVA) */ LEAF(__copy_from_user_eva) EXPORT_SYMBOL(__copy_from_user_eva) __BUILD_COPY_USER EVA_MODE USEROP KERNELOP END(__copy_from_user_eva) /* * __copy_to_user (EVA) */ LEAF(__copy_to_user_eva) EXPORT_SYMBOL(__copy_to_user_eva) __BUILD_COPY_USER EVA_MODE KERNELOP USEROP END(__copy_to_user_eva) /* * __copy_in_user (EVA) */ LEAF(__copy_in_user_eva) EXPORT_SYMBOL(__copy_in_user_eva) __BUILD_COPY_USER EVA_MODE USEROP USEROP END(__copy_in_user_eva) #endif
AirFortressIlikara/LS2K0300-linux-4.19
7,582
arch/mips/lib/memset.S
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1998, 1999, 2000 by Ralf Baechle * Copyright (C) 1999, 2000 Silicon Graphics, Inc. * Copyright (C) 2007 by Maciej W. Rozycki * Copyright (C) 2011, 2012 MIPS Technologies, Inc. */ #include <asm/asm.h> #include <asm/asm-offsets.h> #include <asm/export.h> #include <asm/regdef.h> #if LONGSIZE == 4 #define LONG_S_L swl #define LONG_S_R swr #else #define LONG_S_L sdl #define LONG_S_R sdr #endif #ifdef CONFIG_CPU_MICROMIPS #define STORSIZE (LONGSIZE * 2) #define STORMASK (STORSIZE - 1) #define FILL64RG t8 #define FILLPTRG t7 #undef LONG_S #define LONG_S LONG_SP #else #define STORSIZE LONGSIZE #define STORMASK LONGMASK #define FILL64RG a1 #define FILLPTRG t0 #endif #define LEGACY_MODE 1 #define EVA_MODE 2 /* * No need to protect it with EVA #ifdefery. The generated block of code * will never be assembled if EVA is not enabled. 
*/ #define __EVAFY(insn, reg, addr) __BUILD_EVA_INSN(insn##e, reg, addr) #define ___BUILD_EVA_INSN(insn, reg, addr) __EVAFY(insn, reg, addr) #define EX(insn,reg,addr,handler) \ .if \mode == LEGACY_MODE; \ 9: insn reg, addr; \ .else; \ 9: ___BUILD_EVA_INSN(insn, reg, addr); \ .endif; \ .section __ex_table,"a"; \ PTR 9b, handler; \ .previous .macro f_fill64 dst, offset, val, fixup, mode EX(LONG_S, \val, (\offset + 0 * STORSIZE)(\dst), \fixup) EX(LONG_S, \val, (\offset + 1 * STORSIZE)(\dst), \fixup) EX(LONG_S, \val, (\offset + 2 * STORSIZE)(\dst), \fixup) EX(LONG_S, \val, (\offset + 3 * STORSIZE)(\dst), \fixup) #if ((defined(CONFIG_CPU_MICROMIPS) && (LONGSIZE == 4)) || !defined(CONFIG_CPU_MICROMIPS)) EX(LONG_S, \val, (\offset + 4 * STORSIZE)(\dst), \fixup) EX(LONG_S, \val, (\offset + 5 * STORSIZE)(\dst), \fixup) EX(LONG_S, \val, (\offset + 6 * STORSIZE)(\dst), \fixup) EX(LONG_S, \val, (\offset + 7 * STORSIZE)(\dst), \fixup) #endif #if (!defined(CONFIG_CPU_MICROMIPS) && (LONGSIZE == 4)) EX(LONG_S, \val, (\offset + 8 * STORSIZE)(\dst), \fixup) EX(LONG_S, \val, (\offset + 9 * STORSIZE)(\dst), \fixup) EX(LONG_S, \val, (\offset + 10 * STORSIZE)(\dst), \fixup) EX(LONG_S, \val, (\offset + 11 * STORSIZE)(\dst), \fixup) EX(LONG_S, \val, (\offset + 12 * STORSIZE)(\dst), \fixup) EX(LONG_S, \val, (\offset + 13 * STORSIZE)(\dst), \fixup) EX(LONG_S, \val, (\offset + 14 * STORSIZE)(\dst), \fixup) EX(LONG_S, \val, (\offset + 15 * STORSIZE)(\dst), \fixup) #endif .endm .set noreorder .align 5 /* * Macro to generate the __bzero{,_user} symbol * Arguments: * mode: LEGACY_MODE or EVA_MODE */ .macro __BUILD_BZERO mode /* Initialize __memset if this is the first time we call this macro */ .ifnotdef __memset .set __memset, 1 .hidden __memset /* Make sure it does not leak */ .endif sltiu t0, a2, STORSIZE /* very small region? */ bnez t0, .Lsmall_memset\@ andi t0, a0, STORMASK /* aligned? 
*/ #ifdef CONFIG_CPU_MICROMIPS move t8, a1 /* used by 'swp' instruction */ move t9, a1 #endif #ifndef CONFIG_CPU_DADDI_WORKAROUNDS beqz t0, 1f PTR_SUBU t0, STORSIZE /* alignment in bytes */ #else .set noat li AT, STORSIZE beqz t0, 1f PTR_SUBU t0, AT /* alignment in bytes */ .set at #endif #ifndef CONFIG_CPU_MIPSR6 R10KCBARRIER(0(ra)) #ifdef __MIPSEB__ EX(LONG_S_L, a1, (a0), .Lfirst_fixup\@) /* make word/dword aligned */ #else EX(LONG_S_R, a1, (a0), .Lfirst_fixup\@) /* make word/dword aligned */ #endif PTR_SUBU a0, t0 /* long align ptr */ PTR_ADDU a2, t0 /* correct size */ #else /* CONFIG_CPU_MIPSR6 */ #define STORE_BYTE(N) \ EX(sb, a1, N(a0), .Lbyte_fixup\@); \ beqz t0, 0f; \ PTR_ADDU t0, 1; PTR_ADDU a2, t0 /* correct size */ PTR_ADDU t0, 1 STORE_BYTE(0) STORE_BYTE(1) #if LONGSIZE == 4 EX(sb, a1, 2(a0), .Lbyte_fixup\@) #else STORE_BYTE(2) STORE_BYTE(3) STORE_BYTE(4) STORE_BYTE(5) EX(sb, a1, 6(a0), .Lbyte_fixup\@) #endif 0: ori a0, STORMASK xori a0, STORMASK PTR_ADDIU a0, STORSIZE #endif /* CONFIG_CPU_MIPSR6 */ 1: ori t1, a2, 0x3f /* # of full blocks */ xori t1, 0x3f beqz t1, .Lmemset_partial\@ /* no block to fill */ andi t0, a2, 0x40-STORSIZE PTR_ADDU t1, a0 /* end address */ .set reorder 1: PTR_ADDIU a0, 64 R10KCBARRIER(0(ra)) f_fill64 a0, -64, FILL64RG, .Lfwd_fixup\@, \mode bne t1, a0, 1b .set noreorder .Lmemset_partial\@: R10KCBARRIER(0(ra)) PTR_LA t1, 2f /* where to start */ #ifdef CONFIG_CPU_MICROMIPS LONG_SRL t7, t0, 1 #endif #if LONGSIZE == 4 PTR_SUBU t1, FILLPTRG #else .set noat LONG_SRL AT, FILLPTRG, 1 PTR_SUBU t1, AT .set at #endif jr t1 PTR_ADDU a0, t0 /* dest ptr */ .set push .set noreorder .set nomacro /* ... but first do longs ... 
*/ f_fill64 a0, -64, FILL64RG, .Lpartial_fixup\@, \mode 2: .set pop andi a2, STORMASK /* At most one long to go */ beqz a2, 1f #ifndef CONFIG_CPU_MIPSR6 PTR_ADDU a0, a2 /* What's left */ R10KCBARRIER(0(ra)) #ifdef __MIPSEB__ EX(LONG_S_R, a1, -1(a0), .Llast_fixup\@) #else EX(LONG_S_L, a1, -1(a0), .Llast_fixup\@) #endif #else PTR_SUBU t0, $0, a2 move a2, zero /* No remaining longs */ PTR_ADDIU t0, 1 STORE_BYTE(0) STORE_BYTE(1) #if LONGSIZE == 4 EX(sb, a1, 2(a0), .Lbyte_fixup\@) #else STORE_BYTE(2) STORE_BYTE(3) STORE_BYTE(4) STORE_BYTE(5) EX(sb, a1, 6(a0), .Lbyte_fixup\@) #endif 0: #endif 1: jr ra move a2, zero .Lsmall_memset\@: beqz a2, 2f PTR_ADDU t1, a0, a2 1: PTR_ADDIU a0, 1 /* fill bytewise */ R10KCBARRIER(0(ra)) bne t1, a0, 1b EX(sb, a1, -1(a0), .Lsmall_fixup\@) 2: jr ra /* done */ move a2, zero .if __memset == 1 END(memset) .set __memset, 0 .hidden __memset .endif #ifdef CONFIG_CPU_MIPSR6 .Lbyte_fixup\@: /* * unset_bytes = (#bytes - (#unaligned bytes)) - (-#unaligned bytes remaining + 1) + 1 * a2 = a2 - t0 + 1 */ PTR_SUBU a2, t0 jr ra PTR_ADDIU a2, 1 #endif /* CONFIG_CPU_MIPSR6 */ .Lfirst_fixup\@: /* unset_bytes already in a2 */ jr ra nop .Lfwd_fixup\@: /* * unset_bytes = partial_start_addr + #bytes - fault_addr * a2 = t1 + (a2 & 3f) - $28->task->BUADDR */ PTR_L t0, TI_TASK($28) andi a2, 0x3f LONG_L t0, THREAD_BUADDR(t0) LONG_ADDU a2, t1 jr ra LONG_SUBU a2, t0 .Lpartial_fixup\@: /* * unset_bytes = partial_end_addr + #bytes - fault_addr * a2 = a0 + (a2 & STORMASK) - $28->task->BUADDR */ PTR_L t0, TI_TASK($28) andi a2, STORMASK LONG_L t0, THREAD_BUADDR(t0) LONG_ADDU a2, a0 jr ra LONG_SUBU a2, t0 .Llast_fixup\@: /* unset_bytes already in a2 */ jr ra nop .Lsmall_fixup\@: /* * unset_bytes = end_addr - current_addr + 1 * a2 = t1 - a0 + 1 */ .set reorder PTR_SUBU a2, t1, a0 PTR_ADDIU a2, 1 jr ra .set noreorder .endm /* * memset(void *s, int c, size_t n) * * a0: start of area to clear * a1: char to fill with * a2: size of area to clear */ LEAF(memset) 
EXPORT_SYMBOL(memset) beqz a1, 1f move v0, a0 /* result */ andi a1, 0xff /* spread fillword */ LONG_SLL t1, a1, 8 or a1, t1 LONG_SLL t1, a1, 16 #if LONGSIZE == 8 or a1, t1 LONG_SLL t1, a1, 32 #endif or a1, t1 1: #ifndef CONFIG_EVA FEXPORT(__bzero) EXPORT_SYMBOL(__bzero) #else FEXPORT(__bzero_kernel) EXPORT_SYMBOL(__bzero_kernel) #endif __BUILD_BZERO LEGACY_MODE #ifdef CONFIG_EVA LEAF(__bzero) EXPORT_SYMBOL(__bzero) __BUILD_BZERO EVA_MODE END(__bzero) #endif
AirFortressIlikara/LS2K0300-linux-4.19
1,850
arch/mips/lib/strncpy_user.S
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1996, 1999 by Ralf Baechle * Copyright (C) 2011 MIPS Technologies, Inc. */ #include <linux/errno.h> #include <asm/asm.h> #include <asm/asm-offsets.h> #include <asm/export.h> #include <asm/regdef.h> #define EX(insn,reg,addr,handler) \ 9: insn reg, addr; \ .section __ex_table,"a"; \ PTR 9b, handler; \ .previous /* * Returns: -EFAULT if exception before terminator, N if the entire * buffer filled, else strlen. */ /* * Ugly special case have to check: we might get passed a user space * pointer which wraps into the kernel space. We don't deal with that. If * it happens at most some bytes of the exceptions handlers will be copied. */ .macro __BUILD_STRNCPY_ASM func LEAF(__strncpy_from_\func\()_asm) LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok? and v0, a1 bnez v0, .Lfault\@ move t0, zero move v1, a1 .ifeqs "\func","kernel" 1: EX(lbu, v0, (v1), .Lfault\@) .else 1: EX(lbue, v0, (v1), .Lfault\@) .endif PTR_ADDIU v1, 1 R10KCBARRIER(0(ra)) sb v0, (a0) beqz v0, 2f PTR_ADDIU t0, 1 PTR_ADDIU a0, 1 bne t0, a2, 1b 2: PTR_ADDU v0, a1, t0 xor v0, a1 bltz v0, .Lfault\@ move v0, t0 jr ra # return n END(__strncpy_from_\func\()_asm) .Lfault\@: li v0, -EFAULT jr ra .section __ex_table,"a" PTR 1b, .Lfault\@ .previous .endm #ifndef CONFIG_EVA /* Set aliases */ .global __strncpy_from_user_asm .set __strncpy_from_user_asm, __strncpy_from_kernel_asm EXPORT_SYMBOL(__strncpy_from_user_asm) #endif __BUILD_STRNCPY_ASM kernel EXPORT_SYMBOL(__strncpy_from_kernel_asm) #ifdef CONFIG_EVA .set push .set eva __BUILD_STRNCPY_ASM user .set pop EXPORT_SYMBOL(__strncpy_from_user_asm) #endif
AirFortressIlikara/LS2K0300-linux-4.19
19,775
arch/mips/lib/csum_partial.S
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Quick'n'dirty IP checksum ... * * Copyright (C) 1998, 1999 Ralf Baechle * Copyright (C) 1999 Silicon Graphics, Inc. * Copyright (C) 2007 Maciej W. Rozycki * Copyright (C) 2014 Imagination Technologies Ltd. */ #include <linux/errno.h> #include <asm/asm.h> #include <asm/asm-offsets.h> #include <asm/export.h> #include <asm/regdef.h> #ifdef CONFIG_64BIT /* * As we are sharing code base with the mips32 tree (which use the o32 ABI * register definitions). We need to redefine the register definitions from * the n64 ABI register naming to the o32 ABI register naming. */ #undef t0 #undef t1 #undef t2 #undef t3 #define t0 $8 #define t1 $9 #define t2 $10 #define t3 $11 #define t4 $12 #define t5 $13 #define t6 $14 #define t7 $15 #define USE_DOUBLE #endif #ifdef USE_DOUBLE #define LOAD ld #define LOAD32 lwu #define ADD daddu #define NBYTES 8 #else #define LOAD lw #define LOAD32 lw #define ADD addu #define NBYTES 4 #endif /* USE_DOUBLE */ #define UNIT(unit) ((unit)*NBYTES) #define ADDC(sum,reg) \ .set push; \ .set noat; \ ADD sum, reg; \ sltu v1, sum, reg; \ ADD sum, v1; \ .set pop #define ADDC32(sum,reg) \ .set push; \ .set noat; \ addu sum, reg; \ sltu v1, sum, reg; \ addu sum, v1; \ .set pop #define CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3) \ LOAD _t0, (offset + UNIT(0))(src); \ LOAD _t1, (offset + UNIT(1))(src); \ LOAD _t2, (offset + UNIT(2))(src); \ LOAD _t3, (offset + UNIT(3))(src); \ ADDC(_t0, _t1); \ ADDC(_t2, _t3); \ ADDC(sum, _t0); \ ADDC(sum, _t2) #ifdef USE_DOUBLE #define CSUM_BIGCHUNK(src, offset, sum, _t0, _t1, _t2, _t3) \ CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3) #else #define CSUM_BIGCHUNK(src, offset, sum, _t0, _t1, _t2, _t3) \ CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3); \ CSUM_BIGCHUNK1(src, offset + 0x10, sum, _t0, _t1, _t2, _t3) #endif /* * a0: source 
address * a1: length of the area to checksum * a2: partial checksum */ #define src a0 #define sum v0 .text .set noreorder .align 5 LEAF(csum_partial) EXPORT_SYMBOL(csum_partial) move sum, zero move t7, zero sltiu t8, a1, 0x8 bnez t8, .Lsmall_csumcpy /* < 8 bytes to copy */ move t2, a1 andi t7, src, 0x1 /* odd buffer? */ .Lhword_align: beqz t7, .Lword_align andi t8, src, 0x2 lbu t0, (src) LONG_SUBU a1, a1, 0x1 #ifdef __MIPSEL__ sll t0, t0, 8 #endif ADDC(sum, t0) PTR_ADDU src, src, 0x1 andi t8, src, 0x2 .Lword_align: beqz t8, .Ldword_align sltiu t8, a1, 56 lhu t0, (src) LONG_SUBU a1, a1, 0x2 ADDC(sum, t0) sltiu t8, a1, 56 PTR_ADDU src, src, 0x2 .Ldword_align: bnez t8, .Ldo_end_words move t8, a1 andi t8, src, 0x4 beqz t8, .Lqword_align andi t8, src, 0x8 LOAD32 t0, 0x00(src) LONG_SUBU a1, a1, 0x4 ADDC(sum, t0) PTR_ADDU src, src, 0x4 andi t8, src, 0x8 .Lqword_align: beqz t8, .Loword_align andi t8, src, 0x10 #ifdef USE_DOUBLE ld t0, 0x00(src) LONG_SUBU a1, a1, 0x8 ADDC(sum, t0) #else lw t0, 0x00(src) lw t1, 0x04(src) LONG_SUBU a1, a1, 0x8 ADDC(sum, t0) ADDC(sum, t1) #endif PTR_ADDU src, src, 0x8 andi t8, src, 0x10 .Loword_align: beqz t8, .Lbegin_movement LONG_SRL t8, a1, 0x7 #ifdef USE_DOUBLE ld t0, 0x00(src) ld t1, 0x08(src) ADDC(sum, t0) ADDC(sum, t1) #else CSUM_BIGCHUNK1(src, 0x00, sum, t0, t1, t3, t4) #endif LONG_SUBU a1, a1, 0x10 PTR_ADDU src, src, 0x10 LONG_SRL t8, a1, 0x7 .Lbegin_movement: beqz t8, 1f andi t2, a1, 0x40 .Lmove_128bytes: CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4) CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4) CSUM_BIGCHUNK(src, 0x40, sum, t0, t1, t3, t4) CSUM_BIGCHUNK(src, 0x60, sum, t0, t1, t3, t4) LONG_SUBU t8, t8, 0x01 .set reorder /* DADDI_WAR */ PTR_ADDU src, src, 0x80 bnez t8, .Lmove_128bytes .set noreorder 1: beqz t2, 1f andi t2, a1, 0x20 .Lmove_64bytes: CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4) CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4) PTR_ADDU src, src, 0x40 1: beqz t2, .Ldo_end_words andi t8, a1, 0x1c .Lmove_32bytes: 
CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4) andi t8, a1, 0x1c PTR_ADDU src, src, 0x20 .Ldo_end_words: beqz t8, .Lsmall_csumcpy andi t2, a1, 0x3 LONG_SRL t8, t8, 0x2 .Lend_words: LOAD32 t0, (src) LONG_SUBU t8, t8, 0x1 ADDC(sum, t0) .set reorder /* DADDI_WAR */ PTR_ADDU src, src, 0x4 bnez t8, .Lend_words .set noreorder /* unknown src alignment and < 8 bytes to go */ .Lsmall_csumcpy: move a1, t2 andi t0, a1, 4 beqz t0, 1f andi t0, a1, 2 /* Still a full word to go */ ulw t1, (src) PTR_ADDIU src, 4 #ifdef USE_DOUBLE dsll t1, t1, 32 /* clear lower 32bit */ #endif ADDC(sum, t1) 1: move t1, zero beqz t0, 1f andi t0, a1, 1 /* Still a halfword to go */ ulhu t1, (src) PTR_ADDIU src, 2 1: beqz t0, 1f sll t1, t1, 16 lbu t2, (src) nop #ifdef __MIPSEB__ sll t2, t2, 8 #endif or t1, t2 1: ADDC(sum, t1) /* fold checksum */ #ifdef USE_DOUBLE dsll32 v1, sum, 0 daddu sum, v1 sltu v1, sum, v1 dsra32 sum, sum, 0 addu sum, v1 #endif /* odd buffer alignment? */ #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_LOONGSON3) .set push .set arch=mips32r2 wsbh v1, sum movn sum, v1, t7 .set pop #else beqz t7, 1f /* odd buffer alignment? */ lui v1, 0x00ff addu v1, 0x00ff and t0, sum, v1 sll t0, t0, 8 srl sum, sum, 8 and sum, sum, v1 or sum, sum, t0 1: #endif .set reorder /* Add the passed partial csum. */ ADDC32(sum, a2) jr ra .set noreorder END(csum_partial) /* * checksum and copy routines based on memcpy.S * * csum_partial_copy_nocheck(src, dst, len, sum) * __csum_partial_copy_kernel(src, dst, len, sum, errp) * * See "Spec" in memcpy.S for details. Unlike __copy_user, all * function in this file use the standard calling convention. 
*/ #define src a0 #define dst a1 #define len a2 #define psum a3 #define sum v0 #define odd t8 #define errptr t9 /* * The exception handler for loads requires that: * 1- AT contain the address of the byte just past the end of the source * of the copy, * 2- src_entry <= src < AT, and * 3- (dst - src) == (dst_entry - src_entry), * The _entry suffix denotes values when __copy_user was called. * * (1) is set up up by __csum_partial_copy_from_user and maintained by * not writing AT in __csum_partial_copy * (2) is met by incrementing src by the number of bytes copied * (3) is met by not doing loads between a pair of increments of dst and src * * The exception handlers for stores stores -EFAULT to errptr and return. * These handlers do not need to overwrite any data. */ /* Instruction type */ #define LD_INSN 1 #define ST_INSN 2 #define LEGACY_MODE 1 #define EVA_MODE 2 #define USEROP 1 #define KERNELOP 2 /* * Wrapper to add an entry in the exception table * in case the insn causes a memory exception. 
* Arguments: * insn : Load/store instruction * type : Instruction type * reg : Register * addr : Address * handler : Exception handler */ #define EXC(insn, type, reg, addr, handler) \ .if \mode == LEGACY_MODE; \ 9: insn reg, addr; \ .section __ex_table,"a"; \ PTR 9b, handler; \ .previous; \ /* This is enabled in EVA mode */ \ .else; \ /* If loading from user or storing to user */ \ .if ((\from == USEROP) && (type == LD_INSN)) || \ ((\to == USEROP) && (type == ST_INSN)); \ 9: __BUILD_EVA_INSN(insn##e, reg, addr); \ .section __ex_table,"a"; \ PTR 9b, handler; \ .previous; \ .else; \ /* EVA without exception */ \ insn reg, addr; \ .endif; \ .endif #undef LOAD #ifdef USE_DOUBLE #define LOADK ld /* No exception */ #define LOAD(reg, addr, handler) EXC(ld, LD_INSN, reg, addr, handler) #define LOADBU(reg, addr, handler) EXC(lbu, LD_INSN, reg, addr, handler) #define LOADL(reg, addr, handler) EXC(ldl, LD_INSN, reg, addr, handler) #define LOADR(reg, addr, handler) EXC(ldr, LD_INSN, reg, addr, handler) #define STOREB(reg, addr, handler) EXC(sb, ST_INSN, reg, addr, handler) #define STOREL(reg, addr, handler) EXC(sdl, ST_INSN, reg, addr, handler) #define STORER(reg, addr, handler) EXC(sdr, ST_INSN, reg, addr, handler) #define STORE(reg, addr, handler) EXC(sd, ST_INSN, reg, addr, handler) #define ADD daddu #define SUB dsubu #define SRL dsrl #define SLL dsll #define SLLV dsllv #define SRLV dsrlv #define NBYTES 8 #define LOG_NBYTES 3 #else #define LOADK lw /* No exception */ #define LOAD(reg, addr, handler) EXC(lw, LD_INSN, reg, addr, handler) #define LOADBU(reg, addr, handler) EXC(lbu, LD_INSN, reg, addr, handler) #define LOADL(reg, addr, handler) EXC(lwl, LD_INSN, reg, addr, handler) #define LOADR(reg, addr, handler) EXC(lwr, LD_INSN, reg, addr, handler) #define STOREB(reg, addr, handler) EXC(sb, ST_INSN, reg, addr, handler) #define STOREL(reg, addr, handler) EXC(swl, ST_INSN, reg, addr, handler) #define STORER(reg, addr, handler) EXC(swr, ST_INSN, reg, addr, handler) #define 
STORE(reg, addr, handler) EXC(sw, ST_INSN, reg, addr, handler) #define ADD addu #define SUB subu #define SRL srl #define SLL sll #define SLLV sllv #define SRLV srlv #define NBYTES 4 #define LOG_NBYTES 2 #endif /* USE_DOUBLE */ #ifdef CONFIG_CPU_LITTLE_ENDIAN #define LDFIRST LOADR #define LDREST LOADL #define STFIRST STORER #define STREST STOREL #define SHIFT_DISCARD SLLV #define SHIFT_DISCARD_REVERT SRLV #else #define LDFIRST LOADL #define LDREST LOADR #define STFIRST STOREL #define STREST STORER #define SHIFT_DISCARD SRLV #define SHIFT_DISCARD_REVERT SLLV #endif #define FIRST(unit) ((unit)*NBYTES) #define REST(unit) (FIRST(unit)+NBYTES-1) #define ADDRMASK (NBYTES-1) #ifndef CONFIG_CPU_DADDI_WORKAROUNDS .set noat #else .set at=v1 #endif .macro __BUILD_CSUM_PARTIAL_COPY_USER mode, from, to, __nocheck PTR_ADDU AT, src, len /* See (1) above. */ /* initialize __nocheck if this the first time we execute this * macro */ #ifdef CONFIG_64BIT move errptr, a4 #else lw errptr, 16(sp) #endif .if \__nocheck == 1 FEXPORT(csum_partial_copy_nocheck) EXPORT_SYMBOL(csum_partial_copy_nocheck) .endif move sum, zero move odd, zero /* * Note: dst & src may be unaligned, len may be 0 * Temps */ /* * The "issue break"s below are very approximate. * Issue delays for dcache fills will perturb the schedule, as will * load queue full replay traps, etc. * * If len < NBYTES use byte operations. */ sltu t2, len, NBYTES and t1, dst, ADDRMASK bnez t2, .Lcopy_bytes_checklen\@ and t0, src, ADDRMASK andi odd, dst, 0x1 /* odd buffer? 
*/ bnez t1, .Ldst_unaligned\@ nop bnez t0, .Lsrc_unaligned_dst_aligned\@ /* * use delay slot for fall-through * src and dst are aligned; need to compute rem */ .Lboth_aligned\@: SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter beqz t0, .Lcleanup_both_aligned\@ # len < 8*NBYTES nop SUB len, 8*NBYTES # subtract here for bgez loop .align 4 1: LOAD(t0, UNIT(0)(src), .Ll_exc\@) LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@) LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@) LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@) LOAD(t4, UNIT(4)(src), .Ll_exc_copy\@) LOAD(t5, UNIT(5)(src), .Ll_exc_copy\@) LOAD(t6, UNIT(6)(src), .Ll_exc_copy\@) LOAD(t7, UNIT(7)(src), .Ll_exc_copy\@) SUB len, len, 8*NBYTES ADD src, src, 8*NBYTES STORE(t0, UNIT(0)(dst), .Ls_exc\@) ADDC(t0, t1) STORE(t1, UNIT(1)(dst), .Ls_exc\@) ADDC(sum, t0) STORE(t2, UNIT(2)(dst), .Ls_exc\@) ADDC(t2, t3) STORE(t3, UNIT(3)(dst), .Ls_exc\@) ADDC(sum, t2) STORE(t4, UNIT(4)(dst), .Ls_exc\@) ADDC(t4, t5) STORE(t5, UNIT(5)(dst), .Ls_exc\@) ADDC(sum, t4) STORE(t6, UNIT(6)(dst), .Ls_exc\@) ADDC(t6, t7) STORE(t7, UNIT(7)(dst), .Ls_exc\@) ADDC(sum, t6) .set reorder /* DADDI_WAR */ ADD dst, dst, 8*NBYTES bgez len, 1b .set noreorder ADD len, 8*NBYTES # revert len (see above) /* * len == the number of bytes left to copy < 8*NBYTES */ .Lcleanup_both_aligned\@: #define rem t7 beqz len, .Ldone\@ sltu t0, len, 4*NBYTES bnez t0, .Lless_than_4units\@ and rem, len, (NBYTES-1) # rem = len % NBYTES /* * len >= 4*NBYTES */ LOAD(t0, UNIT(0)(src), .Ll_exc\@) LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@) LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@) LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@) SUB len, len, 4*NBYTES ADD src, src, 4*NBYTES STORE(t0, UNIT(0)(dst), .Ls_exc\@) ADDC(t0, t1) STORE(t1, UNIT(1)(dst), .Ls_exc\@) ADDC(sum, t0) STORE(t2, UNIT(2)(dst), .Ls_exc\@) ADDC(t2, t3) STORE(t3, UNIT(3)(dst), .Ls_exc\@) ADDC(sum, t2) .set reorder /* DADDI_WAR */ ADD dst, dst, 4*NBYTES beqz len, .Ldone\@ .set noreorder .Lless_than_4units\@: /* * rem = len % NBYTES */ beq rem, len, 
.Lcopy_bytes\@ nop 1: LOAD(t0, 0(src), .Ll_exc\@) ADD src, src, NBYTES SUB len, len, NBYTES STORE(t0, 0(dst), .Ls_exc\@) ADDC(sum, t0) .set reorder /* DADDI_WAR */ ADD dst, dst, NBYTES bne rem, len, 1b .set noreorder /* * src and dst are aligned, need to copy rem bytes (rem < NBYTES) * A loop would do only a byte at a time with possible branch * mispredicts. Can't do an explicit LOAD dst,mask,or,STORE * because can't assume read-access to dst. Instead, use * STREST dst, which doesn't require read access to dst. * * This code should perform better than a simple loop on modern, * wide-issue mips processors because the code has fewer branches and * more instruction-level parallelism. */ #define bits t2 beqz len, .Ldone\@ ADD t1, dst, len # t1 is just past last byte of dst li bits, 8*NBYTES SLL rem, len, 3 # rem = number of bits to keep LOAD(t0, 0(src), .Ll_exc\@) SUB bits, bits, rem # bits = number of bits to discard SHIFT_DISCARD t0, t0, bits STREST(t0, -1(t1), .Ls_exc\@) SHIFT_DISCARD_REVERT t0, t0, bits .set reorder ADDC(sum, t0) b .Ldone\@ .set noreorder .Ldst_unaligned\@: /* * dst is unaligned * t0 = src & ADDRMASK * t1 = dst & ADDRMASK; T1 > 0 * len >= NBYTES * * Copy enough bytes to align dst * Set match = (src and dst have same alignment) */ #define match rem LDFIRST(t3, FIRST(0)(src), .Ll_exc\@) ADD t2, zero, NBYTES LDREST(t3, REST(0)(src), .Ll_exc_copy\@) SUB t2, t2, t1 # t2 = number of bytes copied xor match, t0, t1 STFIRST(t3, FIRST(0)(dst), .Ls_exc\@) SLL t4, t1, 3 # t4 = number of bits to discard SHIFT_DISCARD t3, t3, t4 /* no SHIFT_DISCARD_REVERT to handle odd buffer properly */ ADDC(sum, t3) beq len, t2, .Ldone\@ SUB len, len, t2 ADD dst, dst, t2 beqz match, .Lboth_aligned\@ ADD src, src, t2 .Lsrc_unaligned_dst_aligned\@: SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter beqz t0, .Lcleanup_src_unaligned\@ and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES 1: /* * Avoid consecutive LD*'s to the same register since some mips * implementations can't issue 
them in the same cycle. * It's OK to load FIRST(N+1) before REST(N) because the two addresses * are to the same unit (unless src is aligned, but it's not). */ LDFIRST(t0, FIRST(0)(src), .Ll_exc\@) LDFIRST(t1, FIRST(1)(src), .Ll_exc_copy\@) SUB len, len, 4*NBYTES LDREST(t0, REST(0)(src), .Ll_exc_copy\@) LDREST(t1, REST(1)(src), .Ll_exc_copy\@) LDFIRST(t2, FIRST(2)(src), .Ll_exc_copy\@) LDFIRST(t3, FIRST(3)(src), .Ll_exc_copy\@) LDREST(t2, REST(2)(src), .Ll_exc_copy\@) LDREST(t3, REST(3)(src), .Ll_exc_copy\@) ADD src, src, 4*NBYTES #ifdef CONFIG_CPU_SB1 nop # improves slotting #endif STORE(t0, UNIT(0)(dst), .Ls_exc\@) ADDC(t0, t1) STORE(t1, UNIT(1)(dst), .Ls_exc\@) ADDC(sum, t0) STORE(t2, UNIT(2)(dst), .Ls_exc\@) ADDC(t2, t3) STORE(t3, UNIT(3)(dst), .Ls_exc\@) ADDC(sum, t2) .set reorder /* DADDI_WAR */ ADD dst, dst, 4*NBYTES bne len, rem, 1b .set noreorder .Lcleanup_src_unaligned\@: beqz len, .Ldone\@ and rem, len, NBYTES-1 # rem = len % NBYTES beq rem, len, .Lcopy_bytes\@ nop 1: LDFIRST(t0, FIRST(0)(src), .Ll_exc\@) LDREST(t0, REST(0)(src), .Ll_exc_copy\@) ADD src, src, NBYTES SUB len, len, NBYTES STORE(t0, 0(dst), .Ls_exc\@) ADDC(sum, t0) .set reorder /* DADDI_WAR */ ADD dst, dst, NBYTES bne len, rem, 1b .set noreorder .Lcopy_bytes_checklen\@: beqz len, .Ldone\@ nop .Lcopy_bytes\@: /* 0 < len < NBYTES */ #ifdef CONFIG_CPU_LITTLE_ENDIAN #define SHIFT_START 0 #define SHIFT_INC 8 #else #define SHIFT_START 8*(NBYTES-1) #define SHIFT_INC -8 #endif move t2, zero # partial word li t3, SHIFT_START # shift /* use .Ll_exc_copy here to return correct sum on fault */ #define COPY_BYTE(N) \ LOADBU(t0, N(src), .Ll_exc_copy\@); \ SUB len, len, 1; \ STOREB(t0, N(dst), .Ls_exc\@); \ SLLV t0, t0, t3; \ addu t3, SHIFT_INC; \ beqz len, .Lcopy_bytes_done\@; \ or t2, t0 COPY_BYTE(0) COPY_BYTE(1) #ifdef USE_DOUBLE COPY_BYTE(2) COPY_BYTE(3) COPY_BYTE(4) COPY_BYTE(5) #endif LOADBU(t0, NBYTES-2(src), .Ll_exc_copy\@) SUB len, len, 1 STOREB(t0, NBYTES-2(dst), .Ls_exc\@) SLLV t0, t0, t3 or t2, 
t0 .Lcopy_bytes_done\@: ADDC(sum, t2) .Ldone\@: /* fold checksum */ .set push .set noat #ifdef USE_DOUBLE dsll32 v1, sum, 0 daddu sum, v1 sltu v1, sum, v1 dsra32 sum, sum, 0 addu sum, v1 #endif #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_LOONGSON3) .set push .set arch=mips32r2 wsbh v1, sum movn sum, v1, odd .set pop #else beqz odd, 1f /* odd buffer alignment? */ lui v1, 0x00ff addu v1, 0x00ff and t0, sum, v1 sll t0, t0, 8 srl sum, sum, 8 and sum, sum, v1 or sum, sum, t0 1: #endif .set pop .set reorder ADDC32(sum, psum) jr ra .set noreorder .Ll_exc_copy\@: /* * Copy bytes from src until faulting load address (or until a * lb faults) * * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28) * may be more than a byte beyond the last address. * Hence, the lb below may get an exception. * * Assumes src < THREAD_BUADDR($28) */ LOADK t0, TI_TASK($28) li t2, SHIFT_START LOADK t0, THREAD_BUADDR(t0) 1: LOADBU(t1, 0(src), .Ll_exc\@) ADD src, src, 1 sb t1, 0(dst) # can't fault -- we're copy_from_user SLLV t1, t1, t2 addu t2, SHIFT_INC ADDC(sum, t1) .set reorder /* DADDI_WAR */ ADD dst, dst, 1 bne src, t0, 1b .set noreorder .Ll_exc\@: LOADK t0, TI_TASK($28) nop LOADK t0, THREAD_BUADDR(t0) # t0 is just past last good address nop SUB len, AT, t0 # len number of uncopied bytes /* * Here's where we rely on src and dst being incremented in tandem, * See (3) above. * dst += (fault addr - src) to put dst at first byte to clear */ ADD dst, t0 # compute start address in a1 SUB dst, src /* * Clear len bytes starting at dst. Can't call __bzero because it * might modify len. An inefficient loop for these rare times... 
*/ .set reorder /* DADDI_WAR */ SUB src, len, 1 beqz len, .Ldone\@ .set noreorder 1: sb zero, 0(dst) ADD dst, dst, 1 .set push .set noat #ifndef CONFIG_CPU_DADDI_WORKAROUNDS bnez src, 1b SUB src, src, 1 #else li v1, 1 bnez src, 1b SUB src, src, v1 #endif li v1, -EFAULT b .Ldone\@ sw v1, (errptr) .Ls_exc\@: li v0, -1 /* invalid checksum */ li v1, -EFAULT jr ra sw v1, (errptr) .set pop .endm LEAF(__csum_partial_copy_kernel) EXPORT_SYMBOL(__csum_partial_copy_kernel) #ifndef CONFIG_EVA FEXPORT(__csum_partial_copy_to_user) EXPORT_SYMBOL(__csum_partial_copy_to_user) FEXPORT(__csum_partial_copy_from_user) EXPORT_SYMBOL(__csum_partial_copy_from_user) #endif __BUILD_CSUM_PARTIAL_COPY_USER LEGACY_MODE USEROP USEROP 1 END(__csum_partial_copy_kernel) #ifdef CONFIG_EVA LEAF(__csum_partial_copy_to_user) __BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE KERNELOP USEROP 0 END(__csum_partial_copy_to_user) LEAF(__csum_partial_copy_from_user) __BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE USEROP KERNELOP 0 END(__csum_partial_copy_from_user) #endif
AirFortressIlikara/LS2K0300-linux-4.19
4,407
arch/mips/kvm/msa.S
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * MIPS SIMD Architecture (MSA) context handling code for KVM. * * Copyright (C) 2015 Imagination Technologies Ltd. */ #include <asm/asm.h> #include <asm/asm-offsets.h> #include <asm/asmmacro.h> #include <asm/regdef.h> .set noreorder .set noat LEAF(__kvm_save_msa) st_d 0, VCPU_FPR0, a0 st_d 1, VCPU_FPR1, a0 st_d 2, VCPU_FPR2, a0 st_d 3, VCPU_FPR3, a0 st_d 4, VCPU_FPR4, a0 st_d 5, VCPU_FPR5, a0 st_d 6, VCPU_FPR6, a0 st_d 7, VCPU_FPR7, a0 st_d 8, VCPU_FPR8, a0 st_d 9, VCPU_FPR9, a0 st_d 10, VCPU_FPR10, a0 st_d 11, VCPU_FPR11, a0 st_d 12, VCPU_FPR12, a0 st_d 13, VCPU_FPR13, a0 st_d 14, VCPU_FPR14, a0 st_d 15, VCPU_FPR15, a0 st_d 16, VCPU_FPR16, a0 st_d 17, VCPU_FPR17, a0 st_d 18, VCPU_FPR18, a0 st_d 19, VCPU_FPR19, a0 st_d 20, VCPU_FPR20, a0 st_d 21, VCPU_FPR21, a0 st_d 22, VCPU_FPR22, a0 st_d 23, VCPU_FPR23, a0 st_d 24, VCPU_FPR24, a0 st_d 25, VCPU_FPR25, a0 st_d 26, VCPU_FPR26, a0 st_d 27, VCPU_FPR27, a0 st_d 28, VCPU_FPR28, a0 st_d 29, VCPU_FPR29, a0 st_d 30, VCPU_FPR30, a0 st_d 31, VCPU_FPR31, a0 jr ra nop END(__kvm_save_msa) LEAF(__kvm_restore_msa) ld_d 0, VCPU_FPR0, a0 ld_d 1, VCPU_FPR1, a0 ld_d 2, VCPU_FPR2, a0 ld_d 3, VCPU_FPR3, a0 ld_d 4, VCPU_FPR4, a0 ld_d 5, VCPU_FPR5, a0 ld_d 6, VCPU_FPR6, a0 ld_d 7, VCPU_FPR7, a0 ld_d 8, VCPU_FPR8, a0 ld_d 9, VCPU_FPR9, a0 ld_d 10, VCPU_FPR10, a0 ld_d 11, VCPU_FPR11, a0 ld_d 12, VCPU_FPR12, a0 ld_d 13, VCPU_FPR13, a0 ld_d 14, VCPU_FPR14, a0 ld_d 15, VCPU_FPR15, a0 ld_d 16, VCPU_FPR16, a0 ld_d 17, VCPU_FPR17, a0 ld_d 18, VCPU_FPR18, a0 ld_d 19, VCPU_FPR19, a0 ld_d 20, VCPU_FPR20, a0 ld_d 21, VCPU_FPR21, a0 ld_d 22, VCPU_FPR22, a0 ld_d 23, VCPU_FPR23, a0 ld_d 24, VCPU_FPR24, a0 ld_d 25, VCPU_FPR25, a0 ld_d 26, VCPU_FPR26, a0 ld_d 27, VCPU_FPR27, a0 ld_d 28, VCPU_FPR28, a0 ld_d 29, VCPU_FPR29, a0 ld_d 30, VCPU_FPR30, a0 ld_d 31, VCPU_FPR31, a0 jr ra nop 
END(__kvm_restore_msa) .macro kvm_restore_msa_upper wr, off, base .set push .set noat #ifdef CONFIG_64BIT ld $1, \off(\base) insert_d \wr, 1 #elif defined(CONFIG_CPU_LITTLE_ENDIAN) lw $1, \off(\base) insert_w \wr, 2 lw $1, (\off+4)(\base) insert_w \wr, 3 #else /* CONFIG_CPU_BIG_ENDIAN */ lw $1, (\off+4)(\base) insert_w \wr, 2 lw $1, \off(\base) insert_w \wr, 3 #endif .set pop .endm LEAF(__kvm_restore_msa_upper) kvm_restore_msa_upper 0, VCPU_FPR0 +8, a0 kvm_restore_msa_upper 1, VCPU_FPR1 +8, a0 kvm_restore_msa_upper 2, VCPU_FPR2 +8, a0 kvm_restore_msa_upper 3, VCPU_FPR3 +8, a0 kvm_restore_msa_upper 4, VCPU_FPR4 +8, a0 kvm_restore_msa_upper 5, VCPU_FPR5 +8, a0 kvm_restore_msa_upper 6, VCPU_FPR6 +8, a0 kvm_restore_msa_upper 7, VCPU_FPR7 +8, a0 kvm_restore_msa_upper 8, VCPU_FPR8 +8, a0 kvm_restore_msa_upper 9, VCPU_FPR9 +8, a0 kvm_restore_msa_upper 10, VCPU_FPR10+8, a0 kvm_restore_msa_upper 11, VCPU_FPR11+8, a0 kvm_restore_msa_upper 12, VCPU_FPR12+8, a0 kvm_restore_msa_upper 13, VCPU_FPR13+8, a0 kvm_restore_msa_upper 14, VCPU_FPR14+8, a0 kvm_restore_msa_upper 15, VCPU_FPR15+8, a0 kvm_restore_msa_upper 16, VCPU_FPR16+8, a0 kvm_restore_msa_upper 17, VCPU_FPR17+8, a0 kvm_restore_msa_upper 18, VCPU_FPR18+8, a0 kvm_restore_msa_upper 19, VCPU_FPR19+8, a0 kvm_restore_msa_upper 20, VCPU_FPR20+8, a0 kvm_restore_msa_upper 21, VCPU_FPR21+8, a0 kvm_restore_msa_upper 22, VCPU_FPR22+8, a0 kvm_restore_msa_upper 23, VCPU_FPR23+8, a0 kvm_restore_msa_upper 24, VCPU_FPR24+8, a0 kvm_restore_msa_upper 25, VCPU_FPR25+8, a0 kvm_restore_msa_upper 26, VCPU_FPR26+8, a0 kvm_restore_msa_upper 27, VCPU_FPR27+8, a0 kvm_restore_msa_upper 28, VCPU_FPR28+8, a0 kvm_restore_msa_upper 29, VCPU_FPR29+8, a0 kvm_restore_msa_upper 30, VCPU_FPR30+8, a0 kvm_restore_msa_upper 31, VCPU_FPR31+8, a0 jr ra nop END(__kvm_restore_msa_upper) LEAF(__kvm_restore_msacsr) lw t0, VCPU_MSA_CSR(a0) /* * The ctcmsa must stay at this offset in __kvm_restore_msacsr. 
* See kvm_mips_csr_die_notify() which handles t0 containing a value * which triggers an MSA FP Exception, which must be stepped over and * ignored since the set cause bits must remain there for the guest. */ _ctcmsa MSA_CSR, t0 jr ra nop END(__kvm_restore_msacsr)
AirFortressIlikara/LS2K0300-linux-4.19
3,064
arch/mips/kvm/fpu.S
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * FPU context handling code for KVM. * * Copyright (C) 2015 Imagination Technologies Ltd. */ #include <asm/asm.h> #include <asm/asm-offsets.h> #include <asm/fpregdef.h> #include <asm/mipsregs.h> #include <asm/regdef.h> /* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */ #undef fp .set noreorder .set noat LEAF(__kvm_save_fpu) .set push SET_HARDFLOAT .set fp=64 mfc0 t0, CP0_STATUS sll t0, t0, 5 # is Status.FR set? bgez t0, 1f # no: skip odd doubles nop sdc1 $f1, VCPU_FPR1(a0) sdc1 $f3, VCPU_FPR3(a0) sdc1 $f5, VCPU_FPR5(a0) sdc1 $f7, VCPU_FPR7(a0) sdc1 $f9, VCPU_FPR9(a0) sdc1 $f11, VCPU_FPR11(a0) sdc1 $f13, VCPU_FPR13(a0) sdc1 $f15, VCPU_FPR15(a0) sdc1 $f17, VCPU_FPR17(a0) sdc1 $f19, VCPU_FPR19(a0) sdc1 $f21, VCPU_FPR21(a0) sdc1 $f23, VCPU_FPR23(a0) sdc1 $f25, VCPU_FPR25(a0) sdc1 $f27, VCPU_FPR27(a0) sdc1 $f29, VCPU_FPR29(a0) sdc1 $f31, VCPU_FPR31(a0) 1: sdc1 $f0, VCPU_FPR0(a0) sdc1 $f2, VCPU_FPR2(a0) sdc1 $f4, VCPU_FPR4(a0) sdc1 $f6, VCPU_FPR6(a0) sdc1 $f8, VCPU_FPR8(a0) sdc1 $f10, VCPU_FPR10(a0) sdc1 $f12, VCPU_FPR12(a0) sdc1 $f14, VCPU_FPR14(a0) sdc1 $f16, VCPU_FPR16(a0) sdc1 $f18, VCPU_FPR18(a0) sdc1 $f20, VCPU_FPR20(a0) sdc1 $f22, VCPU_FPR22(a0) sdc1 $f24, VCPU_FPR24(a0) sdc1 $f26, VCPU_FPR26(a0) sdc1 $f28, VCPU_FPR28(a0) jr ra sdc1 $f30, VCPU_FPR30(a0) .set pop END(__kvm_save_fpu) LEAF(__kvm_restore_fpu) .set push SET_HARDFLOAT .set fp=64 mfc0 t0, CP0_STATUS sll t0, t0, 5 # is Status.FR set? 
bgez t0, 1f # no: skip odd doubles nop ldc1 $f1, VCPU_FPR1(a0) ldc1 $f3, VCPU_FPR3(a0) ldc1 $f5, VCPU_FPR5(a0) ldc1 $f7, VCPU_FPR7(a0) ldc1 $f9, VCPU_FPR9(a0) ldc1 $f11, VCPU_FPR11(a0) ldc1 $f13, VCPU_FPR13(a0) ldc1 $f15, VCPU_FPR15(a0) ldc1 $f17, VCPU_FPR17(a0) ldc1 $f19, VCPU_FPR19(a0) ldc1 $f21, VCPU_FPR21(a0) ldc1 $f23, VCPU_FPR23(a0) ldc1 $f25, VCPU_FPR25(a0) ldc1 $f27, VCPU_FPR27(a0) ldc1 $f29, VCPU_FPR29(a0) ldc1 $f31, VCPU_FPR31(a0) 1: ldc1 $f0, VCPU_FPR0(a0) ldc1 $f2, VCPU_FPR2(a0) ldc1 $f4, VCPU_FPR4(a0) ldc1 $f6, VCPU_FPR6(a0) ldc1 $f8, VCPU_FPR8(a0) ldc1 $f10, VCPU_FPR10(a0) ldc1 $f12, VCPU_FPR12(a0) ldc1 $f14, VCPU_FPR14(a0) ldc1 $f16, VCPU_FPR16(a0) ldc1 $f18, VCPU_FPR18(a0) ldc1 $f20, VCPU_FPR20(a0) ldc1 $f22, VCPU_FPR22(a0) ldc1 $f24, VCPU_FPR24(a0) ldc1 $f26, VCPU_FPR26(a0) ldc1 $f28, VCPU_FPR28(a0) jr ra ldc1 $f30, VCPU_FPR30(a0) .set pop END(__kvm_restore_fpu) LEAF(__kvm_restore_fcsr) .set push SET_HARDFLOAT lw t0, VCPU_FCR31(a0) /* * The ctc1 must stay at this offset in __kvm_restore_fcsr. * See kvm_mips_csr_die_notify() which handles t0 containing a value * which triggers an FP Exception, which must be stepped over and * ignored since the set cause bits must remain there for the guest. */ ctc1 t0, fcr31 jr ra nop .set pop END(__kvm_restore_fcsr)
AirFortressIlikara/LS2K0300-linux-4.19
5,758
arch/mips/kvm/lasx.S
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * MIPS SIMD Architecture (MSA) Extension context handling code for KVM. * * Copyright (C) 2019 Loongson Inc. * Author: Xing Li, lixing@loongson.cn */ #include <asm/asm.h> #include <asm/asm-offsets.h> #include <asm/asmmacro.h> #include <asm/regdef.h> .set noreorder .set noat LEAF(__kvm_save_lasx) xvst_b 0, VCPU_FPR0, a0 xvst_b 1, VCPU_FPR1, a0 xvst_b 2, VCPU_FPR2, a0 xvst_b 3, VCPU_FPR3, a0 xvst_b 4, VCPU_FPR4, a0 xvst_b 5, VCPU_FPR5, a0 xvst_b 6, VCPU_FPR6, a0 xvst_b 7, VCPU_FPR7, a0 xvst_b 8, VCPU_FPR8, a0 xvst_b 9, VCPU_FPR9, a0 xvst_b 10, VCPU_FPR10, a0 xvst_b 11, VCPU_FPR11, a0 xvst_b 12, VCPU_FPR12, a0 xvst_b 13, VCPU_FPR13, a0 xvst_b 14, VCPU_FPR14, a0 xvst_b 15, VCPU_FPR15, a0 xvst_b 16, VCPU_FPR16, a0 xvst_b 17, VCPU_FPR17, a0 xvst_b 18, VCPU_FPR18, a0 xvst_b 19, VCPU_FPR19, a0 xvst_b 20, VCPU_FPR20, a0 xvst_b 21, VCPU_FPR21, a0 xvst_b 22, VCPU_FPR22, a0 xvst_b 23, VCPU_FPR23, a0 xvst_b 24, VCPU_FPR24, a0 xvst_b 25, VCPU_FPR25, a0 xvst_b 26, VCPU_FPR26, a0 xvst_b 27, VCPU_FPR27, a0 xvst_b 28, VCPU_FPR28, a0 xvst_b 29, VCPU_FPR29, a0 xvst_b 30, VCPU_FPR30, a0 xvst_b 31, VCPU_FPR31, a0 jr ra nop END(__kvm_save_lasx) LEAF(__kvm_restore_lasx) xvld_b 0, VCPU_FPR0, a0 xvld_b 1, VCPU_FPR1, a0 xvld_b 2, VCPU_FPR2, a0 xvld_b 3, VCPU_FPR3, a0 xvld_b 4, VCPU_FPR4, a0 xvld_b 5, VCPU_FPR5, a0 xvld_b 6, VCPU_FPR6, a0 xvld_b 7, VCPU_FPR7, a0 xvld_b 8, VCPU_FPR8, a0 xvld_b 9, VCPU_FPR9, a0 xvld_b 10, VCPU_FPR10, a0 xvld_b 11, VCPU_FPR11, a0 xvld_b 12, VCPU_FPR12, a0 xvld_b 13, VCPU_FPR13, a0 xvld_b 14, VCPU_FPR14, a0 xvld_b 15, VCPU_FPR15, a0 xvld_b 16, VCPU_FPR16, a0 xvld_b 17, VCPU_FPR17, a0 xvld_b 18, VCPU_FPR18, a0 xvld_b 19, VCPU_FPR19, a0 xvld_b 20, VCPU_FPR20, a0 xvld_b 21, VCPU_FPR21, a0 xvld_b 22, VCPU_FPR22, a0 xvld_b 23, VCPU_FPR23, a0 xvld_b 24, VCPU_FPR24, a0 xvld_b 25, VCPU_FPR25, a0 
xvld_b 26, VCPU_FPR26, a0 xvld_b 27, VCPU_FPR27, a0 xvld_b 28, VCPU_FPR28, a0 xvld_b 29, VCPU_FPR29, a0 xvld_b 30, VCPU_FPR30, a0 xvld_b 31, VCPU_FPR31, a0 jr ra nop END(__kvm_restore_lasx) .macro kvm_restore_lasx_upper wr, off, base .set push .set noat ld $1, (\off+8)(\base) xinsert_d \wr, 2 ld $1, (\off+16)(\base) xinsert_d \wr, 3 .set pop .endm .macro kvm_restore_lasx_uppest wr, off, base .set push .set noat ld $1, \off(\base) xinsert_d \wr, 1 ld $1, (\off+8)(\base) xinsert_d \wr, 2 ld $1, (\off+16)(\base) xinsert_d \wr, 3 .set pop .endm LEAF(__kvm_restore_lasx_upper) kvm_restore_lasx_upper 0, VCPU_FPR0 +8, a0 kvm_restore_lasx_upper 1, VCPU_FPR1 +8, a0 kvm_restore_lasx_upper 2, VCPU_FPR2 +8, a0 kvm_restore_lasx_upper 3, VCPU_FPR3 +8, a0 kvm_restore_lasx_upper 4, VCPU_FPR4 +8, a0 kvm_restore_lasx_upper 5, VCPU_FPR5 +8, a0 kvm_restore_lasx_upper 6, VCPU_FPR6 +8, a0 kvm_restore_lasx_upper 7, VCPU_FPR7 +8, a0 kvm_restore_lasx_upper 8, VCPU_FPR8 +8, a0 kvm_restore_lasx_upper 9, VCPU_FPR9 +8, a0 kvm_restore_lasx_upper 10, VCPU_FPR10+8, a0 kvm_restore_lasx_upper 11, VCPU_FPR11+8, a0 kvm_restore_lasx_upper 12, VCPU_FPR12+8, a0 kvm_restore_lasx_upper 13, VCPU_FPR13+8, a0 kvm_restore_lasx_upper 14, VCPU_FPR14+8, a0 kvm_restore_lasx_upper 15, VCPU_FPR15+8, a0 kvm_restore_lasx_upper 16, VCPU_FPR16+8, a0 kvm_restore_lasx_upper 17, VCPU_FPR17+8, a0 kvm_restore_lasx_upper 18, VCPU_FPR18+8, a0 kvm_restore_lasx_upper 19, VCPU_FPR19+8, a0 kvm_restore_lasx_upper 20, VCPU_FPR20+8, a0 kvm_restore_lasx_upper 21, VCPU_FPR21+8, a0 kvm_restore_lasx_upper 22, VCPU_FPR22+8, a0 kvm_restore_lasx_upper 23, VCPU_FPR23+8, a0 kvm_restore_lasx_upper 24, VCPU_FPR24+8, a0 kvm_restore_lasx_upper 25, VCPU_FPR25+8, a0 kvm_restore_lasx_upper 26, VCPU_FPR26+8, a0 kvm_restore_lasx_upper 27, VCPU_FPR27+8, a0 kvm_restore_lasx_upper 28, VCPU_FPR28+8, a0 kvm_restore_lasx_upper 29, VCPU_FPR29+8, a0 kvm_restore_lasx_upper 30, VCPU_FPR30+8, a0 kvm_restore_lasx_upper 31, VCPU_FPR31+8, a0 jr ra nop 
END(__kvm_restore_lasx_upper) LEAF(__kvm_restore_lasx_uppest) kvm_restore_lasx_uppest 0, VCPU_FPR0 +8, a0 kvm_restore_lasx_uppest 1, VCPU_FPR1 +8, a0 kvm_restore_lasx_uppest 2, VCPU_FPR2 +8, a0 kvm_restore_lasx_uppest 3, VCPU_FPR3 +8, a0 kvm_restore_lasx_uppest 4, VCPU_FPR4 +8, a0 kvm_restore_lasx_uppest 5, VCPU_FPR5 +8, a0 kvm_restore_lasx_uppest 6, VCPU_FPR6 +8, a0 kvm_restore_lasx_uppest 7, VCPU_FPR7 +8, a0 kvm_restore_lasx_uppest 8, VCPU_FPR8 +8, a0 kvm_restore_lasx_uppest 9, VCPU_FPR9 +8, a0 kvm_restore_lasx_uppest 10, VCPU_FPR10+8, a0 kvm_restore_lasx_uppest 11, VCPU_FPR11+8, a0 kvm_restore_lasx_uppest 12, VCPU_FPR12+8, a0 kvm_restore_lasx_uppest 13, VCPU_FPR13+8, a0 kvm_restore_lasx_uppest 14, VCPU_FPR14+8, a0 kvm_restore_lasx_uppest 15, VCPU_FPR15+8, a0 kvm_restore_lasx_uppest 16, VCPU_FPR16+8, a0 kvm_restore_lasx_uppest 17, VCPU_FPR17+8, a0 kvm_restore_lasx_uppest 18, VCPU_FPR18+8, a0 kvm_restore_lasx_uppest 19, VCPU_FPR19+8, a0 kvm_restore_lasx_uppest 20, VCPU_FPR20+8, a0 kvm_restore_lasx_uppest 21, VCPU_FPR21+8, a0 kvm_restore_lasx_uppest 22, VCPU_FPR22+8, a0 kvm_restore_lasx_uppest 23, VCPU_FPR23+8, a0 kvm_restore_lasx_uppest 24, VCPU_FPR24+8, a0 kvm_restore_lasx_uppest 25, VCPU_FPR25+8, a0 kvm_restore_lasx_uppest 26, VCPU_FPR26+8, a0 kvm_restore_lasx_uppest 27, VCPU_FPR27+8, a0 kvm_restore_lasx_uppest 28, VCPU_FPR28+8, a0 kvm_restore_lasx_uppest 29, VCPU_FPR29+8, a0 kvm_restore_lasx_uppest 30, VCPU_FPR30+8, a0 kvm_restore_lasx_uppest 31, VCPU_FPR31+8, a0 jr ra nop END(__kvm_restore_lasx_uppest)
AirFortressIlikara/LS2K0300-linux-4.19
7,004
arch/mips/net/bpf_jit_asm.S
/* * bpf_jib_asm.S: Packet/header access helper functions for MIPS/MIPS64 BPF * compiler. * * Copyright (C) 2015 Imagination Technologies Ltd. * Author: Markos Chandras <markos.chandras@imgtec.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; version 2 of the License. */ #include <asm/asm.h> #include <asm/isa-rev.h> #include <asm/regdef.h> #include "bpf_jit.h" /* ABI * * r_skb_hl skb header length * r_skb_data skb data * r_off(a1) offset register * r_A BPF register A * r_X PF register X * r_skb(a0) *skb * r_M *scratch memory * r_skb_le skb length * r_s0 Scratch register 0 * r_s1 Scratch register 1 * * On entry: * a0: *skb * a1: offset (imm or imm + X) * * All non-BPF-ABI registers are free for use. On return, we only * care about r_ret. The BPF-ABI registers are assumed to remain * unmodified during the entire filter operation. */ #define skb a0 #define offset a1 #define SKF_LL_OFF (-0x200000) /* Can't include linux/filter.h in assembly */ /* We know better :) so prevent assembler reordering etc */ .set noreorder #define is_offset_negative(TYPE) \ /* If offset is negative we have more work to do */ \ slti t0, offset, 0; \ bgtz t0, bpf_slow_path_##TYPE##_neg; \ /* Be careful what follows in DS. */ #define is_offset_in_header(SIZE, TYPE) \ /* Reading from header? 
*/ \ addiu $r_s0, $r_skb_hl, -SIZE; \ slt t0, $r_s0, offset; \ bgtz t0, bpf_slow_path_##TYPE; \ LEAF(sk_load_word) is_offset_negative(word) FEXPORT(sk_load_word_positive) is_offset_in_header(4, word) /* Offset within header boundaries */ PTR_ADDU t1, $r_skb_data, offset .set reorder lw $r_A, 0(t1) .set noreorder #ifdef CONFIG_CPU_LITTLE_ENDIAN # if MIPS_ISA_REV >= 2 wsbh t0, $r_A rotr $r_A, t0, 16 # else sll t0, $r_A, 24 srl t1, $r_A, 24 srl t2, $r_A, 8 or t0, t0, t1 andi t2, t2, 0xff00 andi t1, $r_A, 0xff00 or t0, t0, t2 sll t1, t1, 8 or $r_A, t0, t1 # endif #endif jr $r_ra move $r_ret, zero END(sk_load_word) LEAF(sk_load_half) is_offset_negative(half) FEXPORT(sk_load_half_positive) is_offset_in_header(2, half) /* Offset within header boundaries */ PTR_ADDU t1, $r_skb_data, offset lhu $r_A, 0(t1) #ifdef CONFIG_CPU_LITTLE_ENDIAN # if MIPS_ISA_REV >= 2 wsbh $r_A, $r_A # else sll t0, $r_A, 8 srl t1, $r_A, 8 andi t0, t0, 0xff00 or $r_A, t0, t1 # endif #endif jr $r_ra move $r_ret, zero END(sk_load_half) LEAF(sk_load_byte) is_offset_negative(byte) FEXPORT(sk_load_byte_positive) is_offset_in_header(1, byte) /* Offset within header boundaries */ PTR_ADDU t1, $r_skb_data, offset lbu $r_A, 0(t1) jr $r_ra move $r_ret, zero END(sk_load_byte) /* * call skb_copy_bits: * (prototype in linux/skbuff.h) * * int skb_copy_bits(sk_buff *skb, int offset, void *to, int len) * * o32 mandates we leave 4 spaces for argument registers in case * the callee needs to use them. Even though we don't care about * the argument registers ourselves, we need to allocate that space * to remain ABI compliant since the callee may want to use that space. * We also allocate 2 more spaces for $r_ra and our return register (*to). * * n64 is a bit different. The *caller* will allocate the space to preserve * the arguments. So in 64-bit kernels, we allocate the 4-arg space for no * good reason but it does not matter that much really. 
* * (void *to) is returned in r_s0 * */ #ifdef CONFIG_CPU_LITTLE_ENDIAN #define DS_OFFSET(SIZE) (4 * SZREG) #else #define DS_OFFSET(SIZE) ((4 * SZREG) + (4 - SIZE)) #endif #define bpf_slow_path_common(SIZE) \ /* Quick check. Are we within reasonable boundaries? */ \ LONG_ADDIU $r_s1, $r_skb_len, -SIZE; \ sltu $r_s0, offset, $r_s1; \ beqz $r_s0, fault; \ /* Load 4th argument in DS */ \ LONG_ADDIU a3, zero, SIZE; \ PTR_ADDIU $r_sp, $r_sp, -(6 * SZREG); \ PTR_LA t0, skb_copy_bits; \ PTR_S $r_ra, (5 * SZREG)($r_sp); \ /* Assign low slot to a2 */ \ PTR_ADDIU a2, $r_sp, DS_OFFSET(SIZE); \ jalr t0; \ /* Reset our destination slot (DS but it's ok) */ \ INT_S zero, (4 * SZREG)($r_sp); \ /* \ * skb_copy_bits returns 0 on success and -EFAULT \ * on error. Our data live in a2. Do not bother with \ * our data if an error has been returned. \ */ \ /* Restore our frame */ \ PTR_L $r_ra, (5 * SZREG)($r_sp); \ INT_L $r_s0, (4 * SZREG)($r_sp); \ bltz v0, fault; \ PTR_ADDIU $r_sp, $r_sp, 6 * SZREG; \ move $r_ret, zero; \ NESTED(bpf_slow_path_word, (6 * SZREG), $r_sp) bpf_slow_path_common(4) #ifdef CONFIG_CPU_LITTLE_ENDIAN # if MIPS_ISA_REV >= 2 wsbh t0, $r_s0 jr $r_ra rotr $r_A, t0, 16 # else sll t0, $r_s0, 24 srl t1, $r_s0, 24 srl t2, $r_s0, 8 or t0, t0, t1 andi t2, t2, 0xff00 andi t1, $r_s0, 0xff00 or t0, t0, t2 sll t1, t1, 8 jr $r_ra or $r_A, t0, t1 # endif #else jr $r_ra move $r_A, $r_s0 #endif END(bpf_slow_path_word) NESTED(bpf_slow_path_half, (6 * SZREG), $r_sp) bpf_slow_path_common(2) #ifdef CONFIG_CPU_LITTLE_ENDIAN # if MIPS_ISA_REV >= 2 jr $r_ra wsbh $r_A, $r_s0 # else sll t0, $r_s0, 8 andi t1, $r_s0, 0xff00 andi t0, t0, 0xff00 srl t1, t1, 8 jr $r_ra or $r_A, t0, t1 # endif #else jr $r_ra move $r_A, $r_s0 #endif END(bpf_slow_path_half) NESTED(bpf_slow_path_byte, (6 * SZREG), $r_sp) bpf_slow_path_common(1) jr $r_ra move $r_A, $r_s0 END(bpf_slow_path_byte) /* * Negative entry points */ .macro bpf_is_end_of_data li t0, SKF_LL_OFF /* Reading link layer data? 
*/ slt t1, offset, t0 bgtz t1, fault /* Be careful what follows in DS. */ .endm /* * call skb_copy_bits: * (prototype in linux/filter.h) * * void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, * int k, unsigned int size) * * see above (bpf_slow_path_common) for ABI restrictions */ #define bpf_negative_common(SIZE) \ PTR_ADDIU $r_sp, $r_sp, -(6 * SZREG); \ PTR_LA t0, bpf_internal_load_pointer_neg_helper; \ PTR_S $r_ra, (5 * SZREG)($r_sp); \ jalr t0; \ li a2, SIZE; \ PTR_L $r_ra, (5 * SZREG)($r_sp); \ /* Check return pointer */ \ beqz v0, fault; \ PTR_ADDIU $r_sp, $r_sp, 6 * SZREG; \ /* Preserve our pointer */ \ move $r_s0, v0; \ /* Set return value */ \ move $r_ret, zero; \ bpf_slow_path_word_neg: bpf_is_end_of_data NESTED(sk_load_word_negative, (6 * SZREG), $r_sp) bpf_negative_common(4) jr $r_ra lw $r_A, 0($r_s0) END(sk_load_word_negative) bpf_slow_path_half_neg: bpf_is_end_of_data NESTED(sk_load_half_negative, (6 * SZREG), $r_sp) bpf_negative_common(2) jr $r_ra lhu $r_A, 0($r_s0) END(sk_load_half_negative) bpf_slow_path_byte_neg: bpf_is_end_of_data NESTED(sk_load_byte_negative, (6 * SZREG), $r_sp) bpf_negative_common(1) jr $r_ra lbu $r_A, 0($r_s0) END(sk_load_byte_negative) fault: jr $r_ra addiu $r_ret, zero, 1
AirFortressIlikara/LS2K0300-linux-4.19
4,645
arch/mips/mm/cex-sb1.S
/* * Copyright (C) 2001,2002,2003 Broadcom Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <asm/asm.h> #include <asm/regdef.h> #include <asm/mipsregs.h> #include <asm/stackframe.h> #include <asm/cacheops.h> #include <asm/sibyte/board.h> #define C0_ERRCTL $26 /* CP0: Error info */ #define C0_CERR_I $27 /* CP0: Icache error */ #define C0_CERR_D $27,1 /* CP0: Dcache error */ /* * Based on SiByte sample software cache-err/cerr.S * CVS revision 1.8. Only the 'unrecoverable' case * is changed. */ .set mips64 .set noreorder .set noat /* * sb1_cerr_vec: code to be copied to the Cache Error * Exception vector. The code must be pushed out to memory * (either by copying to Kseg0 and Kseg1 both, or by flushing * the L1 and L2) since it is fetched as 0xa0000100. * * NOTE: Be sure this handler is at most 28 instructions long * since the final 16 bytes of the exception vector memory * (0x170-0x17f) are used to preserve k0, k1, and ra. */ LEAF(except_vec2_sb1) /* * If this error is recoverable, we need to exit the handler * without having dirtied any registers. To do this, * save/restore k0 and k1 from low memory (Useg is direct * mapped while ERL=1). Note that we can't save to a * CPU-specific location without ruining a register in the * process. 
This means we are vulnerable to data corruption * whenever the handler is reentered by a second CPU. */ sd k0,0x170($0) sd k1,0x178($0) #ifdef CONFIG_SB1_CEX_ALWAYS_FATAL j handle_vec2_sb1 nop #else /* * M_ERRCTL_RECOVERABLE is bit 31, which makes it easy to tell * if we can fast-path out of here for a h/w-recovered error. */ mfc0 k1,C0_ERRCTL bgtz k1,attempt_recovery sll k0,k1,1 recovered_dcache: /* * Unlock CacheErr-D (which in turn unlocks CacheErr-DPA). * Ought to log the occurrence of this recovered dcache error. */ b recovered mtc0 $0,C0_CERR_D attempt_recovery: /* * k0 has C0_ERRCTL << 1, which puts 'DC' at bit 31. Any * Dcache errors we can recover from will take more extensive * processing. For now, they are considered "unrecoverable". * Note that 'DC' becoming set (outside of ERL mode) will * cause 'IC' to clear; so if there's an Icache error, we'll * only find out about it if we recover from this error and * continue executing. */ bltz k0,unrecoverable sll k0,1 /* * k0 has C0_ERRCTL << 2, which puts 'IC' at bit 31. If an * Icache error isn't indicated, I'm not sure why we got here. * Consider that case "unrecoverable" for now. */ bgez k0,unrecoverable attempt_icache_recovery: /* * External icache errors are due to uncorrectable ECC errors * in the L2 cache or Memory Controller and cannot be * recovered here. */ mfc0 k0,C0_CERR_I /* delay slot */ li k1,1 << 26 /* ICACHE_EXTERNAL */ and k1,k0 bnez k1,unrecoverable andi k0,0x1fe0 /* * Since the error is internal, the 'IDX' field from * CacheErr-I is valid and we can just invalidate all blocks * in that set. 
*/ cache Index_Invalidate_I,(0<<13)(k0) cache Index_Invalidate_I,(1<<13)(k0) cache Index_Invalidate_I,(2<<13)(k0) cache Index_Invalidate_I,(3<<13)(k0) /* Ought to log this recovered icache error */ recovered: /* Restore the saved registers */ ld k0,0x170($0) ld k1,0x178($0) eret unrecoverable: /* Unrecoverable Icache or Dcache error; log it and/or fail */ j handle_vec2_sb1 nop #endif END(except_vec2_sb1) LEAF(handle_vec2_sb1) mfc0 k0,CP0_CONFIG li k1,~CONF_CM_CMASK and k0,k0,k1 ori k0,k0,CONF_CM_UNCACHED mtc0 k0,CP0_CONFIG SSNOP SSNOP SSNOP SSNOP bnezl $0, 1f 1: mfc0 k0, CP0_STATUS sll k0, k0, 3 # check CU0 (kernel?) bltz k0, 2f nop /* Get a valid Kseg0 stack pointer. Any task's stack pointer * will do, although if we ever want to resume execution we * better not have corrupted any state. */ get_saved_sp move sp, k1 2: j sb1_cache_error nop END(handle_vec2_sb1)
AirFortressIlikara/LS2K0300-linux-4.19
3,471
arch/mips/mm/page-funcs.S
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Micro-assembler generated clear_page/copy_page functions. * * Copyright (C) 2012 MIPS Technologies, Inc. * Copyright (C) 2012 Ralf Baechle <ralf@linux-mips.org> */ #include <asm/asm.h> #include <asm/export.h> #include <asm/regdef.h> #ifdef CONFIG_SIBYTE_DMA_PAGEOPS #define cpu_clear_page_function_name clear_page_cpu #define cpu_copy_page_function_name copy_page_cpu #else #define cpu_clear_page_function_name clear_page #define cpu_copy_page_function_name copy_page #endif #ifdef CONFIG_PAGE_SIZE_4KB #define PAGE_SHIFT 12 #endif #ifdef CONFIG_PAGE_SIZE_8KB #define PAGE_SHIFT 13 #endif #ifdef CONFIG_PAGE_SIZE_16KB #define PAGE_SHIFT 14 #endif #ifdef CONFIG_PAGE_SIZE_32KB #define PAGE_SHIFT 15 #endif #ifdef CONFIG_PAGE_SIZE_64KB #define PAGE_SHIFT 16 #endif #define PAGE_SIZE (1 << PAGE_SHIFT) #if !defined(CONFIG_CPU_LOONGSON3) && !defined(CONFIG_CPU_LOONGSON2K) /* * Maximum sizes: * * R4000 128 bytes S-cache: 0x058 bytes * R4600 v1.7: 0x05c bytes * R4600 v2.0: 0x060 bytes * With prefetching, 16 word strides 0x120 bytes */ EXPORT(__clear_page_start) LEAF(cpu_clear_page_function_name) EXPORT_SYMBOL(cpu_clear_page_function_name) 1: j 1b /* Dummy, will be replaced. */ .space 288 END(cpu_clear_page_function_name) EXPORT(__clear_page_end) /* * Maximum sizes: * * R4000 128 bytes S-cache: 0x11c bytes * R4600 v1.7: 0x080 bytes * R4600 v2.0: 0x07c bytes * With prefetching, 16 word strides 0x540 bytes */ EXPORT(__copy_page_start) LEAF(cpu_copy_page_function_name) EXPORT_SYMBOL(cpu_copy_page_function_name) 1: j 1b /* Dummy, will be replaced. */ .space 1344 END(cpu_copy_page_function_name) EXPORT(__copy_page_end) #endif #if defined(CONFIG_CPU_LOONGSON3) || defined(CONFIG_CPU_LOONGSON2K) /* * As we are sharing code base with the mips32 tree (which use the o32 ABI * register definitions). 
We need to redefine the register definitions from * the n64 ABI register naming to the o32 ABI register naming. */ #undef t0 #undef t1 #undef t2 #undef t3 #define t0 $8 #define t1 $9 #define t2 $10 #define t3 $11 #define t4 $12 #define t5 $13 #define t6 $14 #define t7 $15 .set noreorder .align 5 LEAF(clear_page) EXPORT_SYMBOL(clear_page) ori a2, a0, (PAGE_SIZE - 128) daddiu a2, a2, 128 1: gssq zero, zero, 0(a0) gssq zero, zero, 16(a0) gssq zero, zero, 32(a0) gssq zero, zero, 48(a0) daddiu a0, a0, 128 gssq zero, zero, -64(a0) gssq zero, zero, -48(a0) gssq zero, zero, -32(a0) bne a2, a0, 1b gssq zero, zero, -16(a0) jr ra nop END(clear_page) .set noreorder .align 5 LEAF(copy_page) EXPORT_SYMBOL(copy_page) ori a2, a0, (PAGE_SIZE - 128) daddiu a2, a2, 128 1: gslq t1, t0, 0(a1) gslq t3, t2, 16(a1) gslq t5, t4, 32(a1) gslq t7, t6, 48(a1) gssq t1, t0, 0(a0) gslq t1, t0, 64(a1) gssq t3, t2, 16(a0) gslq t3, t2, 80(a1) gssq t5, t4, 32(a0) gslq t5, t4, 96(a1) gssq t7, t6, 48(a0) gslq t7, t6, 112(a1) daddiu a0, a0, 128 daddiu a1, a1, 128 gssq t1, t0, -64(a0) gssq t3, t2, -48(a0) gssq t5, t4, -32(a0) bne a2, a0, 1b gssq t7, t6, -16(a0) jr ra nop END(copy_page) #endif
AirFortressIlikara/LS2K0300-linux-4.19
1,501
arch/mips/mm/cex-oct.S
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2006 Cavium Networks * Cache error handler */ #include <asm/asm.h> #include <asm/regdef.h> #include <asm/mipsregs.h> #include <asm/stackframe.h> /* * Handle cache error. Indicate to the second level handler whether * the exception is recoverable. */ LEAF(except_vec2_octeon) .set push .set mips64r2 .set noreorder .set noat /* due to an errata we need to read the COP0 CacheErr (Dcache) * before any cache/DRAM access */ rdhwr k0, $0 /* get core_id */ PTR_LA k1, cache_err_dcache sll k0, k0, 3 PTR_ADDU k1, k0, k1 /* k1 = &cache_err_dcache[core_id] */ dmfc0 k0, CP0_CACHEERR, 1 sd k0, (k1) dmtc0 $0, CP0_CACHEERR, 1 /* check whether this is a nested exception */ mfc0 k1, CP0_STATUS andi k1, k1, ST0_EXL beqz k1, 1f nop j cache_parity_error_octeon_non_recoverable nop /* exception is recoverable */ 1: j handle_cache_err nop .set pop END(except_vec2_octeon) /* We need to jump to handle_cache_err so that the previous handler * can fit within 0x80 bytes. We also move from 0xFFFFFFFFAXXXXXXX * space (uncached) to the 0xFFFFFFFF8XXXXXXX space (cached). */ LEAF(handle_cache_err) .set push .set noreorder .set noat SAVE_ALL KMODE jal cache_parity_error_octeon_recoverable nop j ret_from_exception nop .set pop END(handle_cache_err)
AirFortressIlikara/LS2K0300-linux-4.19
7,737
arch/mips/netlogic/common/reset.S
/* * Copyright 2003-2013 Broadcom Corporation. * All Rights Reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the Broadcom * license below: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <asm/asm.h> #include <asm/asm-offsets.h> #include <asm/cpu.h> #include <asm/cacheops.h> #include <asm/regdef.h> #include <asm/mipsregs.h> #include <asm/stackframe.h> #include <asm/asmmacro.h> #include <asm/addrspace.h> #include <asm/netlogic/common.h> #include <asm/netlogic/xlp-hal/iomap.h> #include <asm/netlogic/xlp-hal/xlp.h> #include <asm/netlogic/xlp-hal/sys.h> #include <asm/netlogic/xlp-hal/cpucontrol.h> #define SYS_CPU_COHERENT_BASE CKSEG1ADDR(XLP_DEFAULT_IO_BASE) + \ XLP_IO_SYS_OFFSET(0) + XLP_IO_PCI_HDRSZ + \ SYS_CPU_NONCOHERENT_MODE * 4 /* Enable XLP features and workarounds in the LSU */ .macro xlp_config_lsu li t0, LSU_DEFEATURE mfcr t1, t0 lui t2, 0x4080 /* Enable Unaligned Access, L2HPE */ or t1, t1, t2 mtcr t1, t0 li t0, ICU_DEFEATURE mfcr t1, t0 ori t1, 0x1000 /* Enable Icache partitioning */ mtcr t1, t0 li t0, SCHED_DEFEATURE lui t1, 0x0100 /* Disable BRU accepting ALU ops */ mtcr t1, t0 .endm /* * Allow access to physical mem >64G by enabling ELPA in PAGEGRAIN * register. This is needed before going to C code since the SP can * in this region. Called from all HW threads. */ .macro xlp_early_mmu_init mfc0 t0, CP0_PAGEMASK, 1 li t1, (1 << 29) /* ELPA bit */ or t0, t1 mtc0 t0, CP0_PAGEMASK, 1 .endm /* * L1D cache has to be flushed before enabling threads in XLP. * On XLP8xx/XLP3xx, we do a low level flush using processor control * registers. On XLPII CPUs, usual cache instructions work. 
*/ .macro xlp_flush_l1_dcache mfc0 t0, CP0_PRID andi t0, t0, PRID_IMP_MASK slt t1, t0, 0x1200 beqz t1, 15f nop /* XLP8xx low level cache flush */ li t0, LSU_DEBUG_DATA0 li t1, LSU_DEBUG_ADDR li t2, 0 /* index */ li t3, 0x1000 /* loop count */ 11: sll v0, t2, 5 mtcr zero, t0 ori v1, v0, 0x3 /* way0 | write_enable | write_active */ mtcr v1, t1 12: mfcr v1, t1 andi v1, 0x1 /* wait for write_active == 0 */ bnez v1, 12b nop mtcr zero, t0 ori v1, v0, 0x7 /* way1 | write_enable | write_active */ mtcr v1, t1 13: mfcr v1, t1 andi v1, 0x1 /* wait for write_active == 0 */ bnez v1, 13b nop addi t2, 1 bne t3, t2, 11b nop b 17f nop /* XLPII CPUs, Invalidate all 64k of L1 D-cache */ 15: li t0, 0x80000000 li t1, 0x80010000 16: cache Index_Writeback_Inv_D, 0(t0) addiu t0, t0, 32 bne t0, t1, 16b nop 17: .endm /* * nlm_reset_entry will be copied to the reset entry point for * XLR and XLP. The XLP cores start here when they are woken up. This * is also the NMI entry point. * * We use scratch reg 6/7 to save k0/k1 and check for NMI first. * * The data corresponding to reset/NMI is stored at RESET_DATA_PHYS * location, this will have the thread mask (used when core is woken up) * and the current NMI handler in case we reached here for an NMI. * * When a core or thread is newly woken up, it marks itself ready and * loops in a 'wait'. 
When the CPU really needs waking up, we send an NMI * IPI to it, with the NMI handler set to prom_boot_secondary_cpus */ .set noreorder .set noat .set arch=xlr /* for mfcr/mtcr, XLR is sufficient */ FEXPORT(nlm_reset_entry) dmtc0 k0, $22, 6 dmtc0 k1, $22, 7 mfc0 k0, CP0_STATUS li k1, 0x80000 and k1, k0, k1 beqz k1, 1f /* go to real reset entry */ nop li k1, CKSEG1ADDR(RESET_DATA_PHYS) /* NMI */ ld k0, BOOT_NMI_HANDLER(k1) jr k0 nop 1: /* Entry point on core wakeup */ mfc0 t0, CP0_PRID /* processor ID */ andi t0, PRID_IMP_MASK li t1, 0x1500 /* XLP 9xx */ beq t0, t1, 2f /* does not need to set coherent */ nop li t1, 0x1300 /* XLP 5xx */ beq t0, t1, 2f /* does not need to set coherent */ nop /* set bit in SYS coherent register for the core */ mfc0 t0, CP0_EBASE mfc0 t1, CP0_EBASE srl t1, 5 andi t1, 0x3 /* t1 <- node */ li t2, 0x40000 mul t3, t2, t1 /* t3 = node * 0x40000 */ srl t0, t0, 2 and t0, t0, 0x7 /* t0 <- core */ li t1, 0x1 sll t0, t1, t0 nor t0, t0, zero /* t0 <- ~(1 << core) */ li t2, SYS_CPU_COHERENT_BASE add t2, t2, t3 /* t2 <- SYS offset for node */ lw t1, 0(t2) and t1, t1, t0 sw t1, 0(t2) /* read back to ensure complete */ lw t1, 0(t2) sync 2: /* Configure LSU on Non-0 Cores. */ xlp_config_lsu /* FALL THROUGH */ /* * Wake up sibling threads from the initial thread in a core. */ EXPORT(nlm_boot_siblings) /* core L1D flush before enable threads */ xlp_flush_l1_dcache /* save ra and sp, will be used later (only for boot cpu) */ dmtc0 ra, $22, 6 dmtc0 sp, $22, 7 /* Enable hw threads by writing to MAP_THREADMODE of the core */ li t0, CKSEG1ADDR(RESET_DATA_PHYS) lw t1, BOOT_THREAD_MODE(t0) /* t1 <- thread mode */ li t0, ((CPU_BLOCKID_MAP << 8) | MAP_THREADMODE) mfcr t2, t0 or t2, t2, t1 mtcr t2, t0 /* * The new hardware thread starts at the next instruction * For all the cases other than core 0 thread 0, we will * jump to the secondary wait function. * NOTE: All GPR contents are lost after the mtcr above! 
*/ mfc0 v0, CP0_EBASE andi v0, 0x3ff /* v0 <- node/core */ /* * Errata: to avoid potential live lock, setup IFU_BRUB_RESERVE * when running 4 threads per core */ andi v1, v0, 0x3 /* v1 <- thread id */ bnez v1, 2f nop /* thread 0 of each core. */ li t0, CKSEG1ADDR(RESET_DATA_PHYS) lw t1, BOOT_THREAD_MODE(t0) /* t1 <- thread mode */ subu t1, 0x3 /* 4-thread per core mode? */ bnez t1, 2f nop li t0, IFU_BRUB_RESERVE li t1, 0x55 mtcr t1, t0 _ehb 2: beqz v0, 4f /* boot cpu (cpuid == 0)? */ nop /* setup status reg */ move t1, zero #ifdef CONFIG_64BIT ori t1, ST0_KX #endif mtc0 t1, CP0_STATUS xlp_early_mmu_init /* mark CPU ready */ li t3, CKSEG1ADDR(RESET_DATA_PHYS) ADDIU t1, t3, BOOT_CPU_READY sll v1, v0, 2 PTR_ADDU t1, v1 li t2, 1 sw t2, 0(t1) /* Wait until NMI hits */ 3: wait b 3b nop /* * For the boot CPU, we have to restore ra and sp and return, rest * of the registers will be restored by the caller */ 4: dmfc0 ra, $22, 6 dmfc0 sp, $22, 7 jr ra nop EXPORT(nlm_reset_entry_end) LEAF(nlm_init_boot_cpu) #ifdef CONFIG_CPU_XLP xlp_config_lsu xlp_early_mmu_init #endif jr ra nop END(nlm_init_boot_cpu)
AirFortressIlikara/LS2K0300-linux-4.19
4,239
arch/mips/netlogic/common/smpboot.S
/* * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights * reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the NetLogic * license below: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <asm/asm.h> #include <asm/asm-offsets.h> #include <asm/regdef.h> #include <asm/mipsregs.h> #include <asm/stackframe.h> #include <asm/asmmacro.h> #include <asm/addrspace.h> #include <asm/netlogic/common.h> #include <asm/netlogic/xlp-hal/iomap.h> #include <asm/netlogic/xlp-hal/xlp.h> #include <asm/netlogic/xlp-hal/sys.h> #include <asm/netlogic/xlp-hal/cpucontrol.h> .set noreorder .set noat .set arch=xlr /* for mfcr/mtcr, XLR is sufficient */ /* Called by the boot cpu to wake up its sibling threads */ NESTED(xlp_boot_core0_siblings, PT_SIZE, sp) /* CPU register contents lost when enabling threads, save them first */ SAVE_ALL sync /* find the location to which nlm_boot_siblings was relocated */ li t0, CKSEG1ADDR(RESET_VEC_PHYS) PTR_LA t1, nlm_reset_entry PTR_LA t2, nlm_boot_siblings dsubu t2, t1 daddu t2, t0 /* call it */ jalr t2 nop RESTORE_ALL jr ra nop END(xlp_boot_core0_siblings) NESTED(nlm_boot_secondary_cpus, 16, sp) /* Initialize CP0 Status */ move t1, zero #ifdef CONFIG_64BIT ori t1, ST0_KX #endif mtc0 t1, CP0_STATUS PTR_LA t1, nlm_next_sp PTR_L sp, 0(t1) PTR_LA t1, nlm_next_gp PTR_L gp, 0(t1) /* a0 has the processor id */ mfc0 a0, CP0_EBASE andi a0, 0x3ff /* a0 <- node/core */ PTR_LA t0, nlm_early_init_secondary jalr t0 nop PTR_LA t0, smp_bootstrap jr t0 nop END(nlm_boot_secondary_cpus) /* * In case of RMIboot bootloader which is used on XLR boards, the CPUs * be already woken up and waiting in bootloader code. * This will get them out of the bootloader code and into linux. Needed * because the bootloader area will be taken and initialized by linux. 
*/ NESTED(nlm_rmiboot_preboot, 16, sp) mfc0 t0, $15, 1 /* read ebase */ andi t0, 0x1f /* t0 has the processor_id() */ andi t2, t0, 0x3 /* thread num */ sll t0, 2 /* offset in cpu array */ li t3, CKSEG1ADDR(RESET_DATA_PHYS) ADDIU t1, t3, BOOT_CPU_READY ADDU t1, t0 li t3, 1 sw t3, 0(t1) bnez t2, 1f /* skip thread programming */ nop /* for thread id != 0 */ /* * XLR MMU setup only for first thread in core */ li t0, 0x400 mfcr t1, t0 li t2, 6 /* XLR thread mode mask */ nor t3, t2, zero and t2, t1, t2 /* t2 - current thread mode */ li v0, CKSEG1ADDR(RESET_DATA_PHYS) lw v1, BOOT_THREAD_MODE(v0) /* v1 - new thread mode */ sll v1, 1 beq v1, t2, 1f /* same as request value */ nop /* nothing to do */ and t2, t1, t3 /* mask out old thread mode */ or t1, t2, v1 /* put in new value */ mtcr t1, t0 /* update core control */ /* wait for NMI to hit */ 1: wait b 1b nop END(nlm_rmiboot_preboot)
AirFortressIlikara/LS2K0300-linux-4.19
13,320
arch/mips/loongson64/loongson-3/loongson3-memcpy.S
/* ============================================================================ Name : memcpy.S Author : Heiher <root@heiher.info> Chen Jie <chenj@lemote.com> Version : 20140307 Copyright : GPLv2 Description : The memcpy for Loongson 3. ============================================================================ */ #define LINUX_KERNEL #ifndef LINUX_KERNEL #include <sys/asm.h> #include <sys/regdef.h> #define EXC(inst_reg,addr,handler) \ inst_reg, addr; #define EXCQ(inst,reg1,reg2,addr,handler) \ inst reg1, reg2, addr; #if _MIPS_SIM == _ABI64 #define CONFIG_64BIT #else #define CONFIG_32BIT #endif #define FEXPORT(symbol) #else /* LINUX_KERNEL */ #include <asm/alternative-asm.h> #include <asm/asm.h> #include <asm/asm-offsets.h> #include <asm/cpu.h> #include <asm/export.h> #include <asm/regdef.h> #define EXC(inst_reg,addr,handler) \ 9: inst_reg, addr; \ .section __ex_table,"a"; \ PTR 9b, handler; \ .previous #define EXCQ(inst,reg1,reg2,addr,handler) \ 9: inst reg1, reg2, addr; \ .section __ex_table,"a"; \ PTR 9b, handler; \ .previous #endif #define dst a0 #define src a1 #define len a2 #define rem t8 /* * 64bit ABI vs 32bit ABI */ #ifdef CONFIG_64BIT #define ADDU daddu #define ADDI daddi #define SUBU dsubu #define SLL dsll #define SRL dsrl #define PTR_LA dla #define LOAD ld /* * As we are sharing code base with the mips32 tree (which use the o32 ABI * register definitions). We need to redefine the register definitions from * the n64 ABI register naming to the o32 ABI register naming. 
*/ #undef t0 #undef t1 #undef t2 #undef t3 #define t0 $8 #define t1 $9 #define t2 $10 #define t3 $11 #define t4 $12 #define t5 $13 #define t6 $14 #define t7 $15 #else #define ADDU addu #define ADDI addi #define SUBU subu #define SLL sll #define SRL srl #define PTR_LA la #define LOAD lw #endif /* CONFIG_64BIT */ #define LDFIRST ldr #define LDREST ldl #define SDFIRST sdr #define SDREST sdl #define LWFIRST lwr #define LWREST lwl #define SWFIRST swr #define SWREST swl #define _MIPS_LSE_CPUCFG 1 /* void * memcpy (void *s1, const void *s2, size_t n); */ .text .align 5 .set noreorder .set noat .set loongson-ext2 LEAF(memcpy) /* a0=dst a1=src a2=len */ EXPORT_SYMBOL(memcpy) move v0, dst .L__memcpy: FEXPORT(__copy_user) EXPORT_SYMBOL(__copy_user) /* if less then 0x28 bytes */ sltu t2, a2, 0x28 andi t0, dst, 0xf bnez t2, .L_memcpy_less andi t1, src, 0xf beqz t0, 1f ADDI rem, t0, -0x10 /* upgrade */ EXC( LDFIRST t3, 0(src), .Ll_exc) sltu t4, t0, 0x8 EXC( LDREST t3, 7(src), .Ll_exc_copy) SUBU src, rem EXC( SDFIRST t3, 0(dst), .Ls_exc) SUBU dst, rem beqz t4, 1f ADDU len, rem EXC( LDFIRST t3, -8(src), .Ll_exc_a8) EXC( LDREST t3, -1(src), .Ll_exc_copy_a8) EXC( sd t3, -8(dst), .Ls_exc_p8) 1: andi t7, src, 0x7 beq t0, t1, .L_memcpy_16_16 nop /* t7 is $15 */ ALTERNATIVE "bnez $15, .L_memcpy_16_4_2_1",\ "bnez $15, .L_memcpy_16_4_2_1_unalign",\ _MIPS_LSE_CPUCFG .L_memcpy_16_8: SRL t0, len, 6 # 64B per iteration beqz t0, 2f and rem, len, 0x3f .align 4 1: EXC( ld t4, (16 * 0)(src), .Ll_exc) EXC( ld t7, (8 + 16 * 0)(src), .Ll_exc_copy) EXC( ld t2, (16 * 1)(src), .Ll_exc_copy) EXC( ld t3, (8 + 16 * 1)(src), .Ll_exc_copy) EXC( ld t0, (16 * 2)(src), .Ll_exc_copy) EXC( ld t1, (8 + 16 * 2)(src), .Ll_exc_copy) ADDI len, -16 * 4 EXCQ( gssq, t7, t4, (16 * 0)(dst), .Ls_exc_p64) EXC( ld t4, (16 * 3)(src), .Ll_exc_copy) EXC( ld t7, (8 + 16 * 3)(src), .Ll_exc_copy) ADDU src, 16 * 4 ADDU dst, 16 * 4 EXCQ( gssq, t3, t2, (-16 * 3)(dst), .Ls_exc_p48) EXCQ( gssq, t1, t0, (-16 * 2)(dst), .Ls_exc_p32) 
EXCQ( gssq, t7, t4, (-16 * 1)(dst), .Ls_exc_p16) bne len, rem, 1b nop beqz len, .Ldone 2: sltu t0, len, 32 bnez t0, 3f and rem, len, 0xf EXC( ld t2, (16 * 0)(src), .Ll_exc) EXC( ld t3, (8 + 16 * 0)(src), .Ll_exc_copy) EXC( ld t0, (16 * 1)(src), .Ll_exc_copy) EXC( ld t1, (8 + 16 * 1)(src), .Ll_exc_copy) ADDI len, -16 * 2 ADDU src, 16 * 2 EXCQ( gssq, t3, t2, (16 * 0)(dst), .Ls_exc_p32) EXCQ( gssq, t1, t0, (16 * 1)(dst), .Ls_exc_p16) beqz len, .Ldone ADDU dst, 32 3: /* copy less than 32B */ beq rem, len, .L_memcpy_1_15B_8B_aligned nop EXC( ld t2, 0(src), .Ll_exc) EXC( ld t3, 8(src), .Ll_exc_copy) ADDI len, -16 ADDU src, 16 EXCQ( gssq, t3, t2, 0(dst), .Ls_exc_p16) bnez len, .L_memcpy_1_15B_8B_aligned ADDU dst, 16 jr ra nop .L_memcpy_16_16: .align 4 SRL t0, len, 7 # 128B per iteration beqz t0, 5f and rem, len, 0x7f 6: EXCQ( gslq, t7, t4, (16 * 0)(src), .Ll_exc) EXCQ( gslq, t3, t2, (16 * 1)(src), .Ll_exc_copy) EXCQ( gslq, t1, t0, (16 * 2)(src), .Ll_exc_copy) ADDI len, -16 * 8 EXCQ( gssq, t7, t4, (16 * 0)(dst), .Ls_exc_p128) EXCQ( gslq, t7, t4, (16 * 3)(src), .Ll_exc_copy) EXCQ( gssq, t3, t2, (16 * 1)(dst), .Ls_exc_p112) EXCQ( gslq, t3, t2, (16 * 4)(src), .Ll_exc_copy) EXCQ( gssq, t1, t0, (16 * 2)(dst), .Ls_exc_p96) EXCQ( gslq, t1, t0, (16 * 5)(src), .Ll_exc_copy) EXCQ( gssq, t7, t4, (16 * 3)(dst), .Ls_exc_p80) EXCQ( gslq, t7, t4, (16 * 6)(src), .Ll_exc_copy) EXCQ( gssq, t3, t2, (16 * 4)(dst), .Ls_exc_p64) EXCQ( gslq, t3, t2, (16 * 7)(src), .Ll_exc_copy) EXCQ( gssq, t1, t0, (16 * 5)(dst), .Ls_exc_p48) EXCQ( gssq, t7, t4, (16 * 6)(dst), .Ls_exc_p32) EXCQ( gssq, t3, t2, (16 * 7)(dst), .Ls_exc_p16) ADDU src, 16 * 8 bne len, rem, 6b ADDU dst, 16 * 8 beqz len, .Ldone 5: SRL t0, len, 6 # 64B per iteration beqz t0, 2f and rem, len, 0x3f 1: EXCQ( gslq, t7, t4, (16 * 0)(src), .Ll_exc) EXCQ( gslq, t3, t2, (16 * 1)(src), .Ll_exc_copy) EXCQ( gslq, t1, t0, (16 * 2)(src), .Ll_exc_copy) ADDI len, -16 * 4 EXCQ( gssq, t7, t4, (16 * 0)(dst), .Ls_exc_p64) EXCQ( gslq, t7, t4, (16 * 3)(src), 
.Ll_exc_copy) ADDU src, 16 * 4 ADDU dst, 16 * 4 EXCQ( gssq, t3, t2, (-16 * 3)(dst), .Ls_exc_p48) EXCQ( gssq, t1, t0, (-16 * 2)(dst), .Ls_exc_p32) EXCQ( gssq, t7, t4, (-16 * 1)(dst), .Ls_exc_p16) bne len, rem, 1b nop beqz len, .Ldone 2: sltu t0, len, 32 bnez t0, 3f and rem, len, 0xf EXCQ( gslq, t3, t2, (16 * 0)(src), .Ll_exc) EXCQ( gslq, t1, t0, (16 * 1)(src), .Ll_exc_copy) ADDI len, -16 * 2 ADDU src, 32 EXCQ( gssq, t3, t2, (16 * 0)(dst), .Ls_exc_p32) EXCQ( gssq, t1, t0, (16 * 1)(dst), .Ls_exc_p16) beqz len, .Ldone ADDU dst, 32 3: /* copy less than 32B */ beq rem, len, .L_memcpy_1_15B_8B_aligned nop EXCQ( gslq, t3, t2, 0(src), .Ll_exc) ADDI len, -16 ADDU src, 16 EXCQ( gssq, t3, t2, 0(dst), .Ls_exc_p16) beqz len, .Ldone ADDU dst, 16 /* * copy 1 - 15B, src & dst are 8B aligned */ .L_memcpy_1_15B_8B_aligned: sltu t0, len, 0x9 bnez t0, 1f nop EXC( ld t1, (src), .Ll_exc) EXC( sd t1, (dst), .Ls_exc) 1: ADDU src, len ADDU dst, len EXC( LDREST t1, -1(src), .Ll_exc_copy_len) EXC( SDREST t1, -1(dst), .Ls_exc) .Ldone: jr ra move len, zero .L_memcpy_16_4_2_1_unalign: SRL t0, len, 6 # 64B per iteration beqz t0, .L_memcpy_32 and rem, len, 0x3f .align 4 1: EXCQ( gslq, t7, t4, (16 * 0)(src), .Ll_exc) EXCQ( gslq, t3, t2, (16 * 1)(src), .Ll_exc_copy) EXCQ( gslq, t1, t0, (16 * 2)(src), .Ll_exc_copy) ADDI len, -16 * 4 EXCQ( gssq, t7, t4, (16 * 0)(dst), .Ls_exc_p64) EXCQ( gslq, t7, t4, (16 * 3)(src), .Ll_exc_copy) ADDU src, 16 * 4 ADDU dst, 16 * 4 EXCQ( gssq, t3, t2, (-16 * 3)(dst), .Ls_exc_p48) EXCQ( gssq, t1, t0, (-16 * 2)(dst), .Ls_exc_p32) EXCQ( gssq, t7, t4, (-16 * 1)(dst), .Ls_exc_p16) bne len, rem, 1b nop beqz len, .Ldone nop .L_memcpy_32: SRL t0, len, 5 # 32B per iteration beqz t0, 2f and rem, len, 0x1f 1: EXCQ( gslq, t7, t4, 0(src), .Ll_exc) EXCQ( gslq, t3, t2, 16(src), .Ll_exc_copy) ADDI len, -16 * 2 ADDU src, 16 * 2 EXCQ( gssq, t7, t4, (16 * 0)(dst), .Ls_exc_p32) EXCQ( gssq, t3, t2, (16 * 1)(dst), .Ls_exc_p16) bne len, rem, 1b ADDU dst, 16 * 2 beqz len, .Ldone 2: and rem, 
len, 0xf beq rem, len, .L_memcpy_less nop EXCQ( gslq, t1, t0, 0(src), .Ll_exc) ADDI len, -16 ADDU src, 16 EXCQ( gssq, t1, t0, 0(dst), .Ls_exc_p16) beqz len, .Ldone ADDU dst, 16 b .L_memcpy_less nop .L_memcpy_16_4_2_1: SRL t0, len, 5 # 32B per iteration beqz t0, 2f and rem, len, 0x1f 1: EXC( LDFIRST t4, 0(src), .Ll_exc) EXC( LDFIRST t7, 8(src), .Ll_exc_copy) ADDI len, -16 * 2 EXC( LDREST t4, 7(src), .Ll_exc_copy) EXC( LDREST t7, 15(src), .Ll_exc_copy) EXC( LDFIRST t2, 16(src), .Ll_exc_copy) EXC( LDFIRST t3, 24(src), .Ll_exc_copy) EXC( LDREST t2, 23(src), .Ll_exc_copy) EXC( LDREST t3, 31(src), .Ll_exc_copy) ADDU src, 16 * 2 EXCQ( gssq, t7, t4, (16 * 0)(dst), .Ls_exc_p32) EXCQ( gssq, t3, t2, (16 * 1)(dst), .Ls_exc_p16) bne len, rem, 1b ADDU dst, 16 * 2 beqz len, .Ldone 2: and rem, len, 0xf beq rem, len, .L_memcpy_less nop EXC( LDFIRST t0, 0(src), .Ll_exc) EXC( LDFIRST t1, 8(src), .Ll_exc_copy) ADDI len, -16 EXC( LDREST t0, 7(src), .Ll_exc_copy) EXC( LDREST t1, 15(src), .Ll_exc_copy) ADDU src, 16 EXCQ( gssq, t1, t0, 0(dst), .Ls_exc_p16) beqz len, .Ldone ADDU dst, 16 .L_memcpy_less: andi t0, len, 0x7 beq t0, len, 2f andi t4, len, 0x3 .set reorder SUBU t1, len, t0 ADDU dst, t1 ADDU src, t1 .set at=t2 PTR_LA t3, 1f .set noat SLL t2, t1, 0x1 /* 4 * 4B instructions move 8B data*/ SUBU t3, t2 jr t3 .set noreorder EXC( LDFIRST t1, (-8 * 4)(src), .Ll_exc_a32) EXC( LDREST t1, (-8 * 4 + 7)(src), .Ll_exc_copy_a32) EXC( SDFIRST t1, (-8 * 4)(dst), .Ls_exc) EXC( SDREST t1, (-8 * 4 + 7)(dst), .Ls_exc) EXC( LDFIRST t2, (-8 * 3)(src), .Ll_exc_a24) EXC( LDREST t2, (-8 * 3 + 7)(src), .Ll_exc_copy_a24) EXC( SDFIRST t2, (-8 * 3)(dst), .Ls_exc_r24) EXC( SDREST t2, (-8 * 3 + 7)(dst), .Ls_exc_r24) EXC( LDFIRST t3, (-8 * 2)(src), .Ll_exc_a16) EXC( LDREST t3, (-8 * 2 + 7)(src), .Ll_exc_copy_a16) EXC( SDFIRST t3, (-8 * 2)(dst), .Ls_exc_r16) EXC( SDREST t3, (-8 * 2 + 7)(dst), .Ls_exc_r16) EXC( LDFIRST t1, (-8 * 1)(src), .Ll_exc_a8) EXC( LDREST t1, (-8 * 1 + 7)(src), .Ll_exc_copy_a8) EXC( SDFIRST 
t1, (-8 * 1)(dst), .Ls_exc_r8) EXC( SDREST t1, (-8 * 1 + 7)(dst), .Ls_exc_r8) 1: beqz t0, .Ldone ADDU src, t0 ADDU dst, t0 EXC( LDFIRST t2, -8(src), .Ll_exc_a8) EXC( LDREST t2, -1(src), .Ll_exc_copy_a8) EXC( SDFIRST t2, -8(dst), .Ls_exc_r0) EXC( SDREST t2, -1(dst), .Ls_exc_r0) jr ra move len, zero 2: beq t4, len, 3f nop EXC( LWFIRST t2, (src), .Ll_exc) EXC( LWREST t2, 3(src), .Ll_exc_copy) ADDU src, len EXC( SWFIRST t2, (dst), .Ls_exc) EXC( SWREST t2, 3(dst), .Ls_exc) beqz t4, .Ldone ADDU dst, len EXC( LWFIRST t1, -4(src), .Ll_exc_a4) EXC( LWREST t1, -1(src), .Ll_exc_copy_a4) EXC( SWFIRST t1, -4(dst), .Ls_exc) EXC( SWREST t1, -1(dst), .Ls_exc) jr ra move len, zero 3: beqz len, .Ldone ADDU t0, src, len 1: EXC( lb t2, (src), .Ll_exc) ADDU src, 1 EXC( sb t2, (dst), .Ls_exc) bne t0, src, 1b ADDU dst, 1 jr ra move len, zero END(memcpy) #ifdef LINUX_KERNEL #define LEXC_a(n) \ .Ll_exc_copy_a ## n: \ ADDI src, -n; \ b .Ll_exc_copy; \ ADDI dst, -n; \ .Ll_exc_a ## n: \ ADDI src, -n; \ b .Ll_exc; \ ADDI dst, -n; .Ll_exc_copy_len: SUBU src, len b .Ll_exc_copy SUBU dst, len LEXC_a(4) LEXC_a(8) LEXC_a(16) LEXC_a(24) LEXC_a(32) LEXC_a(40) LEXC_a(48) .Ll_exc_copy: /* * Copy bytes from src until faulting load address (or until a * lb faults) * * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28) * may be more than a byte beyond the last address. * Hence, the lb below may get an exception. 
* * Assumes src < THREAD_BUADDR($28) */ LOAD t0, TI_TASK($28) nop LOAD t0, THREAD_BUADDR(t0) 1: EXC( lb t1, 0(src), .Ll_exc) ADDU src, 1 sb t1, 0(dst) # can't fault -- we're copy_from_user bne src, t0, 1b ADDU dst, 1 .Ll_exc: LOAD t0, TI_TASK($28) nop LOAD t0, THREAD_BUADDR(t0) # t0 is just past last good address nop SUBU len, AT, t0 # len number of uncopied bytes jr ra nop #define SEXC_p(n) \ .Ls_exc_p ## n: \ jr ra; \ ADDU len, n; #define SEXC_r(n) \ .Ls_exc_r ## n: \ jr ra; \ ADDI len, t0, n; SEXC_p(1) SEXC_p(8) SEXC_p(16) SEXC_p(32) SEXC_p(48) SEXC_p(64) SEXC_p(80) SEXC_p(96) SEXC_p(112) SEXC_p(128) SEXC_r(0) SEXC_r(8) SEXC_r(16) SEXC_r(24) .Ls_exc: jr ra nop .align 5 LEAF(memmove) EXPORT_SYMBOL(memmove) ADDU t0, a0, a2 ADDU t1, a1, a2 sltu t0, a1, t0 # dst + len <= src -> memcpy sltu t1, a0, t1 # dst >= src + len -> memcpy and t0, t1 beqz t0, .L__memcpy move v0, a0 /* return value */ beqz a2, .Lr_out END(memmove) /* fall through to __rmemcpy */ LEAF(__rmemcpy) /* a0=dst a1=src a2=len */ sltu t0, a1, a0 beqz t0, .Lr_end_bytes_up # src >= dst nop ADDU a0, a2 # dst = dst + len ADDU a1, a2 # src = src + len .Lr_end_bytes: lb t0, -1(a1) ADDI a2, -0x1 sb t0, -1(a0) ADDI a1, -0x1 bnez a2, .Lr_end_bytes ADDI a0, -0x1 .Lr_out: jr ra move a2, zero .Lr_end_bytes_up: lb t0, (a1) ADDI a2, -0x1 sb t0, (a0) ADDU a1, 0x1 bnez a2, .Lr_end_bytes_up ADDU a0, 0x1 jr ra move a2, zero END(__rmemcpy) #endif
AirFortressIlikara/LS2K0300-linux-4.19
4,516
arch/mips/loongson64/loongson-3/lasx.S
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2019 Huang Pei <huangpei@loongson.cn> * Copyright (C) 2019 Zeng Lu <zenglu@loongson.cn> */ #include <asm/asmmacro.h> #include <asm/errno.h> /* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */ #undef fp #ifdef CONFIG_CPU_HAS_LASX .macro lasx_init_upper wd .set push .set noat xinsert_d \wd, 2 xinsert_d \wd, 3 .endm .macro lasx_init_uppest wd .set push .set noat insert_d \wd, 1 xinsert_d \wd, 2 xinsert_d \wd, 3 .endm /* * Save a thread's LASX vector context. */ LEAF(_save_lasx) .set push .set noat .set noreorder SET_HARDFLOAT _cfcmsa $1, MSA_CSR sw $1, THREAD_MSA_CSR(a0) xvst_b 16, THREAD_FPR16, a0 xvst_b_off 17, (THREAD_FPR1-THREAD_FPR0) xvst_b_off 18, (THREAD_FPR2-THREAD_FPR0) xvst_b_off 19, (THREAD_FPR3-THREAD_FPR0) xvst_b_off 20, (THREAD_FPR4-THREAD_FPR0) xvst_b_off 21, (THREAD_FPR5-THREAD_FPR0) xvst_b_off 22, (THREAD_FPR6-THREAD_FPR0) xvst_b_off 23, (THREAD_FPR7-THREAD_FPR0) xvst_b_off 24, (THREAD_FPR8-THREAD_FPR0) xvst_b_off 25, (THREAD_FPR9-THREAD_FPR0) xvst_b_off 26, (THREAD_FPR10-THREAD_FPR0) xvst_b_off 27, (THREAD_FPR11-THREAD_FPR0) xvst_b_off 28, (THREAD_FPR12-THREAD_FPR0) xvst_b_off 29, (THREAD_FPR13-THREAD_FPR0) xvst_b_off 30, (THREAD_FPR14-THREAD_FPR0) xvst_b_off 31, (THREAD_FPR15-THREAD_FPR0) xvst_b_off 0, (THREAD_FPR16-THREAD_FPR0) xvst_b_off 1, (THREAD_FPR17-THREAD_FPR0) xvst_b_off 2, (THREAD_FPR18-THREAD_FPR0) xvst_b_off 3, (THREAD_FPR19-THREAD_FPR0) xvst_b_off 4, (THREAD_FPR20-THREAD_FPR0) xvst_b_off 5, (THREAD_FPR21-THREAD_FPR0) xvst_b_off 6, (THREAD_FPR22-THREAD_FPR0) xvst_b_off 7, (THREAD_FPR23-THREAD_FPR0) xvst_b_off 8, (THREAD_FPR24-THREAD_FPR0) xvst_b_off 9, (THREAD_FPR25-THREAD_FPR0) xvst_b_off 10, (THREAD_FPR26-THREAD_FPR0) xvst_b_off 11, (THREAD_FPR27-THREAD_FPR0) xvst_b_off 12, (THREAD_FPR28-THREAD_FPR0) xvst_b_off 13, 
(THREAD_FPR29-THREAD_FPR0) xvst_b_off 14, (THREAD_FPR30-THREAD_FPR0) jr ra xvst_b_off 15, (THREAD_FPR31-THREAD_FPR0) END(_save_lasx) /* * Restore a thread's LASX vector context. */ LEAF(_restore_lasx) .set push .set noat SET_HARDFLOAT lw $1, THREAD_MSA_CSR(a0) _ctcmsa MSA_CSR, $1 .set pop xvld_b 16, THREAD_FPR16, a0 xvld_b_off 17, (THREAD_FPR1-THREAD_FPR0) xvld_b_off 18, (THREAD_FPR2-THREAD_FPR0) xvld_b_off 19, (THREAD_FPR3-THREAD_FPR0) xvld_b_off 20, (THREAD_FPR4-THREAD_FPR0) xvld_b_off 21, (THREAD_FPR5-THREAD_FPR0) xvld_b_off 22, (THREAD_FPR6-THREAD_FPR0) xvld_b_off 23, (THREAD_FPR7-THREAD_FPR0) xvld_b_off 24, (THREAD_FPR8-THREAD_FPR0) xvld_b_off 25, (THREAD_FPR9-THREAD_FPR0) xvld_b_off 26, (THREAD_FPR10-THREAD_FPR0) xvld_b_off 27, (THREAD_FPR11-THREAD_FPR0) xvld_b_off 28, (THREAD_FPR12-THREAD_FPR0) xvld_b_off 29, (THREAD_FPR13-THREAD_FPR0) xvld_b_off 30, (THREAD_FPR14-THREAD_FPR0) xvld_b_off 31, (THREAD_FPR15-THREAD_FPR0) xvld_b_off 0, (THREAD_FPR16-THREAD_FPR0) xvld_b_off 1, (THREAD_FPR17-THREAD_FPR0) xvld_b_off 2, (THREAD_FPR18-THREAD_FPR0) xvld_b_off 3, (THREAD_FPR19-THREAD_FPR0) xvld_b_off 4, (THREAD_FPR20-THREAD_FPR0) xvld_b_off 5, (THREAD_FPR21-THREAD_FPR0) xvld_b_off 6, (THREAD_FPR22-THREAD_FPR0) xvld_b_off 7, (THREAD_FPR23-THREAD_FPR0) xvld_b_off 8, (THREAD_FPR24-THREAD_FPR0) xvld_b_off 9, (THREAD_FPR25-THREAD_FPR0) xvld_b_off 10, (THREAD_FPR26-THREAD_FPR0) xvld_b_off 11, (THREAD_FPR27-THREAD_FPR0) xvld_b_off 12, (THREAD_FPR28-THREAD_FPR0) xvld_b_off 13, (THREAD_FPR29-THREAD_FPR0) xvld_b_off 14, (THREAD_FPR30-THREAD_FPR0) jr ra xvld_b_off 15, (THREAD_FPR31-THREAD_FPR0) END(_restore_lasx) LEAF(_init_lasx_upper) .set push .set noat .set noreorder SET_HARDFLOAT not $1, zero lasx_init_upper 0 xvseli_d 0xc, 0, 1 xvseli_d 0xc, 0, 2 xvseli_d 0xc, 0, 3 xvseli_d 0xc, 0, 4 xvseli_d 0xc, 0, 5 xvseli_d 0xc, 0, 6 xvseli_d 0xc, 0, 7 xvseli_d 0xc, 0, 8 xvseli_d 0xc, 0, 9 xvseli_d 0xc, 0, 10 xvseli_d 0xc, 0, 11 xvseli_d 0xc, 0, 12 xvseli_d 0xc, 0, 13 xvseli_d 0xc, 0, 14 
xvseli_d 0xc, 0, 15 xvseli_d 0xc, 0, 16 xvseli_d 0xc, 0, 17 xvseli_d 0xc, 0, 18 xvseli_d 0xc, 0, 19 xvseli_d 0xc, 0, 20 xvseli_d 0xc, 0, 21 xvseli_d 0xc, 0, 22 xvseli_d 0xc, 0, 23 xvseli_d 0xc, 0, 24 xvseli_d 0xc, 0, 25 xvseli_d 0xc, 0, 26 xvseli_d 0xc, 0, 27 xvseli_d 0xc, 0, 28 xvseli_d 0xc, 0, 29 xvseli_d 0xc, 0, 30 jr ra xvseli_d 0xc, 0, 31 .set pop END(_init_lasx_upper) #endif
AirFortressIlikara/LS2K0300-linux-4.19
4,561
arch/mips/loongson64/loongson-3/loongson3-memset.S
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1998, 1999, 2000 by Ralf Baechle * Copyright (C) 1999, 2000 Silicon Graphics, Inc. * Copyright (C) 2007 by Maciej W. Rozycki * Copyright (C) 2011, 2012 MIPS Technologies, Inc. */ #include <asm/asm.h> #include <asm/asm-offsets.h> #include <asm/export.h> #include <asm/regdef.h> #define LONG_S_L sdl #define LONG_S_R sdr #define STORSIZE 16 #define STORMASK 15 #define EX(insn,reg,addr,handler) \ 9: insn reg, addr; \ .section __ex_table,"a"; \ PTR 9b, handler; \ .previous #define EX_GSSQ(reg, addr, handler) \ .set push; \ .set loongson-ext2; \ 9: gssq reg, reg, addr; \ .set pop; \ .section __ex_table,"a"; \ PTR 9b, handler; \ .previous .macro f_fill128 dst, offset, val, fixup EX_GSSQ(\val, (\offset + 0 * STORSIZE)(\dst), \fixup) EX_GSSQ(\val, (\offset + 1 * STORSIZE)(\dst), \fixup) EX_GSSQ(\val, (\offset + 2 * STORSIZE)(\dst), \fixup) EX_GSSQ(\val, (\offset + 3 * STORSIZE)(\dst), \fixup) EX_GSSQ(\val, (\offset + 4 * STORSIZE)(\dst), \fixup) EX_GSSQ(\val, (\offset + 5 * STORSIZE)(\dst), \fixup) EX_GSSQ(\val, (\offset + 6 * STORSIZE)(\dst), \fixup) EX_GSSQ(\val, (\offset + 7 * STORSIZE)(\dst), \fixup) .endm /* * memset(void *s, int c, size_t n) * * a0: start of area to clear * a1: char to fill with * a2: size of area to clear */ .set noreorder .align 5 LEAF(memset) EXPORT_SYMBOL(memset) beqz a1, 1f move v0, a0 /* result */ andi a1, 0xff /* spread fillword */ LONG_SLL t1, a1, 8 or a1, t1 LONG_SLL t1, a1, 16 or a1, t1 LONG_SLL t1, a1, 32 or a1, t1 1: FEXPORT(__bzero) EXPORT_SYMBOL(__bzero) sltiu t0, a2, STORSIZE /* very small region? */ bnez t0, .Lsmall_memset andi t0, a0, STORMASK /* aligned? 
*/ .set noat li AT, STORSIZE beqz t0, 1f PTR_SUBU t0, AT /* alignment in bytes */ .set at EX(LONG_S_R, a1, (a0), .Lfirst_fixup) /* make word/dword 8B aligned */ .set push .set arch=mips64r2 PTR_ADDIU t1, a0, 8 dins t1, zero, 0, 3 .set pop EX(LONG_S, a1, (t1), .Lsecond_fixup) /* May double copy 8B */ PTR_SUBU a0, t0 /* long align ptr */ PTR_ADDU a2, t0 /* correct size */ 1: ori t1, a2, 0x7f /* # of full blocks */ xori t1, 0x7f beqz t1, .Lmemset_partial /* no block to fill */ andi t0, a2, 0x80-STORSIZE PTR_ADDU t1, a0 /* end address */ .set reorder 1: PTR_ADDIU a0, 128 f_fill128 a0, -128, a1, .Lfwd_fixup bne t1, a0, 1b .set noreorder .Lmemset_partial: PTR_LA t1, 2f /* where to start */ .set noat LONG_SRL AT, t0, 2 PTR_SUBU t1, AT .set at jr t1 PTR_ADDU a0, t0 /* dest ptr */ .set push .set noreorder .set nomacro f_fill128 a0, -128, a1, .Lpartial_fixup /* ... but first do 16Bs ... */ 2: .set pop andi a2, STORMASK /* At most 15B to go */ beqz a2, 1f PTR_ADDU a0, a2 /* What's left */ .set push .set arch=mips64r2 PTR_ADDI t1, a0, -8 dins t1, zero, 0, 3 .set pop EX(LONG_S, a1, (t1), .Lnotlast_fixup) /* May double copy 8B */ EX(LONG_S_L, a1, -1(a0), .Llast_fixup) 1: jr ra move a2, zero .Lsmall_memset: andi t1, a2, 7 beq t1, a2, 1f LONG_SLL t1, 2 EX(LONG_S_R, a1, (a0), .Lfirst_fixup) EX(LONG_S_L, a1, 7(a0), .Lsmall_memset_fixup) 1: PTR_LA t0, 2f PTR_SUBU t1, t0, t1 jr t1 PTR_ADDU a0, a2 EX(sb, a1, -7(a0), .Lsmall_memset_partial_fixup) EX(sb, a1, -6(a0), .Lsmall_memset_partial_fixup) EX(sb, a1, -5(a0), .Lsmall_memset_partial_fixup) EX(sb, a1, -4(a0), .Lsmall_memset_partial_fixup) EX(sb, a1, -3(a0), .Lsmall_memset_partial_fixup) EX(sb, a1, -2(a0), .Lsmall_memset_partial_fixup) EX(sb, a1, -1(a0), .Lsmall_memset_partial_fixup) 2: jr ra /* done */ move a2, zero END(memset) .Lsmall_memset_fixup: PTR_ADDIU t0, a0, 8 .set push .set arch=mips64r2 dins t0, zero, 0, 3 .set pop LONG_ADDU a2, a0 jr ra LONG_SUBU a2, t0 .Lsmall_memset_partial_fixup: PTR_L t0, TI_TASK($28) LONG_L t0, 
THREAD_BUADDR(t0) jr ra LONG_SUBU a2, a0, t0 .Lfirst_fixup: jr ra nop .Lsecond_fixup: LONG_ADDU a2, a0 jr ra LONG_SUBU a2, t1 .Lfwd_fixup: PTR_L t0, TI_TASK($28) andi a2, 0x7f LONG_L t0, THREAD_BUADDR(t0) LONG_ADDU a2, t1 jr ra LONG_SUBU a2, t0 .Lpartial_fixup: PTR_L t0, TI_TASK($28) andi a2, STORMASK LONG_L t0, THREAD_BUADDR(t0) LONG_ADDU a2, a0 jr ra LONG_SUBU a2, t0 .Llast_fixup: jr ra andi a2, 0x7 .Lnotlast_fixup: jr ra PTR_SUBU a2, a0, t1
AirFortressIlikara/LS2K0300-linux-4.19
3,974
arch/mips/loongson64/loongson-3/sleep.S
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Sleep helper for Loongson-3A sleep mode, derived from Au1xxx. * * Copyright (C) 2011 Huacai Chen <chenhc@lemote.com> */ #include <asm/asm.h> #include <asm/asmmacro.h> #include <asm/mipsregs.h> #include <asm/stackframe.h> .extern cmos_write64 .extern loongson_nr_nodes .extern loongson_suspend_addr .extern loongson_pcache_ways .extern loongson_pcache_sets .extern loongson_pcache_linesz .extern loongson_scache_ways .extern loongson_scache_sets .extern loongson_scache_linesz .text .set push .set noat .set reorder .align 5 .set mips64 /* preparatory stuff */ .macro SETUP_SLEEP dsubu sp, PT_SIZE sd $1, PT_R1(sp) sd $2, PT_R2(sp) sd $3, PT_R3(sp) sd $4, PT_R4(sp) sd $5, PT_R5(sp) sd $6, PT_R6(sp) sd $7, PT_R7(sp) sd $16, PT_R16(sp) sd $17, PT_R17(sp) sd $18, PT_R18(sp) sd $19, PT_R19(sp) sd $20, PT_R20(sp) sd $21, PT_R21(sp) sd $22, PT_R22(sp) sd $23, PT_R23(sp) sd $26, PT_R26(sp) sd $27, PT_R27(sp) sd $28, PT_R28(sp) sd $30, PT_R30(sp) sd $31, PT_R31(sp) mfc0 k0, CP0_STATUS sw k0, PT_R8(sp) mfc0 k0, CP0_CONFIG, 0 sw k0, PT_R9(sp) mfc0 k0, CP0_PAGEMASK, 0 /* Loongson-3 has sub-registers */ sw k0, PT_R10(sp) mfc0 k0, CP0_PAGEMASK, 1 /* Loongson-3 has sub-registers */ sw k0, PT_R11(sp) dmfc0 k0, CP0_CONTEXT sd k0, PT_R12(sp) dmfc0 k0, CP0_XCONTEXT sd k0, PT_R13(sp) /* Now set up the "wakeup vector" in RTC space so the boot rom will * return to this point upon wakeup. 
* 0x40 : RA, 0x48 : SP */ PTR_LA k0, acpi_saved_sp_s3 sd sp, (k0) daddi a0, sp, 0 li a1, 0x48 jal cmos_write64 dla a0, wakeup_start /* resume path */ li a1, 0x40 jal cmos_write64 .endm /* Sleep code for Loongson-3 */ LEAF(loongson_suspend_enter) SETUP_SLEEP /* a0:address a1:L1_sets a2:L1_ways a3:L1_linesize */ li a0, 0x80000000 lw a1, loongson_pcache_sets lw a2, loongson_pcache_ways lw a3, loongson_pcache_linesz flushL1: move t0, a2 1: cache 0, (a0) cache 1, (a0) addiu a0, a0, 1 addiu t0, t0, -1 bnez t0, 1b subu a0, a0, a2 addu a0, a0, a3 addiu a1, a1, -1 bnez a1, flushL1 /* a0:nr_nodes a1:address a2:L2_sets a3:L2_ways t8:L2_linesize */ lw a0, loongson_nr_nodes dli a1, 0x9800000000000000 lw a3, loongson_scache_ways lw t8, loongson_scache_linesz flushL2_all: lw a2, loongson_scache_sets dli t9, 0x100000000000 flushL2_node: move t0, a3 1: cache 3, (a1) daddiu a1, a1, 1 addiu t0, t0, -1 bnez t0, 1b dsubu a1, a1, a3 daddu a1, a1, t8 addiu a2, a2, -1 bnez a2, flushL2_node daddu a1, a1, t9 addiu a0, a0, -1 bnez a0, flushL2_all /* Pass RA and SP to BIOS, for machines without CMOS RAM */ daddi a1, sp, 0 dla a0, wakeup_start ld v0, loongson_suspend_addr /* Call BIOS's STR sleep routine */ jr v0 nop END(loongson_suspend_enter) .macro SETUP_WAKEUP lw k0, PT_R8(sp) mtc0 k0, CP0_STATUS lw k0, PT_R9(sp) mtc0 k0, CP0_CONFIG, 0 lw k0, PT_R10(sp) mtc0 k0, CP0_PAGEMASK, 0 lw k0, PT_R11(sp) mtc0 k0, CP0_PAGEMASK, 1 ld k0, PT_R12(sp) dmtc0 k0, CP0_CONTEXT ld k0, PT_R13(sp) dmtc0 k0, CP0_XCONTEXT nop ld $1, PT_R1(sp) ld $2, PT_R2(sp) ld $3, PT_R3(sp) ld $4, PT_R4(sp) ld $5, PT_R5(sp) ld $6, PT_R6(sp) ld $7, PT_R7(sp) ld $16, PT_R16(sp) ld $17, PT_R17(sp) ld $18, PT_R18(sp) ld $19, PT_R19(sp) ld $20, PT_R20(sp) ld $21, PT_R21(sp) ld $22, PT_R22(sp) ld $23, PT_R23(sp) ld $26, PT_R26(sp) ld $27, PT_R27(sp) ld $28, PT_R28(sp) ld $30, PT_R30(sp) ld $31, PT_R31(sp) .endm /* This is where we return upon wakeup. * Reload all of the registers and return. 
*/ LEAF(wakeup_start) SETUP_WAKEUP daddiu sp, PT_SIZE jr ra END(wakeup_start) LEAF(loongson_acpi_wakeup_start) PTR_LA k0, acpi_saved_sp_s3 ld sp, (k0) SETUP_WAKEUP daddiu sp, PT_SIZE jr ra END(loongson_acpi_wakeup_start) .set pop
AirFortressIlikara/LS2K0300-linux-4.19
13,320
arch/mips/loongson64/loongson-2k/loongson3-memcpy.S
/* ============================================================================ Name : memcpy.S Author : Heiher <root@heiher.info> Chen Jie <chenj@lemote.com> Version : 20140307 Copyright : GPLv2 Description : The memcpy for Loongson 3. ============================================================================ */ #define LINUX_KERNEL #ifndef LINUX_KERNEL #include <sys/asm.h> #include <sys/regdef.h> #define EXC(inst_reg,addr,handler) \ inst_reg, addr; #define EXCQ(inst,reg1,reg2,addr,handler) \ inst reg1, reg2, addr; #if _MIPS_SIM == _ABI64 #define CONFIG_64BIT #else #define CONFIG_32BIT #endif #define FEXPORT(symbol) #else /* LINUX_KERNEL */ #include <asm/alternative-asm.h> #include <asm/asm.h> #include <asm/asm-offsets.h> #include <asm/cpu.h> #include <asm/export.h> #include <asm/regdef.h> #define EXC(inst_reg,addr,handler) \ 9: inst_reg, addr; \ .section __ex_table,"a"; \ PTR 9b, handler; \ .previous #define EXCQ(inst,reg1,reg2,addr,handler) \ 9: inst reg1, reg2, addr; \ .section __ex_table,"a"; \ PTR 9b, handler; \ .previous #endif #define dst a0 #define src a1 #define len a2 #define rem t8 /* * 64bit ABI vs 32bit ABI */ #ifdef CONFIG_64BIT #define ADDU daddu #define ADDI daddi #define SUBU dsubu #define SLL dsll #define SRL dsrl #define PTR_LA dla #define LOAD ld /* * As we are sharing code base with the mips32 tree (which use the o32 ABI * register definitions). We need to redefine the register definitions from * the n64 ABI register naming to the o32 ABI register naming. 
*/ #undef t0 #undef t1 #undef t2 #undef t3 #define t0 $8 #define t1 $9 #define t2 $10 #define t3 $11 #define t4 $12 #define t5 $13 #define t6 $14 #define t7 $15 #else #define ADDU addu #define ADDI addi #define SUBU subu #define SLL sll #define SRL srl #define PTR_LA la #define LOAD lw #endif /* CONFIG_64BIT */ #define LDFIRST ldr #define LDREST ldl #define SDFIRST sdr #define SDREST sdl #define LWFIRST lwr #define LWREST lwl #define SWFIRST swr #define SWREST swl #define _MIPS_LSE_CPUCFG 1 /* void * memcpy (void *s1, const void *s2, size_t n); */ .text .align 5 .set noreorder .set noat .set loongson-ext2 LEAF(memcpy) /* a0=dst a1=src a2=len */ EXPORT_SYMBOL(memcpy) move v0, dst .L__memcpy: FEXPORT(__copy_user) EXPORT_SYMBOL(__copy_user) /* if less then 0x28 bytes */ sltu t2, a2, 0x28 andi t0, dst, 0xf bnez t2, .L_memcpy_less andi t1, src, 0xf beqz t0, 1f ADDI rem, t0, -0x10 /* upgrade */ EXC( LDFIRST t3, 0(src), .Ll_exc) sltu t4, t0, 0x8 EXC( LDREST t3, 7(src), .Ll_exc_copy) SUBU src, rem EXC( SDFIRST t3, 0(dst), .Ls_exc) SUBU dst, rem beqz t4, 1f ADDU len, rem EXC( LDFIRST t3, -8(src), .Ll_exc_a8) EXC( LDREST t3, -1(src), .Ll_exc_copy_a8) EXC( sd t3, -8(dst), .Ls_exc_p8) 1: andi t7, src, 0x7 beq t0, t1, .L_memcpy_16_16 nop /* t7 is $15 */ ALTERNATIVE "bnez $15, .L_memcpy_16_4_2_1",\ "bnez $15, .L_memcpy_16_4_2_1_unalign",\ _MIPS_LSE_CPUCFG .L_memcpy_16_8: SRL t0, len, 6 # 64B per iteration beqz t0, 2f and rem, len, 0x3f .align 4 1: EXC( ld t4, (16 * 0)(src), .Ll_exc) EXC( ld t7, (8 + 16 * 0)(src), .Ll_exc_copy) EXC( ld t2, (16 * 1)(src), .Ll_exc_copy) EXC( ld t3, (8 + 16 * 1)(src), .Ll_exc_copy) EXC( ld t0, (16 * 2)(src), .Ll_exc_copy) EXC( ld t1, (8 + 16 * 2)(src), .Ll_exc_copy) ADDI len, -16 * 4 EXCQ( gssq, t7, t4, (16 * 0)(dst), .Ls_exc_p64) EXC( ld t4, (16 * 3)(src), .Ll_exc_copy) EXC( ld t7, (8 + 16 * 3)(src), .Ll_exc_copy) ADDU src, 16 * 4 ADDU dst, 16 * 4 EXCQ( gssq, t3, t2, (-16 * 3)(dst), .Ls_exc_p48) EXCQ( gssq, t1, t0, (-16 * 2)(dst), .Ls_exc_p32) 
EXCQ( gssq, t7, t4, (-16 * 1)(dst), .Ls_exc_p16) bne len, rem, 1b nop beqz len, .Ldone 2: sltu t0, len, 32 bnez t0, 3f and rem, len, 0xf EXC( ld t2, (16 * 0)(src), .Ll_exc) EXC( ld t3, (8 + 16 * 0)(src), .Ll_exc_copy) EXC( ld t0, (16 * 1)(src), .Ll_exc_copy) EXC( ld t1, (8 + 16 * 1)(src), .Ll_exc_copy) ADDI len, -16 * 2 ADDU src, 16 * 2 EXCQ( gssq, t3, t2, (16 * 0)(dst), .Ls_exc_p32) EXCQ( gssq, t1, t0, (16 * 1)(dst), .Ls_exc_p16) beqz len, .Ldone ADDU dst, 32 3: /* copy less than 32B */ beq rem, len, .L_memcpy_1_15B_8B_aligned nop EXC( ld t2, 0(src), .Ll_exc) EXC( ld t3, 8(src), .Ll_exc_copy) ADDI len, -16 ADDU src, 16 EXCQ( gssq, t3, t2, 0(dst), .Ls_exc_p16) bnez len, .L_memcpy_1_15B_8B_aligned ADDU dst, 16 jr ra nop .L_memcpy_16_16: .align 4 SRL t0, len, 7 # 128B per iteration beqz t0, 5f and rem, len, 0x7f 6: EXCQ( gslq, t7, t4, (16 * 0)(src), .Ll_exc) EXCQ( gslq, t3, t2, (16 * 1)(src), .Ll_exc_copy) EXCQ( gslq, t1, t0, (16 * 2)(src), .Ll_exc_copy) ADDI len, -16 * 8 EXCQ( gssq, t7, t4, (16 * 0)(dst), .Ls_exc_p128) EXCQ( gslq, t7, t4, (16 * 3)(src), .Ll_exc_copy) EXCQ( gssq, t3, t2, (16 * 1)(dst), .Ls_exc_p112) EXCQ( gslq, t3, t2, (16 * 4)(src), .Ll_exc_copy) EXCQ( gssq, t1, t0, (16 * 2)(dst), .Ls_exc_p96) EXCQ( gslq, t1, t0, (16 * 5)(src), .Ll_exc_copy) EXCQ( gssq, t7, t4, (16 * 3)(dst), .Ls_exc_p80) EXCQ( gslq, t7, t4, (16 * 6)(src), .Ll_exc_copy) EXCQ( gssq, t3, t2, (16 * 4)(dst), .Ls_exc_p64) EXCQ( gslq, t3, t2, (16 * 7)(src), .Ll_exc_copy) EXCQ( gssq, t1, t0, (16 * 5)(dst), .Ls_exc_p48) EXCQ( gssq, t7, t4, (16 * 6)(dst), .Ls_exc_p32) EXCQ( gssq, t3, t2, (16 * 7)(dst), .Ls_exc_p16) ADDU src, 16 * 8 bne len, rem, 6b ADDU dst, 16 * 8 beqz len, .Ldone 5: SRL t0, len, 6 # 64B per iteration beqz t0, 2f and rem, len, 0x3f 1: EXCQ( gslq, t7, t4, (16 * 0)(src), .Ll_exc) EXCQ( gslq, t3, t2, (16 * 1)(src), .Ll_exc_copy) EXCQ( gslq, t1, t0, (16 * 2)(src), .Ll_exc_copy) ADDI len, -16 * 4 EXCQ( gssq, t7, t4, (16 * 0)(dst), .Ls_exc_p64) EXCQ( gslq, t7, t4, (16 * 3)(src), 
.Ll_exc_copy) ADDU src, 16 * 4 ADDU dst, 16 * 4 EXCQ( gssq, t3, t2, (-16 * 3)(dst), .Ls_exc_p48) EXCQ( gssq, t1, t0, (-16 * 2)(dst), .Ls_exc_p32) EXCQ( gssq, t7, t4, (-16 * 1)(dst), .Ls_exc_p16) bne len, rem, 1b nop beqz len, .Ldone 2: sltu t0, len, 32 bnez t0, 3f and rem, len, 0xf EXCQ( gslq, t3, t2, (16 * 0)(src), .Ll_exc) EXCQ( gslq, t1, t0, (16 * 1)(src), .Ll_exc_copy) ADDI len, -16 * 2 ADDU src, 32 EXCQ( gssq, t3, t2, (16 * 0)(dst), .Ls_exc_p32) EXCQ( gssq, t1, t0, (16 * 1)(dst), .Ls_exc_p16) beqz len, .Ldone ADDU dst, 32 3: /* copy less than 32B */ beq rem, len, .L_memcpy_1_15B_8B_aligned nop EXCQ( gslq, t3, t2, 0(src), .Ll_exc) ADDI len, -16 ADDU src, 16 EXCQ( gssq, t3, t2, 0(dst), .Ls_exc_p16) beqz len, .Ldone ADDU dst, 16 /* * copy 1 - 15B, src & dst are 8B aligned */ .L_memcpy_1_15B_8B_aligned: sltu t0, len, 0x9 bnez t0, 1f nop EXC( ld t1, (src), .Ll_exc) EXC( sd t1, (dst), .Ls_exc) 1: ADDU src, len ADDU dst, len EXC( LDREST t1, -1(src), .Ll_exc_copy_len) EXC( SDREST t1, -1(dst), .Ls_exc) .Ldone: jr ra move len, zero .L_memcpy_16_4_2_1_unalign: SRL t0, len, 6 # 64B per iteration beqz t0, .L_memcpy_32 and rem, len, 0x3f .align 4 1: EXCQ( gslq, t7, t4, (16 * 0)(src), .Ll_exc) EXCQ( gslq, t3, t2, (16 * 1)(src), .Ll_exc_copy) EXCQ( gslq, t1, t0, (16 * 2)(src), .Ll_exc_copy) ADDI len, -16 * 4 EXCQ( gssq, t7, t4, (16 * 0)(dst), .Ls_exc_p64) EXCQ( gslq, t7, t4, (16 * 3)(src), .Ll_exc_copy) ADDU src, 16 * 4 ADDU dst, 16 * 4 EXCQ( gssq, t3, t2, (-16 * 3)(dst), .Ls_exc_p48) EXCQ( gssq, t1, t0, (-16 * 2)(dst), .Ls_exc_p32) EXCQ( gssq, t7, t4, (-16 * 1)(dst), .Ls_exc_p16) bne len, rem, 1b nop beqz len, .Ldone nop .L_memcpy_32: SRL t0, len, 5 # 32B per iteration beqz t0, 2f and rem, len, 0x1f 1: EXCQ( gslq, t7, t4, 0(src), .Ll_exc) EXCQ( gslq, t3, t2, 16(src), .Ll_exc_copy) ADDI len, -16 * 2 ADDU src, 16 * 2 EXCQ( gssq, t7, t4, (16 * 0)(dst), .Ls_exc_p32) EXCQ( gssq, t3, t2, (16 * 1)(dst), .Ls_exc_p16) bne len, rem, 1b ADDU dst, 16 * 2 beqz len, .Ldone 2: and rem, 
len, 0xf beq rem, len, .L_memcpy_less nop EXCQ( gslq, t1, t0, 0(src), .Ll_exc) ADDI len, -16 ADDU src, 16 EXCQ( gssq, t1, t0, 0(dst), .Ls_exc_p16) beqz len, .Ldone ADDU dst, 16 b .L_memcpy_less nop .L_memcpy_16_4_2_1: SRL t0, len, 5 # 32B per iteration beqz t0, 2f and rem, len, 0x1f 1: EXC( LDFIRST t4, 0(src), .Ll_exc) EXC( LDFIRST t7, 8(src), .Ll_exc_copy) ADDI len, -16 * 2 EXC( LDREST t4, 7(src), .Ll_exc_copy) EXC( LDREST t7, 15(src), .Ll_exc_copy) EXC( LDFIRST t2, 16(src), .Ll_exc_copy) EXC( LDFIRST t3, 24(src), .Ll_exc_copy) EXC( LDREST t2, 23(src), .Ll_exc_copy) EXC( LDREST t3, 31(src), .Ll_exc_copy) ADDU src, 16 * 2 EXCQ( gssq, t7, t4, (16 * 0)(dst), .Ls_exc_p32) EXCQ( gssq, t3, t2, (16 * 1)(dst), .Ls_exc_p16) bne len, rem, 1b ADDU dst, 16 * 2 beqz len, .Ldone 2: and rem, len, 0xf beq rem, len, .L_memcpy_less nop EXC( LDFIRST t0, 0(src), .Ll_exc) EXC( LDFIRST t1, 8(src), .Ll_exc_copy) ADDI len, -16 EXC( LDREST t0, 7(src), .Ll_exc_copy) EXC( LDREST t1, 15(src), .Ll_exc_copy) ADDU src, 16 EXCQ( gssq, t1, t0, 0(dst), .Ls_exc_p16) beqz len, .Ldone ADDU dst, 16 .L_memcpy_less: andi t0, len, 0x7 beq t0, len, 2f andi t4, len, 0x3 .set reorder SUBU t1, len, t0 ADDU dst, t1 ADDU src, t1 .set at=t2 PTR_LA t3, 1f .set noat SLL t2, t1, 0x1 /* 4 * 4B instructions move 8B data*/ SUBU t3, t2 jr t3 .set noreorder EXC( LDFIRST t1, (-8 * 4)(src), .Ll_exc_a32) EXC( LDREST t1, (-8 * 4 + 7)(src), .Ll_exc_copy_a32) EXC( SDFIRST t1, (-8 * 4)(dst), .Ls_exc) EXC( SDREST t1, (-8 * 4 + 7)(dst), .Ls_exc) EXC( LDFIRST t2, (-8 * 3)(src), .Ll_exc_a24) EXC( LDREST t2, (-8 * 3 + 7)(src), .Ll_exc_copy_a24) EXC( SDFIRST t2, (-8 * 3)(dst), .Ls_exc_r24) EXC( SDREST t2, (-8 * 3 + 7)(dst), .Ls_exc_r24) EXC( LDFIRST t3, (-8 * 2)(src), .Ll_exc_a16) EXC( LDREST t3, (-8 * 2 + 7)(src), .Ll_exc_copy_a16) EXC( SDFIRST t3, (-8 * 2)(dst), .Ls_exc_r16) EXC( SDREST t3, (-8 * 2 + 7)(dst), .Ls_exc_r16) EXC( LDFIRST t1, (-8 * 1)(src), .Ll_exc_a8) EXC( LDREST t1, (-8 * 1 + 7)(src), .Ll_exc_copy_a8) EXC( SDFIRST 
t1, (-8 * 1)(dst), .Ls_exc_r8) EXC( SDREST t1, (-8 * 1 + 7)(dst), .Ls_exc_r8) 1: beqz t0, .Ldone ADDU src, t0 ADDU dst, t0 EXC( LDFIRST t2, -8(src), .Ll_exc_a8) EXC( LDREST t2, -1(src), .Ll_exc_copy_a8) EXC( SDFIRST t2, -8(dst), .Ls_exc_r0) EXC( SDREST t2, -1(dst), .Ls_exc_r0) jr ra move len, zero 2: beq t4, len, 3f nop EXC( LWFIRST t2, (src), .Ll_exc) EXC( LWREST t2, 3(src), .Ll_exc_copy) ADDU src, len EXC( SWFIRST t2, (dst), .Ls_exc) EXC( SWREST t2, 3(dst), .Ls_exc) beqz t4, .Ldone ADDU dst, len EXC( LWFIRST t1, -4(src), .Ll_exc_a4) EXC( LWREST t1, -1(src), .Ll_exc_copy_a4) EXC( SWFIRST t1, -4(dst), .Ls_exc) EXC( SWREST t1, -1(dst), .Ls_exc) jr ra move len, zero 3: beqz len, .Ldone ADDU t0, src, len 1: EXC( lb t2, (src), .Ll_exc) ADDU src, 1 EXC( sb t2, (dst), .Ls_exc) bne t0, src, 1b ADDU dst, 1 jr ra move len, zero END(memcpy) #ifdef LINUX_KERNEL #define LEXC_a(n) \ .Ll_exc_copy_a ## n: \ ADDI src, -n; \ b .Ll_exc_copy; \ ADDI dst, -n; \ .Ll_exc_a ## n: \ ADDI src, -n; \ b .Ll_exc; \ ADDI dst, -n; .Ll_exc_copy_len: SUBU src, len b .Ll_exc_copy SUBU dst, len LEXC_a(4) LEXC_a(8) LEXC_a(16) LEXC_a(24) LEXC_a(32) LEXC_a(40) LEXC_a(48) .Ll_exc_copy: /* * Copy bytes from src until faulting load address (or until a * lb faults) * * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28) * may be more than a byte beyond the last address. * Hence, the lb below may get an exception. 
* * Assumes src < THREAD_BUADDR($28) */ LOAD t0, TI_TASK($28) nop LOAD t0, THREAD_BUADDR(t0) 1: EXC( lb t1, 0(src), .Ll_exc) ADDU src, 1 sb t1, 0(dst) # can't fault -- we're copy_from_user bne src, t0, 1b ADDU dst, 1 .Ll_exc: LOAD t0, TI_TASK($28) nop LOAD t0, THREAD_BUADDR(t0) # t0 is just past last good address nop SUBU len, AT, t0 # len number of uncopied bytes jr ra nop #define SEXC_p(n) \ .Ls_exc_p ## n: \ jr ra; \ ADDU len, n; #define SEXC_r(n) \ .Ls_exc_r ## n: \ jr ra; \ ADDI len, t0, n; SEXC_p(1) SEXC_p(8) SEXC_p(16) SEXC_p(32) SEXC_p(48) SEXC_p(64) SEXC_p(80) SEXC_p(96) SEXC_p(112) SEXC_p(128) SEXC_r(0) SEXC_r(8) SEXC_r(16) SEXC_r(24) .Ls_exc: jr ra nop .align 5 LEAF(memmove) EXPORT_SYMBOL(memmove) ADDU t0, a0, a2 ADDU t1, a1, a2 sltu t0, a1, t0 # dst + len <= src -> memcpy sltu t1, a0, t1 # dst >= src + len -> memcpy and t0, t1 beqz t0, .L__memcpy move v0, a0 /* return value */ beqz a2, .Lr_out END(memmove) /* fall through to __rmemcpy */ LEAF(__rmemcpy) /* a0=dst a1=src a2=len */ sltu t0, a1, a0 beqz t0, .Lr_end_bytes_up # src >= dst nop ADDU a0, a2 # dst = dst + len ADDU a1, a2 # src = src + len .Lr_end_bytes: lb t0, -1(a1) ADDI a2, -0x1 sb t0, -1(a0) ADDI a1, -0x1 bnez a2, .Lr_end_bytes ADDI a0, -0x1 .Lr_out: jr ra move a2, zero .Lr_end_bytes_up: lb t0, (a1) ADDI a2, -0x1 sb t0, (a0) ADDU a1, 0x1 bnez a2, .Lr_end_bytes_up ADDU a0, 0x1 jr ra move a2, zero END(__rmemcpy) #endif
AirFortressIlikara/LS2K0300-linux-4.19
4,561
arch/mips/loongson64/loongson-2k/loongson3-memset.S
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1998, 1999, 2000 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2007 by Maciej W. Rozycki
 * Copyright (C) 2011, 2012 MIPS Technologies, Inc.
 */
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/export.h>
#include <asm/regdef.h>

/* Unaligned 64-bit store-left / store-right primitives (64-bit kernel). */
#define LONG_S_L sdl
#define LONG_S_R sdr

/*
 * The inner fill uses 16-byte Loongson EXT2 quad stores (gssq), so the
 * store granule and alignment mask are 16/15 rather than the usual 8/7.
 */
#define STORSIZE 16
#define STORMASK 15

/*
 * Emit "insn reg, addr" with an __ex_table entry: if the store faults,
 * control transfers to "handler", which leaves the number of bytes NOT
 * stored in a2 before returning (the __bzero partial-completion contract).
 */
#define EX(insn,reg,addr,handler) \
9: insn reg, addr; \
 .section __ex_table,"a"; \
 PTR 9b, handler; \
 .previous

/*
 * Same idea for the Loongson EXT2 "gssq" instruction, which stores a
 * register pair -- here the fill register twice, i.e. 16 pattern bytes.
 */
#define EX_GSSQ(reg, addr, handler) \
 .set push; \
 .set loongson-ext2; \
9: gssq reg, reg, addr; \
 .set pop; \
 .section __ex_table,"a"; \
 PTR 9b, handler; \
 .previous

/* Fill 128 bytes at \dst+\offset with \val: eight 16-byte quad stores. */
	.macro	f_fill128 dst, offset, val, fixup
	EX_GSSQ(\val, (\offset + 0 * STORSIZE)(\dst), \fixup)
	EX_GSSQ(\val, (\offset + 1 * STORSIZE)(\dst), \fixup)
	EX_GSSQ(\val, (\offset + 2 * STORSIZE)(\dst), \fixup)
	EX_GSSQ(\val, (\offset + 3 * STORSIZE)(\dst), \fixup)
	EX_GSSQ(\val, (\offset + 4 * STORSIZE)(\dst), \fixup)
	EX_GSSQ(\val, (\offset + 5 * STORSIZE)(\dst), \fixup)
	EX_GSSQ(\val, (\offset + 6 * STORSIZE)(\dst), \fixup)
	EX_GSSQ(\val, (\offset + 7 * STORSIZE)(\dst), \fixup)
	.endm

/*
 * memset(void *s, int c, size_t n)
 *
 * a0: start of area to clear
 * a1: char to fill with
 * a2: size of area to clear
 *
 * Returns the original s in v0.  NOTE: .set noreorder is in effect for
 * most of this file -- the instruction after each branch/jump is its
 * delay slot and executes unconditionally.
 */
	.set	noreorder
	.align	5
LEAF(memset)
EXPORT_SYMBOL(memset)
	/* c == 0: skip pattern replication, a1 is already the fill word. */
	beqz	a1, 1f
	 move	v0, a0			/* result */

	/* Replicate the low byte of c across all 8 bytes of a1. */
	andi	a1, 0xff		/* spread fillword */
	LONG_SLL	t1, a1, 8
	or	a1, t1
	LONG_SLL	t1, a1, 16
	or	a1, t1
	LONG_SLL	t1, a1, 32
	or	a1, t1
1:
/* __bzero enters here: a1 already holds the (zero) fill pattern. */
FEXPORT(__bzero)
EXPORT_SYMBOL(__bzero)
	sltiu	t0, a2, STORSIZE	/* very small region? */
	bnez	t0, .Lsmall_memset
	 andi	t0, a0, STORMASK	/* aligned? (delay slot: t0 = misalignment) */

	.set	noat
	li	AT, STORSIZE
	beqz	t0, 1f			/* already 16-byte aligned */
	 PTR_SUBU	t0, AT		/* alignment in bytes (negative count to add) */
	.set	at

	/* Head: unaligned sdr covers up to the next 8-byte boundary ... */
	EX(LONG_S_R, a1, (a0), .Lfirst_fixup)	/* make word/dword 8B aligned */
	.set	push
	.set	arch=mips64r2
	/* ... then one aligned sd covers the next 8 bytes (t1 = (a0+8) & ~7). */
	PTR_ADDIU	t1, a0, 8
	dins	t1, zero, 0, 3
	.set	pop
	EX(LONG_S, a1, (t1), .Lsecond_fixup)	/* May double copy 8B */
	PTR_SUBU	a0, t0			/* long align ptr */
	PTR_ADDU	a2, t0			/* correct size */

1:	/* t1 = number of bytes in whole 128-byte blocks. */
	ori	t1, a2, 0x7f		/* # of full blocks */
	xori	t1, 0x7f
	beqz	t1, .Lmemset_partial	/* no block to fill */
	 andi	t0, a2, 0x80-STORSIZE	/* delay slot: leftover full 16B stores */

	PTR_ADDU	t1, a0		/* end address */
	.set	reorder
1:	PTR_ADDIU	a0, 128
	f_fill128 a0, -128, a1, .Lfwd_fixup
	bne	t1, a0, 1b
	.set	noreorder

.Lmemset_partial:
	/*
	 * Jump into the middle of the f_fill128 expansion below so that
	 * exactly t0/16 quad stores execute before label 2.  Each EX_GSSQ
	 * expands to 4 bytes of store, hence the t0>>2 byte offset.
	 */
	PTR_LA	t1, 2f			/* where to start */
	.set	noat
	LONG_SRL	AT, t0, 2
	PTR_SUBU	t1, AT
	.set	at
	jr	t1
	 PTR_ADDU	a0, t0		/* dest ptr (delay slot) */

	.set	push
	.set	noreorder
	.set	nomacro
	/* ... but first do 16Bs ... */
	f_fill128 a0, -128, a1, .Lpartial_fixup
2:	.set	pop
	andi	a2, STORMASK		/* At most 15B to go */

	beqz	a2, 1f
	 PTR_ADDU	a0, a2		/* What's left (a0 = one past the end) */
	.set	push
	.set	arch=mips64r2
	/* Tail: aligned sd at (a0-8)&~7, then sdl finishes the last bytes. */
	PTR_ADDI	t1, a0, -8
	dins	t1, zero, 0, 3
	.set	pop
	EX(LONG_S, a1, (t1), .Lnotlast_fixup)	/* May double copy 8B */
	EX(LONG_S_L, a1, -1(a0), .Llast_fixup)
1:	jr	ra
	 move	a2, zero		/* delay slot: 0 bytes left unset */

.Lsmall_memset:
	/* Region < 16 bytes.  If 8 <= n < 16, do an sdr/sdl pair first. */
	andi	t1, a2, 7
	beq	t1, a2, 1f		/* n < 8: bytes only */
	 LONG_SLL	t1, 2		/* delay slot: t1 = (n&7)*4 = jump offset */
	EX(LONG_S_R, a1, (a0), .Lfirst_fixup)
	EX(LONG_S_L, a1, 7(a0), .Lsmall_memset_fixup)

1:	/* Computed jump: skip 7-(n&7) of the sb's below (4 bytes each). */
	PTR_LA	t0, 2f
	PTR_SUBU	t1, t0, t1
	jr	t1
	 PTR_ADDU	a0, a2		/* delay slot: a0 = end of region */

	EX(sb, a1, -7(a0), .Lsmall_memset_partial_fixup)
	EX(sb, a1, -6(a0), .Lsmall_memset_partial_fixup)
	EX(sb, a1, -5(a0), .Lsmall_memset_partial_fixup)
	EX(sb, a1, -4(a0), .Lsmall_memset_partial_fixup)
	EX(sb, a1, -3(a0), .Lsmall_memset_partial_fixup)
	EX(sb, a1, -2(a0), .Lsmall_memset_partial_fixup)
	EX(sb, a1, -1(a0), .Lsmall_memset_partial_fixup)
2:	jr	ra			/* done */
	 move	a2, zero
	END(memset)

/*
 * Fault-fixup paths.  Each one returns with a2 = number of bytes that
 * were NOT stored.  TI_TASK($28)/THREAD_BUADDR fetch the faulting
 * address recorded by the unaligned-access/TLB fault handler
 * (NOTE(review): presumed from the arithmetic below -- confirm against
 * the MIPS fault-handling code).
 */
.Lsmall_memset_fixup:
	/* Fault in the sdl of the small 8..15B pair: the sdr already set
	 * bytes up to the next 8B boundary (t0). */
	PTR_ADDIU	t0, a0, 8
	.set	push
	.set	arch=mips64r2
	dins	t0, zero, 0, 3
	.set	pop
	LONG_ADDU	a2, a0		/* a2 = end address ... */
	jr	ra
	 LONG_SUBU	a2, t0		/* ... minus first unset address */

.Lsmall_memset_partial_fixup:
	PTR_L	t0, TI_TASK($28)
	LONG_L	t0, THREAD_BUADDR(t0)
	jr	ra
	 LONG_SUBU	a2, a0, t0	/* bytes left = end - fault address */

.Lfirst_fixup:
	/* Fault on the very first store: a2 (requested size) is already
	 * the count of unset bytes. */
	jr	ra
	 nop

.Lsecond_fixup:
	LONG_ADDU	a2, a0		/* a2 = end address ... */
	jr	ra
	 LONG_SUBU	a2, t1		/* ... minus faulting aligned slot */

.Lfwd_fixup:
	PTR_L	t0, TI_TASK($28)
	andi	a2, 0x7f		/* bytes after the 128B blocks */
	LONG_L	t0, THREAD_BUADDR(t0)
	LONG_ADDU	a2, t1		/* + block-loop end address */
	jr	ra
	 LONG_SUBU	a2, t0		/* - fault address */

.Lpartial_fixup:
	PTR_L	t0, TI_TASK($28)
	andi	a2, STORMASK		/* sub-16B tail bytes */
	LONG_L	t0, THREAD_BUADDR(t0)
	LONG_ADDU	a2, a0		/* + partial-fill end address */
	jr	ra
	 LONG_SUBU	a2, t0		/* - fault address */

.Llast_fixup:
	/* Fault in the final sdl: at most the last 7 bytes are unset. */
	jr	ra
	 andi	a2, 0x7

.Lnotlast_fixup:
	/* Fault in the tail aligned sd at t1: a2 = end - t1. */
	jr	ra
	 PTR_SUBU	a2, a0, t1
AirFortressIlikara/LS2K0300-linux-4.19
3,434
arch/mips/loongson64/loongson-2k/sleep.S
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Sleep helper for Loongson2K sleep mode, derived from Au1xxx.
 *
 * Copyright (C) 2011 Huacai Chen <chenhc@lemote.com>
 */
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>

	/* Cache/topology parameters and BIOS entry, filled in by C code. */
	.extern loongson2k_nr_nodes
	.extern loongson2k_suspend_addr
	.extern loongson2k_pcache_ways
	.extern loongson2k_pcache_sets
	.extern loongson2k_pcache_linesz
	.extern loongson2k_scache_ways
	.extern loongson2k_scache_sets
	.extern loongson2k_scache_linesz

	.text
	.set	push
	.set	noat
	.set	reorder
	.align	5
	.set	mips64

	/* preparatory stuff */
	/*
	 * Save all callee-visible GPRs plus the CP0 state we must restore
	 * after wakeup into a pt_regs-sized frame on the current stack.
	 * NOTE: the CP0 values are stashed in the PT_R8..PT_R13 slots of
	 * the frame, not in their "own" register slots.
	 */
	.macro	SETUP_SLEEP
	dsubu	sp, PT_SIZE
	sd	$1, PT_R1(sp)
	sd	$2, PT_R2(sp)
	sd	$3, PT_R3(sp)
	sd	$4, PT_R4(sp)
	sd	$5, PT_R5(sp)
	sd	$6, PT_R6(sp)
	sd	$7, PT_R7(sp)
	sd	$16, PT_R16(sp)
	sd	$17, PT_R17(sp)
	sd	$18, PT_R18(sp)
	sd	$19, PT_R19(sp)
	sd	$20, PT_R20(sp)
	sd	$21, PT_R21(sp)
	sd	$22, PT_R22(sp)
	sd	$23, PT_R23(sp)
	sd	$26, PT_R26(sp)
	sd	$27, PT_R27(sp)
	sd	$28, PT_R28(sp)
	sd	$30, PT_R30(sp)
	sd	$31, PT_R31(sp)
	mfc0	k0, CP0_STATUS		/* Status -> PT_R8 slot */
	sw	k0, PT_R8(sp)
	mfc0	k0, CP0_CONFIG, 0	/* Config0 -> PT_R9 slot */
	sw	k0, PT_R9(sp)
	mfc0	k0, CP0_PAGEMASK, 0	/* PageMask -> PT_R10 slot */
	sw	k0, PT_R10(sp)
	mfc0	k0, CP0_PAGEMASK, 1	/* PageGrain -> PT_R11 slot; select 1 */
	sw	k0, PT_R11(sp)
	dmfc0	k0, CP0_CONTEXT
	sd	k0, PT_R12(sp)
	dmfc0	k0, CP0_XCONTEXT
	sd	k0, PT_R13(sp)
	.endm

/* sleep code for Loongson2k memory controller type */
/*
 * loongson_suspend_lowlevel() - save context, write back all caches by
 * index, then jump to the BIOS suspend-to-RAM routine with
 * a0 = resume entry (wakeup_start) and a1 = saved stack pointer.
 * Does not return here; execution resumes at wakeup_start.
 */
LEAF(loongson_suspend_lowlevel)
	SETUP_SLEEP

	/* a0:address a1:L1_sets a2:L1_ways a3:L1_linesize */
	/* Index-writeback L1 I- and D-cache (cache ops 0 and 1). */
	li	a0, 0x80000000		/* KSEG0 base for index ops */
	lw	a1, loongson2k_pcache_sets
	lw	a2, loongson2k_pcache_ways
	lw	a3, loongson2k_pcache_linesz
flushL1:
	move	t0, a2
1:	cache	0, (a0)
	cache	1, (a0)
	addiu	a0, a0, 1		/* next way: index encoded in low bits */
	addiu	t0, t0, -1
	bnez	t0, 1b
	subu	a0, a0, a2		/* rewind way index ... */
	addu	a0, a0, a3		/* ... advance by one line (set) */
	addiu	a1, a1, -1
	bnez	a1, flushL1

	/* a0:nr_nodes a1:address a2:L2_sets a3:L2_ways t8:L2_linesize */
	/* Index-writeback the L2 (cache op 3) on every node. */
	lw	a0, loongson2k_nr_nodes
	dli	a1, 0x9800000000000000	/* XKPHYS uncached-coherent base */
	lw	a3, loongson2k_scache_ways
	lw	t8, loongson2k_scache_linesz
flushL2_all:
	lw	a2, loongson2k_scache_sets
	dli	t9, 0x100000000000	/* per-node address stride */
flushL2_node:
	move	t0, a3
1:	cache	3, (a1)
	daddiu	a1, a1, 1		/* next way */
	addiu	t0, t0, -1
	bnez	t0, 1b
	dsubu	a1, a1, a3		/* rewind ways ... */
	daddu	a1, a1, t8		/* ... advance one line */
	addiu	a2, a2, -1
	bnez	a2, flushL2_node
	daddu	a1, a1, t9		/* next node */
	addiu	a0, a0, -1
	bnez	a0, flushL2_all

	/* Pass RA and SP to BIOS, for machines without CMOS RAM */
	daddi	a1, sp, 0		/* a1 = sp with the saved frame */
	dla	a0, wakeup_start	/* a0 = resume entry point */
	ld	v0, loongson2k_suspend_addr
	/* Call BIOS's STR sleep routine */
	jr	v0
	nop
	END(loongson_suspend_lowlevel)

/*
 * This is where we return upon wakeup.
 * Reload all of the registers and return.
 * Entered with sp still pointing at the frame built by SETUP_SLEEP
 * (the BIOS restores sp -- see the a1 hand-off above).
 */
LEAF(wakeup_start)
	lw	k0, PT_R8(sp)
	mtc0	k0, CP0_STATUS
	lw	k0, PT_R9(sp)
	mtc0	k0, CP0_CONFIG, 0
	lw	k0, PT_R10(sp)
	mtc0	k0, CP0_PAGEMASK, 0
	lw	k0, PT_R11(sp)
	mtc0	k0, CP0_PAGEMASK, 1
	ld	k0, PT_R12(sp)
	dmtc0	k0, CP0_CONTEXT
	ld	k0, PT_R13(sp)
	dmtc0	k0, CP0_XCONTEXT
	nop				/* CP0 write hazard barrier */
	ld	$1, PT_R1(sp)
	ld	$2, PT_R2(sp)
	ld	$3, PT_R3(sp)
	ld	$4, PT_R4(sp)
	ld	$5, PT_R5(sp)
	ld	$6, PT_R6(sp)
	ld	$7, PT_R7(sp)
	ld	$16, PT_R16(sp)
	ld	$17, PT_R17(sp)
	ld	$18, PT_R18(sp)
	ld	$19, PT_R19(sp)
	ld	$20, PT_R20(sp)
	ld	$21, PT_R21(sp)
	ld	$22, PT_R22(sp)
	ld	$23, PT_R23(sp)
	ld	$26, PT_R26(sp)
	ld	$27, PT_R27(sp)
	ld	$28, PT_R28(sp)
	ld	$30, PT_R30(sp)
	ld	$31, PT_R31(sp)
	daddiu	sp, PT_SIZE		/* pop the sleep frame */
	jr	ra
	END(wakeup_start)

	.set	pop
AirFortressIlikara/LS2K0300-linux-4.19
1,272
arch/mips/boot/compressed/head.S
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 1995 Waldorf Electronics
 * Written by Ralf Baechle and Andreas Busse
 * Copyright (C) 1995 - 1999 Ralf Baechle
 * Copyright (C) 1996 Paul M. Antoine
 * Modified for DECStation and hence R3000 support by Paul M. Antoine
 * Further modifications by David S. Miller and Harald Koerfgen
 * Copyright (C) 1999 Silicon Graphics, Inc.
 */

/*
 * Entry point of the compressed (zboot) kernel image: clears BSS, sets
 * up a stack and heap, calls decompress_kernel(), then jumps to the
 * decompressed kernel at KERNEL_ENTRY with the original firmware
 * arguments (a0-a3) restored.
 */
#include <asm/asm.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>

	.set	noreorder
	.cprestore
	LEAF(start)
start:
#ifdef CONFIG_CPU_LOONGSON3
	/* Set the ST0_MM bit in Status on Loongson-3.
	 * NOTE(review): presumably a Loongson-specific mode bit required
	 * this early -- confirm its semantics against asm/mipsregs.h. */
	mfc0	t0, CP0_STATUS
	or	t0, ST0_MM
	mtc0	t0, CP0_STATUS
#endif

	/* Save boot rom start args */
	move	s0, a0
	move	s1, a1
	move	s2, a2
	move	s3, a3

	/* Clear BSS: word stores from _edata up to _end. */
	PTR_LA	a0, _edata
	PTR_LA	a2, _end
1:	sw	zero, 0(a0)
	bne	a2, a0, 1b
	addiu	a0, a0, 4		/* delay slot: executes each iteration */

	PTR_LA	a0, (.heap)		/* heap address */
	PTR_LA	sp, (.stack + 8192)	/* stack address (top of 8 KiB stack) */

	/* Call decompress_kernel(heap); it returns to label 2 via ra. */
	PTR_LA	ra, 2f
	PTR_LA	k0, decompress_kernel
	jr	k0
	nop
2:
	/* Restore firmware arguments and enter the decompressed kernel. */
	move	a0, s0
	move	a1, s1
	move	a2, s2
	move	a3, s3
	PTR_LI	k0, KERNEL_ENTRY
	jr	k0
	nop
3:
	/* Not reached: park the CPU if the kernel ever returns. */
	b	3b
	nop
	END(start)

	/* Decompressor work areas in .bss. */
	.comm .heap,BOOT_HEAP_SIZE,4
	.comm .stack,4096*2,4
AirFortressIlikara/LS2K0300-linux-4.19
3,109
arch/mips/fw/lib/call_o32.S
/*
 * O32 interface for the 64 (or N32) ABI.
 *
 * Copyright (C) 2002, 2014 Maciej W. Rozycki
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/asm.h>
#include <asm/regdef.h>

/* O32 register size. */
#define O32_SZREG	4
/* Maximum number of arguments supported.  Must be even! */
#define O32_ARGC	32
/* Number of static registers we save. */
#define O32_STATC	11
/* Argument area frame size. */
#define O32_ARGSZ	(O32_SZREG * O32_ARGC)
/* Static register save area frame size. */
#define O32_STATSZ	(SZREG * O32_STATC)
/* Stack pointer register save area frame size. */
#define O32_SPSZ	SZREG
/* Combined area frame size. */
#define O32_FRAMESZ	(O32_ARGSZ + O32_SPSZ + O32_STATSZ)
/* Switched stack frame size. */
#define O32_NFRAMESZ	(O32_ARGSZ + O32_SPSZ)

	.text

/*
 * O32 function call dispatcher, for interfacing 32-bit ROM routines.
 *
 * The standard 64 (N32) calling sequence is supported, with a0 holding
 * a function pointer, a1 a pointer to the new stack to call the
 * function with or 0 if no stack switching is requested, a2-a7 -- the
 * function call's first six arguments, and the stack -- the remaining
 * arguments (up to O32_ARGC, including a2-a7).  Static registers, gp
 * and fp are preserved, v0 holds the result.  This code relies on the
 * called o32 function for sp and ra restoration and this dispatcher has
 * to be placed in a KSEGx (or KUSEG) address space.  Any pointers
 * passed have to point to addresses within one of these spaces as well.
 */
NESTED(call_o32, O32_FRAMESZ, ra)
	/* Save ra, fp, gp and all static (callee-saved) registers. */
	REG_SUBU	sp,O32_FRAMESZ
	REG_S		ra,O32_FRAMESZ-1*SZREG(sp)
	REG_S		fp,O32_FRAMESZ-2*SZREG(sp)
	REG_S		gp,O32_FRAMESZ-3*SZREG(sp)
	REG_S		s7,O32_FRAMESZ-4*SZREG(sp)
	REG_S		s6,O32_FRAMESZ-5*SZREG(sp)
	REG_S		s5,O32_FRAMESZ-6*SZREG(sp)
	REG_S		s4,O32_FRAMESZ-7*SZREG(sp)
	REG_S		s3,O32_FRAMESZ-8*SZREG(sp)
	REG_S		s2,O32_FRAMESZ-9*SZREG(sp)
	REG_S		s1,O32_FRAMESZ-10*SZREG(sp)
	REG_S		s0,O32_FRAMESZ-11*SZREG(sp)

	/* jp ($25): callee address -- o32 PIC callees expect it in $25. */
	move		jp,a0

	/* fp = base of the switched o32 frame (a1), or our own sp. */
	move		fp,sp
	beqz		a1,0f
	REG_SUBU	fp,a1,O32_NFRAMESZ
0:
	/* Stash our sp in the switched frame so we can return. */
	REG_S		sp,O32_NFRAMESZ-1*SZREG(fp)

	/* First four o32 args in registers; sll by zero canonically
	 * sign-extends the low 32 bits (o32 register-value form). */
	sll		a0,a2,zero
	sll		a1,a3,zero
	sll		a2,a4,zero
	sll		a3,a5,zero
	/* Args 5 and 6 go in the o32 argument save area on the stack. */
	sw		a6,4*O32_SZREG(fp)
	sw		a7,5*O32_SZREG(fp)

	/* Copy the remaining O32_ARGC-6 stack args: 64-bit slots in our
	 * caller's frame -> 32-bit o32 stack slots. */
	PTR_LA		t0,O32_FRAMESZ(sp)
	PTR_LA		t1,6*O32_SZREG(fp)
	li		t2,O32_ARGC-6
1:
	lw		t3,(t0)
	REG_ADDU	t0,SZREG
	sw		t3,(t1)
	REG_SUBU	t2,1
	REG_ADDU	t1,O32_SZREG
	bnez		t2,1b

	/* Switch to the o32 stack and call; callee restores ra. */
	move		sp,fp
	jalr		jp

	/* Back on the o32 stack: recover our own sp, then epilogue. */
	REG_L		sp,O32_NFRAMESZ-1*SZREG(sp)

	REG_L		s0,O32_FRAMESZ-11*SZREG(sp)
	REG_L		s1,O32_FRAMESZ-10*SZREG(sp)
	REG_L		s2,O32_FRAMESZ-9*SZREG(sp)
	REG_L		s3,O32_FRAMESZ-8*SZREG(sp)
	REG_L		s4,O32_FRAMESZ-7*SZREG(sp)
	REG_L		s5,O32_FRAMESZ-6*SZREG(sp)
	REG_L		s6,O32_FRAMESZ-5*SZREG(sp)
	REG_L		s7,O32_FRAMESZ-4*SZREG(sp)
	REG_L		gp,O32_FRAMESZ-3*SZREG(sp)
	REG_L		fp,O32_FRAMESZ-2*SZREG(sp)
	REG_L		ra,O32_FRAMESZ-1*SZREG(sp)
	REG_ADDU	sp,O32_FRAMESZ
	jr		ra
END(call_o32)
AirFortressIlikara/LS2K0300-linux-4.19
5,238
arch/mips/alchemy/common/sleeper.S
/*
 * Copyright 2002 Embedded Edge, LLC
 * Author: dan@embeddededge.com
 *
 * Sleep helper for Au1xxx sleep mode.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <asm/asm.h>
#include <asm/mipsregs.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>

	.extern	__flush_cache_all

	.text
	.set noreorder
	.set noat
	.align	5

	/* preparatory stuff */
	/*
	 * Save GPRs and a few CP0 registers on the stack, flush caches,
	 * and hand SP and the resume address to the boot ROM via the
	 * sys_scratch registers so wakeup re-enters at
	 * alchemy_sleep_wakeup with sp intact.
	 */
	.macro	SETUP_SLEEP
	subu	sp, PT_SIZE
	sw	$1, PT_R1(sp)
	sw	$2, PT_R2(sp)
	sw	$3, PT_R3(sp)
	sw	$4, PT_R4(sp)
	sw	$5, PT_R5(sp)
	sw	$6, PT_R6(sp)
	sw	$7, PT_R7(sp)
	sw	$16, PT_R16(sp)
	sw	$17, PT_R17(sp)
	sw	$18, PT_R18(sp)
	sw	$19, PT_R19(sp)
	sw	$20, PT_R20(sp)
	sw	$21, PT_R21(sp)
	sw	$22, PT_R22(sp)
	sw	$23, PT_R23(sp)
	sw	$26, PT_R26(sp)
	sw	$27, PT_R27(sp)
	sw	$28, PT_R28(sp)
	sw	$30, PT_R30(sp)
	sw	$31, PT_R31(sp)
	/* CP0 state is stashed at fixed frame offsets, not PT_* slots. */
	mfc0	k0, CP0_STATUS
	sw	k0, 0x20(sp)
	mfc0	k0, CP0_CONTEXT
	sw	k0, 0x1c(sp)
	mfc0	k0, CP0_PAGEMASK
	sw	k0, 0x18(sp)
	mfc0	k0, CP0_CONFIG
	sw	k0, 0x14(sp)

	/* flush caches to make sure context is in memory */
	la	t1, __flush_cache_all
	lw	t0, 0(t1)		/* __flush_cache_all is a function pointer */
	jalr	t0
	nop

	/* Now set up the scratch registers so the boot rom will
	 * return to this point upon wakeup.
	 * sys_scratch0 : SP
	 * sys_scratch1 : RA
	 */
	lui	t3, 0xb190		/* sys_xxx */
	sw	sp, 0x0018(t3)
	la	k0, alchemy_sleep_wakeup /* resume path */
	sw	k0, 0x001c(t3)
	.endm

	/* Write the SoC power/sleep registers; the trailing nops pad the
	 * pipeline while the part powers down. */
	.macro	DO_SLEEP
	/* put power supply and processor to sleep */
	sw	zero, 0x0078(t3)	/* sys_slppwr */
	sync
	sw	zero, 0x007c(t3)	/* sys_sleep */
	sync
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	.endm

/* sleep code for Au1000/Au1100/Au1500 memory controller type */
LEAF(alchemy_sleep_au1000)
	SETUP_SLEEP

	/* cache following instructions, as memory gets put to sleep */
	/* cache op 0x14 = fetch-and-lock I-cache line. */
	la	t0, 1f
	.set	arch=r4000
	cache	0x14, 0(t0)
	cache	0x14, 32(t0)
	cache	0x14, 64(t0)
	cache	0x14, 96(t0)
	.set	mips0

1:	lui	a0, 0xb400		/* mem_xxx */
	sw	zero, 0x001c(a0)	/* Precharge */
	sync
	sw	zero, 0x0020(a0)	/* Auto Refresh */
	sync
	sw	zero, 0x0030(a0)	/* Sleep */
	sync

	DO_SLEEP

END(alchemy_sleep_au1000)

/* sleep code for Au1550/Au1200 memory controller type */
LEAF(alchemy_sleep_au1550)
	SETUP_SLEEP

	/* cache following instructions, as memory gets put to sleep */
	la	t0, 1f
	.set	arch=r4000
	cache	0x14, 0(t0)
	cache	0x14, 32(t0)
	cache	0x14, 64(t0)
	cache	0x14, 96(t0)
	.set	mips0

1:	lui	a0, 0xb400		/* mem_xxx */
	sw	zero, 0x08c0(a0)	/* Precharge */
	sync
	sw	zero, 0x08d0(a0)	/* Self Refresh */
	sync

	/* wait for sdram to enter self-refresh mode */
	lui	t0, 0x0100
2:	lw	t1, 0x0850(a0)		/* mem_sdstat */
	and	t2, t1, t0
	beq	t2, zero, 2b
	nop

	/* disable SDRAM clocks */
	lui	t0, 0xcfff
	ori	t0, t0, 0xffff
	lw	t1, 0x0840(a0)		/* mem_sdconfiga */
	and	t1, t0, t1		/* clear CE[1:0] */
	sw	t1, 0x0840(a0)		/* mem_sdconfiga */
	sync

	DO_SLEEP

END(alchemy_sleep_au1550)

/* sleepcode for Au1300 memory controller type */
LEAF(alchemy_sleep_au1300)
	SETUP_SLEEP

	/* cache following instructions, as memory gets put to sleep */
	/* Lock everything from label 2 to label 4 into the I-cache. */
	la	t0, 2f
	la	t1, 4f
	subu	t2, t1, t0
	.set	arch=r4000
1:	cache	0x14, 0(t0)
	subu	t2, t2, 32
	bgez	t2, 1b
	addu	t0, t0, 32		/* delay slot: next 32-byte line */
	.set	mips0

2:	lui	a0, 0xb400		/* mem_xxx */

	/* disable all ports in mem_sdportcfga */
	sw	zero, 0x868(a0)		/* mem_sdportcfga */
	sync

	/* disable ODT */
	li	t0, 0x03010000
	sw	t0, 0x08d8(a0)		/* mem_sdcmd0 */
	sw	t0, 0x08dc(a0)		/* mem_sdcmd1 */
	sync

	/* precharge */
	li	t0, 0x23000400
	sw	t0, 0x08dc(a0)		/* mem_sdcmd1 */
	sw	t0, 0x08d8(a0)		/* mem_sdcmd0 */
	sync

	/* auto refresh */
	sw	zero, 0x08c8(a0)	/* mem_sdautoref */
	sync

	/* block access to the DDR */
	lw	t0, 0x0848(a0)		/* mem_sdconfigb */
	li	t1, (1 << 7 | 0x3F)
	or	t0, t0, t1
	sw	t0, 0x0848(a0)		/* mem_sdconfigb */
	sync

	/* issue the Self Refresh command */
	li	t0, 0x10000000
	sw	t0, 0x08dc(a0)		/* mem_sdcmd1 */
	sw	t0, 0x08d8(a0)		/* mem_sdcmd0 */
	sync

	/* wait for sdram to enter self-refresh mode */
	lui	t0, 0x0300
3:	lw	t1, 0x0850(a0)		/* mem_sdstat */
	and	t2, t1, t0
	bne	t2, t0, 3b
	nop

	/* disable SDRAM clocks */
	li	t0, ~(3<<28)
	lw	t1, 0x0840(a0)		/* mem_sdconfiga */
	and	t1, t1, t0		/* clear CE[1:0] */
	sw	t1, 0x0840(a0)		/* mem_sdconfiga */
	sync

	DO_SLEEP
4:
END(alchemy_sleep_au1300)

	/* This is where we return upon wakeup.
	 * Reload all of the registers and return.
	 * Entered with sp restored by the boot ROM from sys_scratch0.
	 */
LEAF(alchemy_sleep_wakeup)
	lw	k0, 0x20(sp)
	mtc0	k0, CP0_STATUS
	lw	k0, 0x1c(sp)
	mtc0	k0, CP0_CONTEXT
	lw	k0, 0x18(sp)
	mtc0	k0, CP0_PAGEMASK
	lw	k0, 0x14(sp)
	mtc0	k0, CP0_CONFIG

	/* We need to catch the early Alchemy SOCs with
	 * the write-only Config[OD] bit and set it back to one...
	 */
	jal	au1x00_fixup_config_od
	nop
	lw	$1, PT_R1(sp)
	lw	$2, PT_R2(sp)
	lw	$3, PT_R3(sp)
	lw	$4, PT_R4(sp)
	lw	$5, PT_R5(sp)
	lw	$6, PT_R6(sp)
	lw	$7, PT_R7(sp)
	lw	$16, PT_R16(sp)
	lw	$17, PT_R17(sp)
	lw	$18, PT_R18(sp)
	lw	$19, PT_R19(sp)
	lw	$20, PT_R20(sp)
	lw	$21, PT_R21(sp)
	lw	$22, PT_R22(sp)
	lw	$23, PT_R23(sp)
	lw	$26, PT_R26(sp)
	lw	$27, PT_R27(sp)
	lw	$28, PT_R28(sp)
	lw	$30, PT_R30(sp)
	lw	$31, PT_R31(sp)
	jr	ra
	addiu	sp, PT_SIZE		/* delay slot: pop the sleep frame */
END(alchemy_sleep_wakeup)
AirFortressIlikara/LS2K0300-linux-4.19
1,193
arch/powerpc/perf/bhrb.S
/*
 * Basic assembly code to read BHRB entries
 *
 * Copyright 2013 Anshuman Khandual, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <asm/ppc_asm.h>
#include <asm/ppc-opcode.h>

	.text

	.balign 8

/* r3 = n  (where n = [0-31])
 * The maximum number of BHRB entries supported with PPC_MFBHRBE instruction
 * is 1024. We have limited number of table entries here as POWER8 implements
 * 32 BHRB entries.
 */

/* .global read_bhrb */
/*
 * u64 read_bhrb(int n) - return BHRB entry n.
 *
 * mfbhrbe only takes an immediate entry number, so a jump table with
 * one "mfbhrbe; blr" pair per n is used: each pair is exactly 8 bytes,
 * hence the index is scaled by n << 3 from the table base.  Out-of-range
 * n (> 31) returns 0.
 */
_GLOBAL(read_bhrb)
	cmpldi	r3,31
	bgt	1f			/* n > 31: out of range */
	ld	r4,bhrb_table@got(r2)	/* table base via TOC */
	sldi	r3,r3,3			/* n * 8 bytes per table entry */
	add	r3,r4,r3
	mtctr	r3
	bctr				/* jump to entry n's mfbhrbe/blr pair */
1:	li	r3,0
	blr

/* Recursive doubling: expand 32 "mfbhrbe rN; blr" pairs for n = 0..31. */
#define MFBHRB_TABLE1(n) PPC_MFBHRBE(R3,n); blr
#define MFBHRB_TABLE2(n) MFBHRB_TABLE1(n); MFBHRB_TABLE1(n+1)
#define MFBHRB_TABLE4(n) MFBHRB_TABLE2(n); MFBHRB_TABLE2(n+2)
#define MFBHRB_TABLE8(n) MFBHRB_TABLE4(n); MFBHRB_TABLE4(n+4)
#define MFBHRB_TABLE16(n) MFBHRB_TABLE8(n); MFBHRB_TABLE8(n+8)
#define MFBHRB_TABLE32(n) MFBHRB_TABLE16(n); MFBHRB_TABLE16(n+16)

bhrb_table:
	MFBHRB_TABLE32(0)