repo_id stringlengths 5 115 | size int64 590 5.01M | file_path stringlengths 4 212 | content stringlengths 590 5.01M |
|---|---|---|---|
XiaoMi/nnlib | 23,257 | hexagon/asm_src/gvconv2db2b2b2_d32_h_v60.S | /*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
* FUNCTIONS : gvconv2db2b2b2u_d32_asm
*
* DESCRIPTION
* Perform 2d convolution using elements of size in_depth. Results are
* scaled and saturated to 8bits. Max and Min accumulations are kept.
*
* ARCHITECTURE : QDSP6V60 + HVX
*
* INPUT : R0 : uint8_t *in_bufe
* R1 : uint8_t *in_bufo
* R2 : uint8_t *out_bufe
* R3 : uint8_t *out_bufo
* R4 : uint8_t *weights
* R5 : int32_t in_width
* SP+#0 : int32_t next_out_width
* SP+#4 : int32_t out_width
* SP+#8 : int32_t stride_h_w
* SP+#12 : int32_t in_depth
* SP+#16 : int32_t filt_width
* SP+#20 : int32_t filt_height
* SP+#24 : int32_t out_height
* SP+#28 : const int32_t *biasbuf
* SP+#32 : const int32_t *suma
* SP+#36 : int32_t next_suma_row
* SP+#40 : int32_t *ptr_minmax
* SP+#44 : int32_t recip
* SP+#48 : int32_t recip_shift
*/
.text
.global gvconv2db2b2b2u_d32_asm
.balign 32
.type gvconv2db2b2b2u_d32_asm, @function
gvconv2db2b2b2u_d32_asm:
/*=============================================================================*/
/* Register aliases. Scratch registers are heavily re-used: the same physical
 * register carries different logical values in different phases of the kernel,
 * hence the long alias chains below. Do not reorder packets without checking
 * every alias of the registers involved. */
#define SS (13*8)
#define APTR (SS+8)
#define in_bufe r0
#define in_bufo r1
#define in_bufo_in_bufe r1:0
#define out_bufe r2
#define out_bufo r3
#define out_bufo_out_bufe r3:2
#define weights r4
#define in_width r5
#define c_1w r7
#define c8w c_1w
#define xl03x00 r8
#define xl23x20 xl03x00
#define suma0 xl03x00
#define in_width_stride_depth xl03x00
#define sumabuf xl03x00
#define xl07x04 r9
#define xl27x24 xl07x04
#define next_out_width xl07x04
#define suma1 xl07x04
#define xl07x04_xl03x00 r9:8
#define xl27x24_xl23x20 xl07x04_xl03x00
#define next_out_width_in_width_stride_depth xl07x04_xl03x00
#define xl13x10 r10
#define xl33x30 xl13x10
#define xh03x00 xl13x10
#define xh13x10 xl13x10
#define xh23x20 xl13x10
#define xh33x30 xl13x10
#define in_bufet xl13x10
#define out_bufet xl13x10
#define recip_shift xl13x10
#define suma3 xl13x10
#define xl17x14 r11
#define xl37x34 xl17x14
#define xh07x04 xl17x14
#define xh17x14 xl17x14
#define xh27x24 xl17x14
#define xh37x34 xl17x14
#define in_bufot xl17x14
#define out_bufot xl17x14
#define recipshiftval xl17x14
#define suma2 xl17x14
#define xl17x14_xl13x10 r11:10
#define xl37x34_xl33x30 xl17x14_xl13x10
#define xh07x04_xh03x00 xl17x14_xl13x10
#define xh17x14_xh13x10 xl17x14_xl13x10
#define xh27x24_xh23x20 xl17x14_xl13x10
#define xh37x34_xh33x30 xl17x14_xl13x10
#define in_bufoet xl17x14_xl13x10
#define out_bufoet xl17x14_xl13x10
#define recipshiftval_recip_shift xl17x14_xl13x10
#define ptr_xl0 r12
#define ptr_xh0 r13
#define ptr_xl0_ptr_xh0 r13:12
#define ptr_xl1 r14
#define ptr_xh1 r15
#define ptr_wl r16
#define sumat ptr_wl
#define ptr_wh r17
#define sumainc ptr_wh
#define ptr_wh_wl r17:16
#define sumainc_sumat ptr_wh_wl
#define filt_wid r18
#define filt_ht r19
#define suma r20
#define stride_w r21
#define ptr_ze r22
#define ptr_zo r23
#define ptr_zo_ptr_ze r23:22
#define out_y r24
#define out_x4 r25
#define in_width_4 r30
#define next_outputs r31
/*=============================================================================*/
#define sll0 v0
#define sll1 v1
#define sll2 v2
#define sll3 v3
#define shl0 v4
#define shl1 v5
#define shl2 v6
#define shl3 v7
#define shh0 v8
#define s0 shh0
#define vsuma0 shh0
#define shh1 v9
#define s1 shh1
#define vsuma1 shh1
#define shh2 v10
#define s2 shh2
#define vsuma2 shh2
#define shh3 v11
#define vsuma3 shh3
#define wh0 v12
#define wh1 v13
#define wl0 v14
#define wl1 v15
#define min_val v16
#define max_val v17
#define recipvec v18
#define wsum v19
#define constw80 v20
#define sk v21
#define y0 v22
#define y1 v23
#define y1y0 v23:22
#define y2 v24
#define y3 v25
#define s3 v26
/*=============================================================================*/
/* Spill-slot layout within the allocated frame (offsets from new r29). */
#define off_ptr_wl ( 7*8+0)
#define off_ptr_wh ( 7*8+4)
#define off_in_bufe ( 8*8+0)
#define off_in_bufo ( 8*8+4)
#define off_out_bufe ( 9*8+0)
#define off_out_bufo ( 9*8+4)
#define off_in_width_stride_depth (10*8+0)
#define off_next_out_width (10*8+4)
#define off_recip (11*8+0)
/*=============================================================================*/
{ allocframe(#SS) // store below uses pre-packet r29, hence the -APTR bias
memd(R29+#0*8-APTR) = R17:16 // save callee-saved r17:16
r8 = #0x80 //
sll0 = #0 //
}
{ memd(R29+#1*8) = R19:18 // save callee-saved registers
memd(R29+#2*8) = R21:20 //
constw80 = VSPLAT(r8) // 0x80 in every byte lane
sll1 = #0 //
}
{ memd(R29+#3*8) = R23:22 //
memd(R29+#4*8) = R25:24 //
sll2 = #0 //
sll3 = #0 //
}
{ memd(R29+#5*8) = R27:26 //
memd(R29+#6*8) = R31:30 //
shl0 = constw80 //
shl1 = constw80 //
}
{ r9:8 = memd(r29+#APTR+0) // out_width|next_out_width
r11:10 = memd(r29+#APTR+8) // in_depth|stride_h_w
shl2 = constw80 //
shl3 = constw80 //
}
{ r13:12 = memd(r29+#APTR+16) // filt_height|filt_width
r15:14 = memd(r29+#APTR+24) // biasbuf|out_height
r22 = zxth(r10) // stride_w
r10 = lsr(r10,#16) // stride_h
}
{ r1:0 = memd(r29+#APTR+40) // recip|ptr_minmax
memd(r29+#off_in_bufe) = in_bufo_in_bufe //
filt_wid = asl(r12,#2) //filt_wid = filt_width*4
filt_ht = asr(r11,#5) // in_depth>>5
}
{ r6 = memw(r29+#APTR+48) // recip_shift
max_val = vmem(r0+#0) // running max from ptr_minmax[0]
r22 = asl(r22,#2) // stride_w*4
r7 = #1 //
}
{ min_val = vmem(r0+#1) // running min from ptr_minmax[1]
recipvec = VSPLAT(r1) //
memw(r29+#APTR+8) = r22 // stride_w
r22 = asl(r22,#3) // stride_w4
}
{ wsum = vmem(r15+#0) // biasbuf
r23 = mpyi(in_width,r11) // in_width*in_depth
in_width_4 = asl(in_width,#5) //
memd(r29+#off_out_bufe) = out_bufo_out_bufe //
}
{ r23 = mpyi(r23,r10) // in_width*in_depth*stride_h
r7 = asl(r7,r6) //
memw(r29+#off_next_out_width) = r8 //
memw(r29+#off_ptr_wl) = weights //
}
{ memw(r29+#off_in_width_stride_depth) = r23 //
r7 = combine(r7.l,r7.l) //
ptr_wh = mpyi(filt_wid,r13) // filt_wid*filt_height
filt_ht = mpyi(r13,filt_ht) // filt_height*in_depth>>5
}
{ memd(r29+#off_recip) = r7:6 //
ptr_wh = mpyi(ptr_wh,r11) // filt_width*filt_height*in_depth
next_outputs = mpyi(filt_ht,in_width_4) //
suma = memw(r29+#APTR+32) // suma pointer
}
{ ptr_wh = addasl(weights,ptr_wh,#3) //
next_outputs += mpyi(r22,#-4) // -4*stride_w4
stride_w = memw(r29+#APTR+8) //
}
{ memw(r29+#off_ptr_wh) = ptr_wh //
m0 = r22 // stride_w4
r22 = mpyi(r22,#-3) // -3*stride_w4
suma0 = memw(suma+#0) //
}
{ r22 = add(r22,#+8) // -3*stride_w4+8
vsuma0 = vsplat(suma0) //
suma1 = memw(suma+stride_w<<#0) //
suma2 = memw(suma+stride_w<<#1) //
}
{ m1 = r22 // -3*stride_w4+8
shh0.w = vadd(wsum.w,vsuma0.w) //
suma += mpyi(stride_w,#3) //
}
{ vsuma1 = vsplat(suma1) //
vsuma2 = vsplat(suma2) //
suma3 = memw(suma+#0) //
suma = add(suma,stride_w) //
}
{ shh1.w = vadd(wsum.w,vsuma1.w) //
shh2.w = vadd(wsum.w,vsuma2.w) //
vsuma3 = vsplat(suma3) //
}
{ shh3.w = vadd(wsum.w,vsuma3.w) //
out_y = memw(r29+#APTR+24) // out_height
}
/* ---------------------------------------------------------------------------- */
/* Outer loop over output rows. */
.balign 32
.L_height:
{ ptr_xl0_ptr_xh0 = memd(r29+#off_in_bufe) //
next_out_width_in_width_stride_depth = memd(r29+#off_in_width_stride_depth)//
out_y = add(out_y, #-1) //
p0 = cmp.eq(out_y,#1) // last iteration?
}
{ in_bufet = add(ptr_xl0,in_width_stride_depth) //
in_bufot = add(ptr_xh0,in_width_stride_depth) //
ptr_zo_ptr_ze = memd(r29+#off_out_bufe) //
sumainc_sumat = memd(r29+#APTR+32) //
}
{ memd(r29+#off_in_bufe) = in_bufoet //
#if defined(SPLIT_OUTPUT)
out_bufet = add(ptr_ze,next_out_width) //
#else
out_bufet = addasl(ptr_ze,next_out_width,#1) //
#endif
out_bufot = add(ptr_zo,next_out_width) //
sumat = add(sumat,sumainc) //
}
{ out_x4 = memw(r29+#APTR+4) //
if (!p0) memw(r29+#APTR+32) = sumat //
loop1(.L_filt_height,filt_ht) //[p2]
}
{ memd(r29+#off_out_bufe) = out_bufoet //
out_x4 = add(out_x4,#-4) //[p2]
p1 = cmp.gt(out_x4,#4) //[p2] last iteration
ptr_wh_wl = memd(r29+#off_ptr_wl) //[p2]
}
/* ---------------------------------------------------------------------------- */
/* Inner loops: 4 output pixels per pass, filter height x width accumulation. */
.balign 32
.L_width:
.L_filt_height:
{ xl07x04_xl03x00 = memd(ptr_xl0+#0) //[p0]
ptr_xl1 = addasl(ptr_xl0,stride_w,#3) //
loop0(.L_filt_width, filt_wid) //
c8w = #8 //
}
{ xl17x14_xl13x10 = memd(ptr_xl1++M0) //[p0]
ptr_xl0 = add(ptr_xl0,in_width_4) //
ptr_xh1 = ptr_xh0 //
ptr_xh0 = add(ptr_xh0,in_width_4) //
}
/* ---------------------------------------------------------------------------- */
.balign 32
.L_filt_width:
{ wh0.cur = vmem(ptr_wh++#1) //
shl0.uw += vrmpy(wh0.ub,xl03x00.ub) //
shl1.uw += vrmpy(wh0.ub,xl13x10.ub) //
}
{ wl0.cur = vmem(ptr_wl++#1) //
sll0.uw += vrmpy(wl0.ub,xl03x00.ub) //
sll1.uw += vrmpy(wl0.ub,xl13x10.ub) //
}
{ wh1.cur = vmem(ptr_wh++#1) //
shl0.uw += vrmpy(wh1.ub,xl07x04.ub) //
shl1.uw += vrmpy(wh1.ub,xl17x14.ub) //
}
{ wl1.cur = vmem(ptr_wl++#1) //
sll0.uw += vrmpy(wl1.ub,xl07x04.ub) //
sll1.uw += vrmpy(wl1.ub,xl17x14.ub) //
xh07x04_xh03x00 = memd(ptr_xh1++M0) //
}
{ shl0.uw += vrmpy(wl0.ub,xh03x00.ub) //
shh0.uw += vrmpy(wh0.ub,xh03x00.ub) //
xl27x24_xl23x20 = memd(ptr_xl1++M0) //
}
{ shl0.uw += vrmpy(wl1.ub,xh07x04.ub) //
shh0.uw += vrmpy(wh1.ub,xh07x04.ub) //
xl37x34_xl33x30 = memd(ptr_xl1++M1) //
}
{ shl2.uw += vrmpy(wh0.ub,xl23x20.ub) //
shl3.uw += vrmpy(wh0.ub,xl33x30.ub) //
}
{ sll2.uw += vrmpy(wl0.ub,xl23x20.ub) //
sll3.uw += vrmpy(wl0.ub,xl33x30.ub) //
}
{ shl2.uw += vrmpy(wh1.ub,xl27x24.ub) //
shl3.uw += vrmpy(wh1.ub,xl37x34.ub) //
}
{ sll2.uw += vrmpy(wl1.ub,xl27x24.ub) //
sll3.uw += vrmpy(wl1.ub,xl37x34.ub) //
xh17x14_xh13x10 = memd(ptr_xh1++M0) //
}
{ shl1.uw += vrmpy(wl0.ub,xh13x10.ub) //
shh1.uw += vrmpy(wh0.ub,xh13x10.ub) //
}
{ shl1.uw += vrmpy(wl1.ub,xh17x14.ub) //
shh1.uw += vrmpy(wh1.ub,xh17x14.ub) //
xh27x24_xh23x20 = memd(ptr_xh1++M0) //
}
{ shl2.uw += vrmpy(wl0.ub,xh23x20.ub) //
shh2.uw += vrmpy(wh0.ub,xh23x20.ub) //
}
{ shl2.uw += vrmpy(wl1.ub,xh27x24.ub) //
shh2.uw += vrmpy(wh1.ub,xh27x24.ub) //
xh37x34_xh33x30 = memd(ptr_xh1++M1) //
}
{ shl3.uw += vrmpy(wl0.ub,xh33x30.ub) //
shh3.uw += vrmpy(wh0.ub,xh33x30.ub) //
xl07x04_xl03x00 = memd(ptr_xl1++M0) //[p0]
}
{ shl3.uw += vrmpy(wl1.ub,xh37x34.ub) //
shh3.uw += vrmpy(wh1.ub,xh37x34.ub) //
xl17x14_xl13x10 = memd(ptr_xl1++M0) //[p0]
}:endloop0:endloop1
/* Epilog: combine partial sums, scale, track min/max, pack and store. */
{ shl0.w += vasr(sll0.w,c8w) //
recipshiftval_recip_shift = memd(r29+#off_recip)//
sll0 = #0 //
sumabuf = memw(r29+#APTR+32) //
}
{ shl1.w += vasr(sll1.w,c8w) //
ptr_xl0 = sub(ptr_xl0,next_outputs) //
ptr_xh0 = sub(ptr_xh0,next_outputs) //
if (!p1) suma = sumabuf //
}
{ shh0.w += vasr(shl0.w,c8w) //
loop1(.L_filt_height,filt_ht) //[p2]
shl0 = constw80 //
suma0 = memw(suma+#0) //
}
{ shh1.w += vasr(shl1.w,c8w) //
min_val.w = vmin(min_val.w,shh0.w) //
max_val.w = vmax(max_val.w,shh0.w) //
sk = shh0 //
}
{ s0.w = vmpyi(shh0.w,recipshiftval.h) //s0=Q6_Vw_vasl_VwR(shh0.h,recip_shift)
shl2.w += vasr(sll2.w,c8w) //
shl1 = constw80 //
p2 = cmp.gt(out_x4,#1-4) //should s1 be included ?
}
{ if (p2) sk = shh1 //
s1.w = vmpyi(shh1.w,recipshiftval.h) //s1=Q6_Vw_vasl_VwR(shh1.h,recip_shift)
shl3.w += vasr(sll3.w,c8w) //
suma1 = memw(suma+stride_w<<#0) //
}
{ shh2.w += vasr(shl2.w,c8w) //
y0.w = vmpye(s0.w,recipvec.uh) //
min_val.w = vmin(min_val.w,sk.w) //
p2 = cmp.gt(out_x4,#2-4) //should s2 be included ?
}
{ shh3.w += vasr(shl3.w,c8w) //
y0.w += vmpyo(s0.w,recipvec.h):<<1:rnd:sat:shift//
max_val.w = vmax(max_val.w,sk.w) //
suma2 = memw(suma+stride_w<<#1) //
}
{ s2.w = VASL(shh2.w,recip_shift) //
y1.w = vmpye(s1.w,recipvec.uh) //
if (p2) sk = shh2 //
p2 = cmp.gt(out_x4,#3-4) //should s3 be included ?
}
{ s3.w = VASL(shh3.w,recip_shift) //
y1.w += vmpyo(s1.w,recipvec.h):<<1:rnd:sat:shift//
min_val.w = vmin(min_val.w,sk.w) //
suma += mpyi(stride_w,#3) //
}
{ y2.w = vmpye(s2.w,recipvec.uh) //
max_val.w = vmax(max_val.w,sk.w) //
if (p2) sk = shh3 //
suma3 = memw(suma+#0) //
}
{ y2.w += vmpyo(s2.w,recipvec.h):<<1:rnd:sat:shift//
min_val.w = vmin(min_val.w,sk.w) //
max_val.w = vmax(max_val.w,sk.w) //
suma = add(suma,stride_w) //
}
{ y3.w = vmpye(s3.w,recipvec.uh) //
sll2 = #0 //
sll3 = #0 //
c_1w = #-1 //
}
{ y3.w += vmpyo(s3.w,recipvec.h):<<1:rnd:sat:shift//
shl2 = constw80 //
shl3 = constw80 //
ptr_wh_wl = memd(r29+#off_ptr_wl) //[p2]
}
{ y1.uh = vpack(y1.w,y0.w):sat //
vsuma0 = vsplat(suma0) //
vsuma1 = vsplat(suma1) //
sll1 = #0 //
}
{ y3.uh = vpack(y3.w,y2.w):sat //
shh0.w = vadd(wsum.w,vsuma0.w) //
vsuma2 = vsplat(suma2) //
vsuma3 = vsplat(suma3) //
}
{ shh1.w = vadd(wsum.w,vsuma1.w) //
shh2.w = vadd(wsum.w,vsuma2.w) //
shh3.w = vadd(wsum.w,vsuma3.w) //
}
#if defined(SPLIT_OUTPUT)
{ y1y0 = vdeal(y3,y1,c_1w) //
vmem(ptr_ze++#1) = y0.new //
#else
{ vmem(ptr_ze++#1) = y1 //
#endif
}
#if defined(SPLIT_OUTPUT)
{ vmem(ptr_zo++#1) = y1 //
#else
{ vmem(ptr_ze++#1) = y3 //
#endif
if (p1) jump:t .L_width //
out_x4 = add(out_x4,#-4) //[p2]
p1 = cmp.gt(out_x4,#4) //[p2] last iteration
} //end cols per line
/* ---------------------------------------------------------------------------- */
{ p0 = cmp.eq(out_y, #0) //
if(!p0.new) jump:t .L_height //
} //end lines per block
/* ---------------------------------------------------------------------------- */
/* Write back the running min/max vectors and restore callee-saved state. */
{ r0 = memw(r29+#APTR+40) // ptr_minmax
}
{ vmem(r0+#0) = max_val //
R17:16 = memd(R29+#0*8) // restore callee-saved registers
}
{ vmem(r0+#1) = min_val //
R19:18 = memd(R29+#1*8) // restore callee-saved registers
}
{ R21:20 = memd(R29+#2*8) // restore callee-saved registers
R23:22 = memd(R29+#3*8) // restore callee-saved registers
}
{ R25:24 = memd(R29+#4*8) // restore callee-saved registers
R31:30 = memd(R29+#6*8) // restore callee-saved registers
}
{ R27:26 = memd(R29+#5*8) // restore callee-saved registers
DEALLOC_RETURN // return
}
.L_end:
/*=============================================================================*/
.size gvconv2db2b2b2u_d32_asm, .L_end-gvconv2db2b2b2u_d32_asm
/*=============================================================================*/
|
XiaoMi/nnlib | 4,676 | hexagon/asm_src/vmemset_h.S | /*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
*/
/* ============================================================================ */
/* void vmemset_asm(void *dst, int val, int length)
 * HVX vector memset: fills `length` bytes starting at `dst` with the byte
 * value taken from the low byte of `val` (vsplatb). Handles unaligned head
 * and tail with vector predicates; the body stores full 128-byte vectors.
 */
.global vmemset_asm
.type vmemset_asm, @function
.balign 32
vmemset_asm:
/* ============================================================================ */
#define dst r0
#define src r1
#define length r2
/* ============================================================================ */
#define dstalign r5
#define end r7
#define sel0 r8
#define kernel r3
#define sel1 r9
#define sel2 r4
#define dsto r10
#define y0 v2
#define vpredp v3
#define vprede v4
#define qprolog q0
#define qepilog q1
/* ============================================================================ */
{ sel0 = ##0x01010101 //position of qprolog
src = vsplatb(src)
end = add(length, dst) //last byte of block
} {
qprolog =vsetq(dst) //qprolog vec predicate __|---
y0 = vsplat(src)
sel1 = add(sel0, sel0) //position of modified vec predicates
end = and(end, #127) //alignment of last byte
} {
dstalign = and(dst, #127) //alignment of dst
qepilog = vsetq(end) //setup epilog vec predicate
vpredp = vand(qprolog, sel1) //write prolog pred into vreg
length -= add(end, #-127) //round kernel up to 128 nearest
} {
vprede = vand(qepilog, sel1) //write epilog pred into vreg
qprolog = or(qprolog, !qepilog) //modified prolog if no full kernel block
length= lsr(length, #7) //kernel in blocks of 128
dstalign = add(dstalign, length) //amount of total data
} {
vpredp|= vand(qprolog, sel0) //store modified prolog
loop0(.L_blocks, length) //start main loop
p2 = cmp.gt(dstalign, #127) //if > 127 don't use modified prolog
if(!p2.new) sel1 = sel0 //don't choose modified
} {
qprolog = vand(vpredp, sel1) //select the qprolog
qepilog = vand(vprede, sel1) //choose correct qepilog
}
/* ============================================================================ */
.balign 32
.L_blocks:
{
if(!qprolog) vmem(dst++#1) = y0 //prolog store merged into main loop
qprolog = and(qprolog, !qprolog) //clear predicate so later iterations store full vectors
}:endloop0
/* ============================================================================ */
{
if(qepilog) vmem(dst+#0) = y0 //store out epilog data
}{
jumpr r31 //return to caller
}
.L_end:
/* ============================================================================ */
.size vmemset_asm, .L_end-vmemset_asm
/* ============================================================================ */
/* ============================================================================ */
|
XiaoMi/nnlib | 21,570 | hexagon/asm_src/gvconv2dbbb_d32_s1f_h_v66.S | /*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*======================================================================*/
/* FUNCTIONS : gvconv2dbbbs1_v66_asm */
/* */
/* DESCRIPTION */
/* Perform 2d convolution with input depth to output */
/* max, min computed and output scaled to 8bits */
/* */
/* ARCHITECTURE : QDSP6V6 + HVX */
/*======================================================================*/
/* REVISION HISTORY: */
/* ================= */
/* */
/* Author Date Comments */
/* ------------------------------------------------------------- */
/* DJH 04/21/17 created */
/* DJH 05/12/17 update api precomputed filt_offset */
/* DJH 05/16/17 Hoisted loop0 around to prolog and */
/* epilog of loop1 */
/* DJ 05/17/17 specialized version with hstride = 1 */
/*======================================================================*/
#if 0
#endif
/*=============================================================================*/
.text
.file "gvconv2dbbb_d32_s1_h_v66.S"
.global gvconv2dbbbs1_v66_asm
.balign 32
.type gvconv2dbbbs1_v66_asm, @function
gvconv2dbbbs1_v66_asm:
/*=============================================================================*/
/*=============================================================================*/
#define ptr_xi r0 //data aligned 128
#define ptr_wi r1 //weights aligned 128
#define ptr_zi r2 //results aligned 128
#define in_width r3 //(pad_l+in_width+pad_r) => 4 %4
#define out_next_row r4 //value in bytes to get to next full out row
#define out_width r5 //out_width_pad
#define stride_h_w r26 //0 stride_height|stride_width
#define in_depth r27 //1 %32
#define in_depth_stride_h_w r27:26//
#define filt_width r8 //2 >= 1
#define filt_height r9 //3 >= 1filt_height lines per filter
#define filt_height_width r9:8
#define out_height r10 //4 >= 1 number of vertical lines to perform
#define ptr_filtsum r11 //5 aligned 128
#define ptr_max r21 //6 aligned 128 was 12
#define recip_level r23 //7 recip is 31bit unsigned 0x7f800000000LL / max
#define out_align r1 //8 0, 32, 64, 96
#define skip_col r7 //21
#define lmask r27
#define out_next_d32 r24
#define nslice r25
#define nslice_out_next_d32 r25:24
#define recip_shift r11
/*=============================================================================*/
#define stride_h r26 //0 stride_height|stride_width
#define in_next_rows r28 //in_width * stride_h * in_depth for next output
#define ptr_x0 r6 //r6 spare
#define ptr_x1 r7 //
#define ptr_x1_ptr_x0 r7:6 //
#define stride_w r18 //stride width =1
#define next_outputs r19 //jump to input ptr for next set of outputs
#define ptr_w r20 //
#define in_width_32 r22 //
#define ptr_z r24 //
#define filt_cnt r18
#define ptr_x0_ r12
#define ptr_x1_ r13 //
#define ptr_x1_ptr_x0_ r13:12
#define z_ptr r3
#define c8_c96 r15:14
#define filt_width512 r16 //corrected to use ptr_w as a counter for fused loop
#define ptr_w_next r17
#define ptr_z_next r24
#define STQ r23 //shared with recip_level
#define AEQ0 r21 //align equals 0 needs to be persistent
#define PRED3_0 C4
#define w_count r25
/*=============================================================================*/
#define PS(SSRC) .word (0x1DFFE100+SSRC) //debug vec reg
#define PV(VSRC) .word (0x1DFFE020+VSRC)
#define s0 v0 //
#define s1 v1 //
#define s1s0 v1:0 //
#define s2 v2 //
#define s3 v3 //
#define s3s2 v3:2 //
#define s3s2s1s0 v3:0 //
#define w0 v21 //
#define x0 v4 //
#define x1 v5 //
#define x2 v6 //
#define x3 v7 //
#define x3210 v6 //
#define x3_prev v16 //previous value
#define xout v17 //realigned out
#define y0 v8 //
#define y1 v9 //
#define y2 v10 //
#define y3 v11 //
#define maxe v12 //
#define mine v13 //
#define wsum v14 //initialzed to in_offsey*wsum + biasoffset
#define recipvec v15 //
#define recip_sh_vec v19 //
#define RSS <<1:rnd:sat:shift //unverbose the insturction
#define STACK 72
/*=============================================================================*/
{ filt_height_width = memd(sp+#8) // //recip_level_ptr_max = memd(sp+#24)
sp = add(sp,#-STACK) //
out_width = lsr(out_width, #2) //4 outputs at once
} {
memd(sp+#40) = r27:26 //
in_depth_stride_h_w = memd(sp+#(STACK+0)) // //recipvec = vsplat(recip_level)
} {
memd(sp+#32) = r25:24 //
memd(sp+#16) = r21:20 //
c8_c96 = combine(#8, #96) //
} {
memd(sp+#24) = r23:22 //
filt_height = mpy(filt_height.L,in_depth.L) //filt_height*in_depth
in_next_rows= mpy(stride_h_w.H, in_depth.L) //
recip_level = memw(sp+#(STACK+28)) //
} {
recipvec = vmem(recip_level++#1) //
ptr_max = memw(sp+#(STACK+24)) //
filt_height = lsr(filt_height, #5) //filt_height * in_depth / 32
} {
memd(sp+#8) = r19:18 //
memw(sp+#(STACK+28)) = recip_level //
loop1(.L_width, out_width) //
} {
memd(sp+#0) = r17:16 //
ptr_w_next = ptr_wi //[P,0]ptr_y=ptr_yi initialize filter pointer
out_align = memw(sp+#(STACK+32)) //
r6 = #32
} {
recip_shift = memw(sp+#(STACK+48)) //
r6 -= lsr(out_align,#2) //1/4 for bytes -> 0 -> 32, 32,-> 24, 64 -> 16, 96 -> 8
} {
recip_sh_vec= vsplat(recip_shift) //
r6 = and(r6, #0x1f) //
r7 = #-1
} {
r7 = asl(r7, r6) //
} {
memw(sp+#60) = r7 //left mask
skip_col = memw(sp+#(STACK+36)) //right mask - skip col
r6 = #0x1f
} {
r6 &= asl(skip_col, #3) // (skip_col << 3) & 31
r7 = #-1
} {
r7 = lsr(r7, r6) //
} {
memw(sp+#64) = r7 //right mask
} {
ptr_filtsum = memw(sp+#(STACK+20)) //
in_width_32 = asl(in_width, #5) //32 * in_width d32 line
in_next_rows=mpyi(in_width,in_next_rows) //total vertical stride bytes
maxe = vmem(ptr_max+#0) //
} {
out_next_d32 = memw(sp+#(STACK+40)) //
next_outputs= mpyi(filt_height,in_width_32) //filt_height*in_width*in_depth
filt_height = mpy(filt_height.L, filt_width.L):<<1 //2d filter
} {
memw(sp+#48) = ptr_xi //
filt_width = asl(filt_width, #10) //
loop0(.L_filt, filt_height) //[P,0]for(filt_y=0;filt_y<height*in_depth/32;filt_y+=1)
} {
next_outputs = add(next_outputs, #-128) //(flt_hight*in_width*in_depth/32-4*stride)*32
filt_width512 = add(filt_width, #-512) //account for 512bytes per loop of w
filt_width = add(filt_width, #-128) //1 cycle off from consumer to generator
} {
mine = vmem(ptr_max+#1) //
AEQ0 = cmp.eq(out_align, #0) //if no alignment enable store
}
/*=============================================================================*/
.balign 64
.L_depth:
{ ptr_xi = memw(sp+#48) // restore ptr_xi
out_height = memw(sp+#(STACK+16)) //number of output lines
ptr_z_next = add(ptr_zi,out_next_d32) //
STQ = !cmp.eq(r0, r0) //force p2 off
} {
ptr_w = ptr_w_next // set ptr of weight
ptr_x0 = ptr_xi //ptr_xi
wsum = vmem(ptr_filtsum++#1) //
lmask = memw(sp+#60) //left mask
} {
memw(sp+#52) = ptr_w_next //save wi for someone else
memw(sp+#56) = ptr_z_next //
ptr_x1 = add(ptr_xi, #100) //[Pheight]setup initial pointer
filt_cnt = add(filt_width512, ptr_w) //add(filt_width, #-1) //ptr_w)
} {
z = vmem(ptr_x0+#0) //[Pheight]load 0-127
ptr_z = ptr_zi //
s3s2 = vcombine(wsum, wsum) //[P, 0]initialize accumulators
s1s0 = vcombine(wsum, wsum) //[P, 0]initialize accumulators
}
w_count = out_width
/*=============================================================================*/
.balign 64
.L_height:
.L_width:
.L_filt:
{ w0.tmp = vmem(ptr_w++#1) //[0, 0]load weights
s3s2s1s0.w += vrmpyz(w0.b, ptr_x1.ub++) //[0, 0]perform mac across 4 streams with saem weights
p3 = cmp.eq(filt_cnt, ptr_w) //[0, 0]ki is k1/32 - 0
if(p3.new) ptr_x0 = add(ptr_x0, in_width_32)//[0, 0]move to next line ptr_y keeps going
} {
w0.tmp = vmem(ptr_w++#1) //[0, 1]load weights
s3s2s1s0.w += vrmpyz(w0.b, ptr_x1.ub++) //[0, 1]perform mac across 4 streams with saem weights
if(p3) filt_cnt = add(filt_width, ptr_w) //[0, 1]
ptr_x1_ptr_x0_= vaddw(ptr_x1_ptr_x0,c8_c96) //[0, 1]ptr_x1_=add(ptr_x1,#8)||/ptr_x0_=add(ptr_x0, #96)
} {
w0.tmp = vmem(ptr_w++#1) //[0, 2]load weights
s3s2s1s0.w += vrmpyz(w0.b, ptr_x1.ub++) //[0, 2]perform mac across 4 streams with saem weights
z_ptr = mux(p3, ptr_x0, ptr_x1_) //[0, 2]
ptr_x1_ = mux(p3, ptr_x0_, ptr_x1_) //[0, 2]
} {
w0.tmp = vmem(ptr_w++#1) //[0, 3]load weights
s3s2s1s0.w += vrmpyz(w0.b, ptr_x1.ub ) //[0, 3]perform mac across 4 streams with saem weights
z = vmem(z_ptr+#0) //[0, 3]load 0-127 bytes into z buffer
ptr_x1 = add(ptr_x1_, #4) //[0, 3]reset ptr for next row of filter taps
}:endloop0
/*=============================================================================*/
{
PRED3_0 = lmask
lmask = #-1 //next mask default is all on 0xffffffff
x1.h = vpack(y1.w, y0.w):sat //[E1, 0]packe low 16bits together
} {
loop0(.L_filt, filt_height) //[E0, 0]for(filt_y=0;filt_y<height*in_depth/32;filt_y++){
s0.w = vasl(s0.w, recip_sh_vec.w) //
x3.h = vpack(y3.w, y2.w):sat //[E1, 1]pack low 16bits together
} {
s1.w = vasl(s1.w, recip_sh_vec.w) //
ptr_x0 = sub(ptr_x0, next_outputs) //[E0, 1]reset data ptr to next 4
ptr_w_next = ptr_w //
w_count = add(w_count, #-1)
} {
y0.w = vmpye(s0.w, recipvec.uh) //[E0, 1](s2 * recip + rnd)>>31
ptr_w = memw(sp+#52) ////[E0, 5]ptr_w=ptr_wi init filter pointer
x3210.ub = vpack(x3.h, x1.h):sat //[E1, 3]#sat8 <0, >255 and pack low 8bits
} {
y0.w+= vmpyo(s0.w, recipvec.h):RSS //<<1:rnd:sat:shift //[E0, 2]
s3.w = vasl(s3.w, recip_sh_vec.w) //
filt_cnt = add(ptr_w,filt_width512) //using the ptr_w as a counter
} {
if(!p0) y0 = maxe
y1.w = vmpye(s1.w, recipvec.uh) //[E0, 3](s2 * recip + rnd)>>31
s2.w = vasl(s2.w, recip_sh_vec.w) //
ptr_x1 = add(ptr_x0, #100) //setup initial pointer
} {
y1.w+= vmpyo(s1.w, recipvec.h):RSS //[E0, 4](s2 * recip + rnd)>>31
maxe.w = vmax(maxe.w, y0.w) //[E0, 0]see if s0 is max
if(!p0) y0 = mine
p0 = STQ
} {
if(!p1) y1 = maxe
mine.w = vmin(mine.w, y0.w) //[E0, 0]see if s0 is min
y2.w = vmpye(s2.w, recipvec.uh) //[E0, 5](s2 * recip + rnd)>>31
STQ = AEQ0
} {
maxe.w = vmax(maxe.w, y1.w) //[E0, 3]
if(!p1) y1 = mine
y2.w+= vmpyo(s2.w, recipvec.h):RSS //[E0, 6](s2 * recip + rnd)>>31
AEQ0 = cmp.eq(r0, r0)
} {
mine.w = vmin(mine.w, y1.w) //[E0, 4]see if z0 is max
if(!p2) y2 = maxe
y3.w = vmpye(s3.w, recipvec.uh) //[E0, 7]#(s2 * recip + rnd)>>31
} {
maxe.w = vmax(maxe.w, y2.w) //[E0, 4]
if(!p2) y2 = mine
y3.w+= vmpyo(s3.w, recipvec.h):RSS //[E0,8](s2 * recip + rnd)>>31
p2 = cmp.eq(w_count, #1) //
} {
mine.w = vmin(mine.w, y2.w) //[E0, 5]see if z0 is max
if(!p3) y3 = maxe
z = vmem(ptr_x0+#0) //pre load 0-127 for next row of filter
s1s0 = vcombine(wsum, wsum) //[E0, 8]initialize accumulator 2,3
} {
maxe.w = vmax(maxe.w, y3.w) //[E0, 2]
if(!p3) y3 = mine
xout = vlalign(x3210,x3_prev,out_align) //[E1, 6]
if(p0)vmem(ptr_z++#1):nt = xout.new //[E1, 6]store 2nd 32bytes
} {
x3_prev = x3210 //[E1, 7]save data for next output align
s3s2 = vcombine(wsum, wsum) //[E0, 8]initialize accumulator 2,3
if(p2) lmask = memw(sp+#64) //right mask
mine.w = vmin(mine.w, y3.w) //[E0, 2]see if z0 is max
}:endloop1 //cols per line kernel loop width
/*=============================================================================*/
{ x1.h = vpack(y1.w, y0.w):sat //[E1, 0]#>>16
out_height = add(out_height, #-1) //Prolog width
STQ = !cmp.eq(r0, r0) //[Pheight]force p2 off
ptr_xi= add(ptr_xi,in_next_rows) //ptr_x+=in_width*stride_h*in_depth)
} {
x3.h = vpack(y3.w, y2.w):sat //[E1, 1]#sat8 <0, >255
p1 = !cmp.eq(out_height, #0) //EE
//loop1(.L_width, out_width) //[Pheight]out_width
lc1 = out_width //[Pheight]out_width
} {
ptr_x0 = ptr_xi //Prolog width ptr_xi
AEQ0 = cmp.eq(out_align, #0) //[Pheight]if no alignment enable store
skip_col = memw(sp+#(STACK+36)) //
} {
p3 = tstbit(skip_col, #2) //[E1, 6] == 4
x3210.ub = vpack(x3.h, x1.h):sat //[E1, 3]#sat8 <0, >255
} {
ptr_zi = add(ptr_zi, out_next_row) //EEnext out line for this depth segment
ptr_x1 = add(ptr_x0, #100) //[Pheight]setup initial pointer
if (p1) z = vmem(ptr_x0+#0) //[Pheight]load 0-127
} {
xout = vlalign(x3210, x3_prev, out_align) //[E1, 6]
vmem(ptr_z+#0):nt = xout.new //[E1, 6]store 2nd 32bytes
w_count = out_width
lmask = memw(sp+#60) //left mask
} {
xout = vlalign(x3210, x3210, out_align) //[E1, 7]
if(p3) vmem(ptr_z+#1):nt = xout.new //[E1, 7]flush out last values
ptr_z = add(ptr_zi, #0) //
if (p1) jump:t .L_height //EE
}//end lines per block//last cols per line
/*=============================================================================*/
nslice = memw(sp+#(STACK+44)) //
nslice = add(nslice,#-1) //
memw(sp+#(STACK+44)) = nslice //
{
p1 = cmp.gt(nslice,#0) //
out_next_d32 = memw(sp+#(STACK+40)) //
recip_level = memw(sp+#(STACK+28)) //
} {
ptr_zi = memw(sp+#56) //
if(p1) recipvec = vmem(recip_level++#1) //
} {
memw(sp+#(STACK+28)) = recip_level //
if p1 jump .L_depth //
}
/*=============================================================================*/
ptr_max = memw(sp+#(STACK+24)) //
{ vmem(ptr_max+#0) = maxe //[E, 0]32max
r17:16 = memd(sp+#0) //restore r16, r17from stack
} {
vmem(ptr_max+#1) = mine //[E, 0]32min
r19:18 = memd(sp+#8) //restore r18,r19
} {
r21:20 = memd(sp+#16) //restore r20,r21
r23:22 = memd(sp+#24) //restore r22,r13
} {
r25:24 = memd(sp+#32) //restore r24,r15
r27:26 = memd(sp+#40) //restore r26,r17
sp = add(sp,#STACK) //
jumpr r31 //
}
.L_end:
/*=============================================================================*/
// Fix: the .size directive referenced gvconv2dbbbs1_v66_asm (copy/paste from
// another kernel source); per this file's header the function defined here is
// gvconv2db2b2b2u_d32_asm, so the size must be attached to that symbol.
 .size gvconv2db2b2b2u_d32_asm, .L_end-gvconv2db2b2b2u_d32_asm
/*=============================================================================*/
|
XiaoMi/nnlib | 28,314 | hexagon/asm_src/dwconv2dbbb_s1_5xN_h.S | /*
* Copyright (c) 2016,2017,2018 The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*======================================================================*/
/* FUNCTIONS : dwconv2dbbb_s1_5xN_asm */
/* */
/* DESCRIPTION */
/* Depthwise filter stride 1xM, filter size 5xN */
/* input and output ptr non aligned output width */
/* padded, max and min found only on valid range */
/* ARCHITECTURE : QDSP6V6 + HVX */
/*======================================================================*/
/* REVISION HISTORY: */
/* ================= */
/* */
/* Author Date Comments */
/* ------------------------------------------------------------- */
/* DJH 4.30.19 */
/*======================================================================*/
/* CYCLE-COUNT: */
/* -> */
/* */
/* MEMORY */
/* CODESIZE = 928 bytes */
/* STACK = 48 bytes */
/* ASSUMPTIONS */
/* none */
/* C MODEL */
/* exact match to assembly code */
/*======================================================================*/
#if 0
/* Scalar reference model for dwconv2dbbb_s1_5xN_asm (assembly below).
 * Depthwise 2-D convolution over d32-format activations: horizontal stride 1,
 * filter width 5 (stored padded to 8 taps per row), filter height N.
 *
 *  in_buf            input activations, d32 layout
 *  filt              packed filter bytes (indexing mirrors the HVX tap layout)
 *  out_buf           quantized uint8 outputs, d32 layout
 *  next_in_width     byte pitch between input rows
 *  next_out_width    byte pitch between output rows
 *  next_in_width_32  byte pitch between 32-deep input sub-rows
 *  next_out_width_32 byte pitch between 32-deep output sub-rows
 *  depth             total depth, multiple of 32
 *  out_width         number of valid outputs per row; outputs are produced in
 *                    groups of 4, lanes beyond out_width repeat lane 0's value
 *  out_height        number of output rows
 *  filt_height       filter height (N)
 *  filt_zero         filter zero point (removed via the "gemsuma" correction)
 *  bias_sum          per-channel bias, 32 entries per depth slice
 *  max               max[0..31] running per-channel maxima,
 *                    max[32..63] running per-channel minima
 *  recip_level       Q31 output scale multiplier (NOTE(review): the asm treats
 *                    this argument as a pointer to per-slice vectors — confirm)
 *  recip_shift       left shift applied to the accumulator before scaling
 *  stride_height     vertical stride
 */
void dwconv2dbbb_s1_cn(
   uint8_t *in_buf,
   uint8_t *filt,
   uint8_t *out_buf,
   int next_in_width,
   int next_out_width,
   int next_in_width_32,
   int next_out_width_32,
   int depth,
   int out_width,
   int out_height,
   int filt_height,
   int filt_zero,
   int32_t *bias_sum,
   int32_t *max,
   int recip_level,
   int recip_shift,
   int stride_height)
{
   int out_y, d, out_x, ur, in_val, filt_val;
   int out_z, filt_y, filt_x, cnt;
   int out_width_pad = (out_width+3)&(~3);
   int32_t sum, zum, sum0;
   int64_t lsum ;
   int filt_width = 5;
   int o_filt_width = (filt_width+3)&(~3);
   int buf_offset;
   for (out_y = 0; out_y < out_height; out_y++) {
       for (out_x = 0; out_x < out_width_pad; out_x+=4) {
           /* BUG FIX: cnt was previously read uninitialized. It mirrors the
            * asm width counter (out_width decremented by 4 per group): lane
            * ur of this group is valid iff cnt > ur-4, i.e. out_x+ur < out_width. */
           cnt = out_width - out_x - 4;
           for(d=0; d < depth/32; d++) {
               for (out_z = 0; out_z < 32; out_z++) {
                   for(ur=0; ur < 4; ur++)
                   {
                       sum = (int32_t)bias_sum[32*d+out_z];
                       zum = 0;                      /* input-sum * filt_zero correction */
                       for (filt_y = 0; filt_y < filt_height; filt_y++) {
                           for (filt_x = 0; filt_x < o_filt_width; filt_x++) {
                               buf_offset = (out_y * stride_height + filt_y) * next_in_width
                                            + d * next_in_width_32
                                            + (out_x + ur + filt_x) * 32
                                            + out_z;
                               in_val = in_buf[buf_offset];
                               filt_val = filt[32*d*filt_height*o_filt_width
                                               + (o_filt_width*filt_y)*32
                                               + out_z*4 + 128*(filt_x/4)
                                               + (filt_x % 4)] ;
                               sum += (uint32_t)in_val*(int32_t)filt_val;
                               if(filt_x < filt_width)   /* pad taps excluded from zum */
                                   zum += (uint32_t)in_val*(int32_t)filt_zero;
                           }
                       }
                       sum = sum - zum;
                       if(ur==0) sum0 = sum;
                       /* invalid (padding) lanes repeat lane 0's result */
                       if(ur == 1 && !(cnt > -3)) sum = sum0;
                       if(ur == 2 && !(cnt > -2)) sum = sum0;
                       if(ur == 3 && !(cnt > -1)) sum = sum0;
                       sum <<= recip_shift;
                       lsum = (int64_t)sum * ((int64_t)recip_level) + 0x40000000LL;
                       lsum = lsum >> 31;           /* round-to-nearest Q31 scale */
                       sum = (int)lsum;
                       /* min/max track the unclamped quantized value */
                       max[out_z] = (sum > max[out_z]) ? sum : max[out_z];
                       max[out_z+32] = (sum < max[out_z+32]) ? sum : max[out_z+32];
                       if(lsum < 0) lsum = 0; if(lsum > 0xffll) lsum = 0xffll;
                       out_buf[out_y * next_out_width
                               + 32 * (out_x+ur)
                               + d * next_out_width_32
                               + out_z] = (uint8_t) lsum;
                   }//ur
               }//out_z
           }//d
       }//out_x
   }//out_y
   return;
}
#endif
/*======================================================================*/
/* ----------------------------------------------------------------------------- */
.text
.file "dwconv2dbbb_s1_5xN_h.S"
.global dwconv2dbbb_s1_5xN_asm
.balign 32
.type dwconv2dbbb_s1_5xN_asm, @function
dwconv2dbbb_s1_5xN_asm:
/* ----------------------------------------------------------------------------- */
// Hexagon ABI: the first six arguments arrive in r0-r5; all remaining
// arguments are fetched from the caller's stack through the sp+#N<<2 loads
// in the prologue below (see the reference C model above for the full
// argument list and semantics).
//calling values
#define in_buf r0 //ptr to start of activations (d32 layout)
#define filt r1 //ptr to packed filter array
#define out_buf r2 //ptr to output activations
#define next_in_width_depth r3 //input row pitch in bytes (stepped via M0 per filter row)
#define next_out_width_depth r4 //currently unused
#define next_in_width_32 r5 //pitch between 32-deep input sub-rows
#define next_out_width_32 r10 //pitch between 32-deep output sub-rows
#define depth r11 //input and output depth (multiple of 32)
#define out_width r12 //output width
#define out_height r13 //output height
#define filt_height r25 //filter height (width is <=5 by definition)
#define filt_zero r7 //filter zero point
#define bias_sum r14 //ptr to bias values
#define ptr_max r15 //ptr to max and mins
#define recip_level r3 //quantization output level (reuses r3 after M0 is set)
#define recip_shift r8 //shift for accumulator if outputs larger
#define stride_v r28 //vertical stride (horizontal stride assumed 1)
//scaler values
#define ptr_w0 r16 //ptr to weights for depth loop
#define ptr_w1 r17 //ptr to weights for width loop
#define c24 r9 //const = 24
#define c8 r6 //const = 8
#define bias_ptr r18 //ptr to bias value for width loop
#define ptr_xin r19 //ptr to activations for depth loop
#define ptr_xin_bias_ptr r19:18 //ptrs to bias and activations
#define ptr_x0 r22 //ptr to acts for width loop
#define ptr_x1 r20 //ptr to acts for depth loop
#define ptr_y r23 //ptr to outputs for width loop
#define depth_cnt r26 //depth count
#define filt_size r8 //size of filters for each depth (reuses r8 after shift splat)
#define next_in_width_depth_stride r28//vertical input stride (reuses r28 after stride_v read)
// The z masks below hold the filt_zero byte in some lanes and 0 in others;
// names read lane3..lane0 left to right ('z' = filt_zero byte, '_' = zero).
// They are built from zzzz by the lsr/asl chain in the prologue.
#define zzzz r7 //filt_zero in all 4 byte lanes
#define ___z r27 //filt_zero in lane 0 only
#define zzz_ r4 //filt_zero in lanes 1-3
#define __zz r24 //filt_zero in lanes 0-1
#define zz__ r15 //filt_zero in lanes 2-3
#define _zzz r21 //filt_zero in lanes 0-2
#define z___ r11 //filt_zero in lane 3 only
#define out_width4 r3 //ceil(out_width/4): width-loop trip count
//vector values
#define vrecip v0 //splat quantized level values
#define vshamt_vec v1 //splat quantized shift values
#define max v2 //max vecs
#define min v3 //min vecs
#define bias_val v4 //bias vector
#define x0 v30 //activations 0-3
#define x1 v29 //activations 4-7
#define w4321 v5 //taps pos 3
#define w_432 v6 //taps pos 2
#define w__43 v27 //taps pos 1
#define w___4 v29 //taps pos 0
#define w3210 v28 //taps pos 0
#define w210_ v7 //taps pos 1
#define w10__ v8 //taps pos 2
#define w0___ v9 //taps pos 3
#define x7x5x6x4 v29 //input 4-7 1st shuffle
#define x7x6x5x4 v10 //input 4-7 2nd shuffle
#define x3x1x2x0 v30 //input 0-3 1st shuffle
#define x3x2x1x0 v11 //input 0-3 2nd shuffle
#define s0 v12 //accumulator 0
#define s1 v13 //accumulator 1
#define s2 v14 //accumulator 2
#define s3 v15 //accumulator 3
#define z0 v16 //gemsuma 0
#define z1 v17 //gemsuma 1
#define z2 v18 //gemsuma 2
#define z3 v19 //gemsuma 3
#define d0 v20 //quantized output 0
#define d1 v21 //quantized output 1
#define d1d0 v24 //quantized output 0,1
#define d2 v22 //quantized output 2
#define d3 v23 //quantized output 3
#define d3d2 v25 //quantized output 2,3
#define d3210 v25 //quantized and clipped outputs 0-3
#define SSR <<1:rnd:sat:shift //simplify mpy instruction
#define PV(VSRC) .word (0x1DFFE020+VSRC)//debug vec reg
#define PS(SSRC) .word (0x1DFFE100+SSRC)//debug scaler reg
/* =========================================================================== */
 { allocframe(#56) //allocate 56B locals (+8 for FP/LR); (56+8)/4=20 words
   c8 = #8 //constant value shift 8
   c24= #24 //constant value shift 24
   out_width = memw(sp+#2<<2) //load output width; packetized with allocframe,
                                              //so the load still sees the caller's sp
                                              //(9th argument) — cf. the sp+#18<<2 reload below
 } {
   memd(sp+#0) = r17:16 //save callee-saved pair on stack
   memd(sp+#8) = r19:18 //save callee-saved pair on stack
   out_width = add(out_width, #3) //round up to 4
 } {
   memd(sp+#16) = r21:20 //save callee-saved pair on stack
   memd(sp+#24) = r23:22 //save callee-saved pair on stack
   out_width = lsr(out_width, #2) //divide by 4: number of 4-wide output groups
 } {
   memd(sp+#32) = r25:24 //save callee-saved pair on stack
   memd(sp+#40) = r27:26 //save callee-saved pair on stack
 } {
   filt_zero = memw(sp+#21<<2) //load filt zero
   depth = memw(sp+#17<<2) //depth
 } {
   M0 = next_in_width_depth //set vertical filter stride (frees r3)
   out_height = memw(sp+#19<<2) //load output height count
   zzzz = vsplatb(filt_zero) //splat filt zero into all 4 byte lanes
 } {
   next_out_width_32 = memw(sp+#16<<2) //load output d32 sub-row pitch
   stride_v = memw(sp+#26<<2) //vertical stride
   depth = lsr(depth, #5) //depth/32
   _zzz = lsr(zzzz, c8) //gemsuma mask: filt_zero in lanes 0-2
 } {
   filt_height = memw(sp+#20<<2) //filter height
   memw(sp+#17<<2) = depth //save depth count (in slices of 32) for later
   __zz = lsr(_zzz, c8) //gemsuma mask: filt_zero in lanes 0-1
   next_in_width_depth_stride = mpyi(next_in_width_depth,stride_v) //vertical stride in bytes
 } {
   memw(sp+#48) = out_width //save rounded width/4 for reload each depth slice
   recip_shift = memw(sp+#25<<2) //quantization shift
   zzz_ = asl(zzzz, c8) //gemsuma mask: filt_zero in lanes 1-3
 } {
   vshamt_vec= vsplat(recip_shift) //splat shift
   ptr_max = memw(sp+#23<<2) //ptr to max and min
   filt_size = filt_height //stride for filter for each depth
 } {
   bias_sum = memw(sp+#22<<2) //ptr to bias values
   max = vmem(ptr_max+#0) //load current max vec
   ___z = lsr(__zz, c8) //gemsuma mask: filt_zero in lane 0 only
   filt_height = add(filt_height, #-1) //decrement filt height for sw pipeline
 } {
   min = vmem(ptr_max+#1) //load current min vec
   depth_cnt = memw(sp+#17<<2) //depth count initialize
   zz__ = asl(zzz_, c8) //gemsuma mask: filt_zero in lanes 2-3
   z___ = asl(zzz_,#16) //gemsuma mask: filt_zero in lane 3 only
 }
/* ----------------------------------------------------------------------------- */
 .balign 32
.L_height:
 { out_height = add(out_height, #-1) //decrement height count
   ptr_xin_bias_ptr = combine(in_buf, bias_sum) //init depth ptr and bias ptr
   ptr_w0 = filt //init filter ptr
   recip_level = memw(sp+#24<<2) //quantization level ptr, re-read from the arg each row
 }
/* ----------------------------------------------------------------------------- */
.L_depth:
 { x1 = vmemu(ptr_xin+#1) //[P, 0]load activations 4-7
   ptr_x0 = ptr_xin //init data ptr for depth loop
   ptr_x1 = ptr_xin //[WIDTH, P] data ptr for filt loop
 } {
   x0 = vmemu(ptr_x1++M0) //[P, 2]load activations 0-3
 } {
   x7x5x6x4.b = vshuff(x1.b) //[P, 3]shuffle inputs 4-7
   out_width = memw(sp+#18<<2) //initialize output width count
   vrecip = vmem(recip_level++#1) //per-depth-slice quant level VECTOR
 } {
   x3x1x2x0.b = vshuff(x0.b) //[P, 4]shuffle inputs 0-3
   bias_val = vmem(bias_ptr++#1) //init bias offset vector
   memw(sp+#52) = recip_level //save advanced level ptr to temp slot
 } {
   x7x6x5x4.b = vshuff(x7x5x6x4.b) //[P, 5]shuffle inputs 4-7
   ptr_y = out_buf //init output ptr
   p3 = !cmp.eq(r0, r0) //p3=false: suppress store during pipeline prolog
   out_width4 = memw(sp+#48) //reload rounded width/4
 } {
   loop1(.L_width, out_width4) //init width loop
   x3x2x1x0.b = vshuff(x3x1x2x0.b) //[P, 6]shuffle inputs 0-3
   w3210 = vmem(ptr_w0+#0) //[P, 6]load first 4 taps
 }
/* --------------------------------------------------------------------------- */
 .balign 32
.L_width:
 { w___4.cur = vmem(ptr_w0+#1) //[0, 7]taps output 0
   s0.uw = vrmpy(x7x6x5x4.ub, w___4.ub) //[0, 7]filter even output
   loop0(.L_vert, filt_height) //[WIDTH, P]setup filter loop
   ptr_w1 = add(ptr_w0, #256) //[WIDTH, P]advance taps by 256
 } {
   z0.uw = vrmpy(x7x6x5x4.ub, ___z.ub) //[0, 8]gemsuma for output 0
   z1.uw = vrmpy(x7x6x5x4.ub, __zz.ub) //[0, 8]gemsuma for output 1
   w4321.uw = vlsr(w3210.uw, c8) //[0, 8]create taps output 3
   s0.w = vadd(s0.w, bias_val.w) //[WIDTH, P]add bias to acc 0
 } {
   s1 = bias_val //[WIDTH, P]add bias to acc 1
   s0.uw += vrmpy(x3x2x1x0.ub, w3210.ub) //[0, 9]filter even output
   w210_.w = vasl(w3210.w, c8) //[0, 9]create taps for output 1
   ptr_x0 = add(ptr_x0, #128) //[WIDTH,P]+4 32 depths for stride 1
 } {
   s2 = bias_val //[WIDTH, P]add bias to acc 2
   z2.uw = vrmpy(x3x2x1x0.ub, zz__.ub) //[0,10]gemsuma for output 2
   z3.uw = vrmpy(x3x2x1x0.ub, z___.ub) //[0,10]gemsuma for output 3
   w4321.w += vasl(w___4.w, c24) //[0,10]create taps for output 3
 } {
   s3 = bias_val //[WIDTH, P]add bias to acc 3
   w10__.w = vasl(w210_.w, c8) //[0,11]create taps for output 2
   s1.uw += vrmpy(x3x2x1x0.ub, w210_.ub) //[0,11]filter even output
   out_width = add(out_width, #-4) //[WIDTH]decrement width count
 }
/* --------------------------------------------------------------------------- */
 .balign 32
.L_vert:
 { s3.uw += vrmpy(x7x6x5x4.ub, w4321.ub) //[0,12]filter output 3
   x1 = vmemu(ptr_x1+#1) //[1, 0]load 4-7 activations
 } {
   z0.uw += vrmpy(x3x2x1x0.ub, zzzz.ub) //[0,13]gemsuma output 0
   z1.uw += vrmpy(x3x2x1x0.ub, zzz_.ub) //[0,13]gemsuma output 1
   w_432.uw = vlsr(w4321.uw, c8) //[0,13]create taps output 2
 } {
   s2.uw += vrmpy(x3x2x1x0.ub, w10__.ub) //[0,14]filter output 2
   w0___.w = vasl(w10__.w, c8) //[0,14]create taps output 3
   x0 = vmemu(ptr_x1++M0) //[1, 2]load 0-3 activations
 } {
   s2.uw += vrmpy(x7x6x5x4.ub, w_432.ub) //[0,15]filter output 2
   w__43.uw = vlsr(w_432.uw, c8) //[0,15]create taps output 1
   x7x5x6x4.b = vshuff(x1.b) //[1, 3]shuffle inputs 4-7
 } {
   z2.uw += vrmpy(x7x6x5x4.ub, _zzz.ub) //[0,16]gemsuma output 2
   z3.uw += vrmpy(x7x6x5x4.ub, zzzz.ub) //[0,16]gemsuma output 3
   x3x1x2x0.b = vshuff(x0.b) //[1, 4]shuffle inputs 0-3
 } {
   s1.uw += vrmpy(x7x6x5x4.ub, w__43.ub) //[0,17]filter output 1
   x7x6x5x4.b = vshuff(x7x5x6x4.b) //[1, 5]shuffle inputs 4-7
 } {
   s3.uw += vrmpy(x3x2x1x0.ub, w0___.ub) //[0,18]filter output 3
   x3x2x1x0.b = vshuff(x3x1x2x0.b) //[1, 6]shuffle inputs 0-3
   w3210 = vmem(ptr_w1++#1) //[1, 6]taps output 0
 } {
   w___4.cur = vmem(ptr_w1++#1) //[1, 7]taps output 0
   s0.uw += vrmpy(x7x6x5x4.ub, w___4.ub) //[1, 7]filter output 0
 } {
   z0.uw += vrmpy(x7x6x5x4.ub, ___z.ub) //[1, 8]gemsuma output 0
   z1.uw += vrmpy(x7x6x5x4.ub, __zz.ub) //[1, 8]gemsuma output 1
   w4321.uw = vlsr(w3210.uw, c8) //[1, 8]create taps output 3
 } {
   s0.uw += vrmpy(x3x2x1x0.ub, w3210.ub) //[1, 9]filter output 0
   w210_.w = vasl(w3210.w, c8) //[1, 9]create taps output 1
 } {
   z2.uw += vrmpy(x3x2x1x0.ub, zz__.ub) //[1,10]gemsuma output 2
   z3.uw += vrmpy(x3x2x1x0.ub, z___.ub) //[1,10]gemsuma output 3
   w4321.w += vasl(w___4.w, c24) //[1,10]create taps output 3
 } {
   w10__.w = vasl(w210_.w, c8) //[1,11]create taps output 2
   s1.uw += vrmpy(x3x2x1x0.ub, w210_.ub) //[1,11]filter output 1
 }:endloop0 //
/* --------------------------------------------------------------------------- */
 { s3.uw += vrmpy(x7x6x5x4.ub, w4321.ub) //[1,12]filter position 3
   d3d2.h = vpack(d3.w, d2.w):sat //[WIDTH, E]convert 32 to 16bit outputs
   w_432.uw = vlsr(w4321.uw, c8) //[1,13]create taps for position 1
 } {
   z0.uw += vrmpy(x3x2x1x0.ub, zzzz.ub) //[1,13]gemsuma for pos 0
   z1.uw += vrmpy(x3x2x1x0.ub, zzz_.ub) //[1,13]gemsuma for pos 1
   w0___.w = vasl(w10__.w, c8) //[1,14]create taps for pos 2
 } {
   s0.w = vsub(s0.w, z0.w) //[WIDTH]subtract gemsuma from acc 0
   s2.uw += vrmpy(x3x2x1x0.ub, w10__.ub) //[1,14]filter position 2
   w__43.uw = vlsr(w_432.uw, c8) //[1,15]create filter for pos 1
 } {
   s2.uw += vrmpy(x7x6x5x4.ub, w_432.ub) //[1,15]filter for pos 2
   d3210.ub = vpack(d3d2.h, d1d0.h):sat //[WIDTH, E]pack 4 outputs into 128b reg
 } {
   s0.w = vasl(s0.w, vshamt_vec.w) //shift acc0 left
   s1.uw += vrmpy(x7x6x5x4.ub, w__43.ub) //[1,17]filter position 1
 } {
   z2.uw += vrmpy(x7x6x5x4.ub, _zzz.ub) //[1,16]gemsuma for pos 2
   z3.uw += vrmpy(x7x6x5x4.ub, zzzz.ub) //[1,16]gemsuma for pos 3
   s1.w = vsub(s1.w, z1.w) //subtract gemsuma from acc1
   ptr_x1 = ptr_x0 //[WIDTH, P]init ptr x1 for next width
 } {
   if(p3) vmemu(ptr_y++#1) = d3210 //[WIDTH, E]store 4 outputs
   s2.w = vsub(s2.w, z2.w) //subtract gemsuma from acc2
   s3.uw += vrmpy(x3x2x1x0.ub, w0___.ub) //[1,18]filter position 3
 } {
   s3.w = vsub(s3.w, z3.w) //subtract gemsuma
   d0.w = vmpye(s0.w, vrecip.uh) //[0,15]multiply by 1/max
   s1.w = vasl(s1.w, vshamt_vec.w) //shift acc 1 left
   p0 = !cmp.gt(out_width, #-3) //[WIDTH]acc 1 valid? (out_width already -4)
 } {
   d0.w += vmpyo(s0.w, vrecip.h):SSR //[0,17]quantize acc 0
   if(p0) s1 = s0 //overwrite acc 1 if not valid (pad lane)
   s2.w = vasl(s2.w, vshamt_vec.w) //shift acc 2 left
   p0 = !cmp.gt(out_width, #-2) //[WIDTH]see if acc 2 is valid
 } {
   s3.w = vasl(s3.w, vshamt_vec.w) //shift acc 3 left
   min.w = vmin(min.w, d0.w) //[0,22]update min
   max.w = vmax(max.w, d0.w) //[0,18]update max
   p1 = !cmp.gt(out_width, #-1) //[WIDTH]check if acc 3 is valid
 } {
   d1.w = vmpye(s1.w, vrecip.uh) //[0,22]multiply by 1/max
   x1 = vmemu(ptr_x0+#1) //[P, 0]load values 4-7
   if(p0) s2 = s0 //overwrite 3rd acc. if not valid
 } {
   d1.w += vmpyo(s1.w, vrecip.h):SSR //[0,23]quantize 2nd set of values
   x0 = vmemu(ptr_x1++M0) //load vals 0-3
   if(p1) s3 = s0 //overwrite 4th acc with valid data
 } {
   max.w = vmax(max.w, d1.w) //[0,26]update maxes
   d2.w = vmpye(s2.w, vrecip.uh) //[0,15]multiply by 1/max
   x7x5x6x4.b = vshuff(x1.b) //[P, 3]shuffle values 4 to 7
   p3 = cmp.eq(r0, r0) //enable output store from now on
 } {
   min.w = vmin(min.w, d1.w) //[0,27]update mins
   d1d0.h = vpack(d1.w, d0.w):sat //[0,27]pack 1st and 2nd data
   d2.w += vmpyo(s2.w, vrecip.h):SSR //[0,17]quantize 3rd block of data
 } {
   min.w = vmin(min.w, d2.w) //[0,22]update mins
   d3.w = vmpye(s3.w, vrecip.uh) //[0,22]multiply by 1/max
   x3x1x2x0.b = vshuff(x0.b) //[P, 4]shuffle data 0 to 3
 } {
   max.w = vmax(max.w, d2.w) //[0,18]update max's
   d3.w += vmpyo(s3.w, vrecip.h):SSR //[0,23]quantize 32 to 8 bits
   x7x6x5x4.b = vshuff(x7x5x6x4.b) //[P, 5]2nd shuffle of values 4 to 7
 } {
   max.w = vmax(max.w, d3.w) //[0,26]update max
   min.w = vmin(min.w, d3.w) //[0,27]update min
   x3x2x1x0.b = vshuff(x3x1x2x0.b) //[P, 6]shuffle data again
   w3210 = vmem(ptr_w0+#0) //[P, 6]load weights 0 to 3
 }:endloop1 //end width
/* --------------------------------------------------------------------------- */
 { d3d2.h = vpack(d3.w, d2.w):sat //[WIDTH, E]pack 3rd and 4th chunk together
   ptr_w0 += asl(filt_size, #8) //[DEPTH,E]filt_size = filt_height*256 //4*3*64
   ptr_xin = add(ptr_xin, next_in_width_32) //[DEPTH]update data ptr to next sub line
 } {
   depth_cnt = add(depth_cnt, #-1) //[DEPTH,E]decrement depth count by 32
   out_buf = add(out_buf, next_out_width_32) //[DEPTH]update out ptr to next sub line
   loop1(.L_width, out_width4) //[DEPTH]start next width loop
 } {
   p0 = cmp.eq(depth_cnt, #0) //[DEPTH,E]all depth slices done?
   d3210.ub = vpack(d3d2.h, d1d0.h):sat //[WIDTH, E]pack 64/64 together
   recip_level = memw(sp+#52) //read quant level ptr from temp slot
 } {
   vmemu(ptr_y+#0) = d3210 //[WIDTH, E]save last 128 activations
   if(!p0) jump .L_depth //[DEPTH,E]next chunk of 32 depth
 }//end depth
/* ----------------------------------------------------------------------------- */
 { p0 = cmp.eq(out_height, #0) //check if height used up
   depth_cnt = memw(sp+#17<<2) //depth reload
   in_buf=add(in_buf,next_in_width_depth_stride)//stride update vertically
   if(!p0.new) jump:nt .L_height //next row
 }//end height
/* ----------------------------------------------------------------------------- */
 ptr_max = memw(sp+#23<<2) //reload ptr to max/mins
 {
   r17:16 = memd(sp+#0) //restore callee-saved regs
   vmem(ptr_max+#0) = max //save off max vals
 } {
   r19:18 = memd(sp+#8) //restore callee-saved regs
   vmem(ptr_max+#1) = min //save off min vals
 } {
   r21:20 = memd(sp+#16) //restore callee-saved regs
   r23:22 = memd(sp+#24) //restore callee-saved regs
 } {
   r25:24 = memd(sp+#32) //restore callee-saved regs
   r27:26 = memd(sp+#40) //restore callee-saved regs
 } {
   dealloc_return //pop frame and return
 }
/* =========================================================================== */
.L_end:
/* =========================================================================== */
.size dwconv2dbbb_s1_5xN_asm, .L_end-dwconv2dbbb_s1_5xN_asm
/* =========================================================================== */
|
XiaoMi/nnlib | 20,599 | hexagon/asm_src/gvint_h.S | /*
* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
  Description
Perform 2d integral over activation input of size in_depth. The in_depth blocks are collapsed to 1
int and then the ints are accumulated horizontally and vertically.
*/
#if 0
/* Scalar reference model for gvint_asm: builds a 2-D integral image.
 * The depth dimension (a multiple of 32) is first collapsed to one int per
 * (x, y) pixel, then the per-pixel sums are accumulated horizontally along
 * each row and vertically against the previous output row, so
 * out_sum[y*in_width + x] = sum of all collapsed pixels at (<=x, <=y).
 *
 * filt_w/filt_h/stride_w/stride_h/out_width/out_height are unused by this
 * model; they are kept so the signature mirrors the asm entry point.
 * (Fix: the original signature was malformed — it had two parameter lists
 * and the typo "in out_width" — so the reference model could not compile.)
 */
void gvint_cn(uint8_t * in_data, int * out_sum, int in_width, int in_height,
              int in_depth, int *tmp_buf, int filt_w, int filt_h,
              int stride_w, int stride_h, int out_width, int out_height)
{
    int i, j, k, l;
    int sum, sumo;
    (void)filt_w; (void)filt_h; (void)stride_w;      /* unused, interface only */
    (void)stride_h; (void)out_width; (void)out_height;
    for (j = 0; j < in_height; j++)
    {
        /* horizontal pass: collapse all depth-32 chunks of this row into one
         * int per pixel */
        for (i = 0; i < in_width; i++)
        {
            sum = 0;
            for (l = 0; l < in_depth/32; l++) {
                for (k = 0; k < 32; k++)
                    sum += in_data[in_depth*in_width*j + 32*in_width*l + 32*i + k];
            }
            tmp_buf[i] = sum;
        }
        /* integration: running sum along the row, plus the row above */
        sum = 0;
        for (i = 0; i < in_width; i++)
        {
            sum += tmp_buf[i];
            if (j == 0) sumo = sum;
            else        sumo = sum + out_sum[(j-1)*in_width + i];
            out_sum[in_width*j + i] = sumo;
        }
    }
}
integrate in front of the z = in_depth * filt_offset
z 2z 3z 4z in front of every integral
#endif
/* ------------------------------------------------------------------------------------------ */
.text
.file "gvint_h.S"
.global gvint_asm
.balign 32
.type gvint_asm, @function
gvint_asm:
/* ------------------------------------------------------------------------------------------ */
#define in_ptr0 r0 //
#define out_ptr0 r1 //
#define next_d32_row r2 //width of image physical
#define next_input r3 //mpyi(in_depth, in_width_bytes) logical width of image
#define integral_width r4 //number of integral outputs
#define in_depth r5 //indepth multiple of 32
#define out_height r22 //number of required output rows
#define tmp_buf0 r23 //tmp buffer
#define filt_offset r20 //filteroffset * data sum
#define cntrl r11 //cntrl = ##integral_cntrl
/* ------------------------------------------------------------------------------------------ */
#define in_count r25
#define next_int_width r6 //distance to next output of integral buffer
#define c4 r7 //
#define e1 r17
#define e2 r18
#define e3 r8
#define e4 r9
#define e5 r10
#define in_ptr r12
#define out_ptr r13
#define out_ptr_1 r14
#define n r15 //loop count
#define tmp_buf r19
#define next_output r21 //jmp required to advance to next lot of computation
/* ------------------------------------------------------------------------------------------ */
#define vq1 q0
#define vq2 q1
#define vq3 q2
#define vq4 q3
#define vq5 q3
#define preds v27
#define perm1 v0
#define perm2 v1
#define perm3 v2
#define perm4 v3
#define perm5 v4
#define perm6 v5
#define delta4 v6
#define delta8 v6
#define delta16 v6
#define delta32 v6
#define delta64 v6
#define delta128 v7
#define vzero v26
#define d3d2d1d0 v9
#define h16g16f16e16h_h16g16f16e16l v11:10
#define h16g16f16e16h v11
#define h16g16f16e16l v10
#define d16c16b16a16h_d16c16b16a16l v31:30
#define d16c16b16a16h v31
#define d16c16b16a16l v30
#define h32g32f32e32h_h32g32f32e32l v13:12
#define h32g32f32e32h v13
#define h32g32f32e32l v12
#define a8a8a8a8 v22
#define b8b8b8b8 v23
#define b8b8a8a8h_b8b8a8a8l v29:28
#define b8b8a8a8h v29
#define b8b8a8a8l v28
#define b16b16a16a16 v25
#define d16d16c16c16 v22
#define f16f16e16e16 v14
#define h16h16g16g16 v16
#define d32c32b32a32 v18
#define h32g32f32e32 v19
#define intw31w00 v20
#define inty31y00 v24
#define prev_line v21
/* --------------------------------------------------------------------------------------- */
{ allocframe(#4*8) //
c4 = #-4 //
cntrl = add(pc,##integral_control@PCREL) //
} {
memd(sp+#0) = r17:16 //
memd(sp+#8) = r19:18 //
vzero = #0 //
n = lsr(integral_width, #5) //integral_width / 32
} {
memd(sp+#16) = r21:20 //
memd(sp+#24) = r23:22 //
n = add(n, #-1) //
} {
filt_offset = memw(sp+#12<<2) //filt_offset unsigned
M0 = next_d32_row //
next_int_width = asl(integral_width, #2) //line to line in integral
} {
tmp_buf0 = memw(sp+#11<<2) //splat filt offset
preds = vmem(cntrl+#6) //
filt_offset = vsplatb(filt_offset) //
in_ptr = in_ptr0 //
} {
tmp_buf = tmp_buf0 //
perm1 = vmem(cntrl+#0) //
e1 = ##0x01010101 //
} {
vq1 = vand(preds, e1) //
perm2 = vmem(cntrl+#1) //
e2 = add(e1, e1) //
out_height = memw(sp+#10<<2) //
} {
vq2 = vand(preds, e2) //
perm3 = vmem(cntrl+#2) //
e3 = add(e2, e2) //
p2 = and(p2, !p2) //disable p2 for previous line
} {
vq3 = vand(preds, e3) //
perm4 = vmem(cntrl+#3) //
in_depth = lsr(in_depth, #5) //how many 32byte chunks to sum
} {
e4 = add(e3, e3) //
perm5 = vmem(cntrl+#4) //
next_output = add(next_input, #-256) //jump 256 bytes right doinf 2 at once
integral_width = lsr(integral_width, #3) //64 * 32/ 256 amount of tmp values
} {
integral_width = add(integral_width, #-1) //after pad to left side
e5 = add(e4, e4) //select vpred 5
perm6 = vmem(cntrl+#5) //last word across whole re
tmp_buf = tmp_buf0 //
}
/* --------------------------------------------------------------------------------------- */
.balign 32
.L_height:
//sum up and pad integral prepend 8 lines to the integral buffer and 8 elements in front
{ vmem(tmp_buf++#1) = vzero //store 32 sums of 8*inz*filtz*in_depth/32 pre-pad
in_ptr = in_ptr0 //
in_ptr0 = add(in_ptr0, next_input) //[P, 3]update to next indepth*in_width
loop1(.L_width, integral_width) //setup tmp horz loop
}
.L_width:
{ a8a8a8a8 = #0 //init accumulators
b8b8b8b8 = #0 //init accumulators
loop0(.L_sum, in_depth) //set up inner loop of horz sum
nop //
}
.L_sum:
{ d3d2d1d0.tmp = vmem(in_ptr+#1) //col even
b8b8b8b8.uw += vrmpy(d3d2d1d0.ub, filt_offset.ub) //32 sums of 4
} {
d3d2d1d0.tmp = vmem(in_ptr++M0) //col odd
a8a8a8a8.uw += vrmpy(d3d2d1d0.ub, filt_offset.ub) //32 sums of 4
}:endloop0
{
loop0(.L_sum, in_depth) //reset inner loop
b8b8a8a8h_b8b8a8a8l = vdeal(b8b8b8b8, a8a8a8a8, c4) //expand words to add in a tree
a8a8a8a8 = #0 //reset accumulator
b8b8b8b8 = #0 //reset accumulator
} {
in_ptr = sub(in_ptr, next_output) //next set of outputs
b8b8a8a8h.w = vadd(b8b8a8a8h.w, b8b8a8a8l.w) //32sums of 8 final block into tmp
vmem(tmp_buf++#1) = b8b8a8a8h.new //final tmp store
}:endloop1
/* --------------------------------------------------------------------------------------- */
.L_integrate:
{ b16b16a16a16= vmem(tmp_buf0+#0) //[P, 0]load 32sums of 8
} {
tmp_buf = add(tmp_buf0, #256) //[P, 0]update ptr by 2 vregs
} {
d16d16c16c16.tmp= vmem(tmp_buf+#-1) //[0, 1]load 32sums of 8
d16c16b16a16h_d16c16b16a16l= vdeal(d16d16c16c16, b16b16a16a16, c4) //[0, 1]deal out for 32sums of 16
} {
f16f16e16e16= vmem(tmp_buf++#2) //[0, 2]load 32sums of 8
} {
out_ptr = out_ptr0 //[P, 3]setup out pointer
delta128 = #0 //[P, 3]initialize add sum to next vreg
} {
h16h16g16g16.tmp= vmem(tmp_buf+#-1) //[0, 4]
h16g16f16e16h_h16g16f16e16l= vdeal(h16h16g16g16,f16f16e16e16, c4) //[0, 4]
} {
h32g32f32e32.w = vadd(h16g16f16e16h.w,h16g16f16e16l.w) //[0, 5]32 x 16wordsums
d32c32b32a32.w = vadd(d16c16b16a16h.w,d16c16b16a16l.w) //[0, 5]32sums of 16
} {
out_ptr_1 = sub(out_ptr, next_int_width) //previous line
b16b16a16a16= vmem(tmp_buf++#2) //[0, 6]
} {
h32g32f32e32h_h32g32f32e32l= vdeal(h32g32f32e32,d32c32b32a32, c4) //[0, 7]
} {
intw31w00.w = vadd(h32g32f32e32h.w, h32g32f32e32l.w) //[0, 8]32 x 32wordsums
loop0(.L_loop_int, n) //[P, 8]
}
/* --------------------------------------------------------------------------------------- */
{ d16d16c16c16.tmp= vmem(tmp_buf+#-1) //[1, 0]
d16c16b16a16h_d16c16b16a16l= vdeal(d16d16c16c16, b16b16a16a16, c4) //[1, 0]
} {
delta4 = vdelta(intw31w00, perm1) //[0,10]add words
f16f16e16e16= vmem(tmp_buf++#2) //[1, 1]
} {
if(vq1) intw31w00.w += delta4.w //[0,11]groups of 1word 1_1_
} {
h16h16g16g16.tmp= vmem(tmp_buf+#-1) //[1, 3]
h16g16f16e16h_h16g16f16e16l= vdeal(h16h16g16g16,f16f16e16e16, c4) //[1, 3]
} {
delta8 = vdelta(intw31w00, perm2) //[0,13]2words
h32g32f32e32.w = vadd(h16g16f16e16h.w,h16g16f16e16l.w) //[1, 5]32 x 16wordsums
d32c32b32a32.w = vadd(d16c16b16a16h.w,d16c16b16a16l.w) //[1, 5]32sums of 16
} {
if(vq2) intw31w00.w += delta8.w //[0,14]groups of 2words 11__11__
b16b16a16a16= vmem(tmp_buf++#2) //[1, 4]
} {
inty31y00 = intw31w00 //[0,16]
h32g32f32e32h_h32g32f32e32l= vdeal(h32g32f32e32,d32c32b32a32, c4) //[1, 7]
} {
delta16 = vdelta(intw31w00, perm3) //[0,17]
intw31w00.w = vadd(h32g32f32e32h.w, h32g32f32e32l.w) //[1, 8]32 x 32wordsums
}
/* --------------------------------------------------------------------------------------- */
.balign 32
.L_loop_int:
{ if(vq3) inty31y00.w += delta16.w //[0,18]/groups of 4words 1111____1111____
d16d16c16c16.tmp= vmem(tmp_buf+#-1) //[2, 0]
d16c16b16a16h_d16c16b16a16l= vdeal(d16d16c16c16, b16b16a16a16, c4) //[2, 0]
} {
vq4 = vand(preds, e4) //[0,19]
delta4 = vdelta(intw31w00, perm1) //[1,10]add words
f16f16e16e16= vmem(tmp_buf++#2) //[2, 1]
} {
delta32 = vdelta(inty31y00, perm4) //[0,20]
prev_line = vmem(out_ptr_1++#1) //[0,20]
if(vq1) intw31w00.w += delta4.w //[1,11]groups of 1word 1_1_
} {
if(vq4) inty31y00.w += delta32.w //[0,21]groups of 8words 11111111________
h16h16g16g16.tmp= vmem(tmp_buf+#-1) //[2, 3]
h16g16f16e16h_h16g16f16e16l= vdeal(h16h16g16g16,f16f16e16e16, c4) //[2, 3]
} {
vq5 = vand(preds, e5) //[0,22]
delta8 = vdelta(intw31w00, perm2) //[1,13]2words
b16b16a16a16= vmem(tmp_buf++#2) //[2, 4]
} {
delta64 = vdelta(inty31y00, perm5) //[0,23]
if(vq2) intw31w00.w += delta8.w //[1,14]groups of 2words 11__11__
h32g32f32e32.w = vadd(h16g16f16e16h.w,h16g16f16e16l.w) //[2, 5]32 x 16wordsums
d32c32b32a32.w = vadd(d16c16b16a16h.w,d16c16b16a16l.w) //[2, 5]32sums of 16
} {
delta128 = vdelta(delta128, perm6) //[0,24]full replication of last word
if(vq5) inty31y00.w += delta64.w //[0,24]groups of16words 111--111___--___
if(!p2) prev_line = vzero //[0,24]
} {
delta128.w = vadd(inty31y00.w, delta128.w) //[0,25]add previous last value
inty31y00 = intw31w00 //[1,16]
h32g32f32e32h_h32g32f32e32l= vdeal(h32g32f32e32,d32c32b32a32, c4) //[2, 7]
} {
prev_line.w = vadd(prev_line.w, delta128.w) //[0,26]
vmem(out_ptr++#1) = prev_line.new //[0,26]
delta16 = vdelta(intw31w00, perm3) //[1,17]
intw31w00.w = vadd(h32g32f32e32h.w, h32g32f32e32l.w) //[2, 8]32 x 32wordsums
}:endloop0
/* --------------------------------------------------------------------------------------- */
{ if(vq3) inty31y00.w += delta16.w //[2,18]/groups of 4words 1111____1111____
} {
vq4 = vand(preds, e4) //[2,19]
out_ptr0 = add(out_ptr0, next_int_width) //[E,19]go to next output line
} {
delta32 = vdelta(inty31y00, perm4) //[2,20]
prev_line = vmem(out_ptr_1++#1) //[2,20]
} {
if(vq4) inty31y00.w += delta32.w //[2,21]groups of 8words 11111111________
out_height = add(out_height, #-1) //
} {
vq5 = vand(preds, e5) //[2,22]
p0 = cmp.eq(out_height, #0) //
} {
delta64 = vdelta(inty31y00, perm5) //[2,23]
tmp_buf = tmp_buf0 //
} {
delta128 = vdelta(delta128, perm6) //[2,24]full replication of last word
if(vq5) inty31y00.w += delta64.w //[2,24]groups of16words 111--111___--___
if(!p2) prev_line = vzero //[1,24]
} {
delta128.w = vadd(inty31y00.w, delta128.w) //[2,25]add previous last value
p2 = cmp.eq(r0,r0) //enable p2
} {
prev_line.w = vadd(prev_line.w, delta128.w) //[2,26]
vmem(out_ptr++#1) = prev_line.new //[2,26]
if(!p0) jump:t .L_height //[E,26]
}
/* --------------------------------------------------------------------------------------- */
{ r17:16 = memd(sp+#0)
r19:18 = memd(sp+#8)
} {
r21:20 = memd(sp+#16)
r23:22 = memd(sp+#24)
} {
dealloc_return
}
/* --------------------------------------------------------------------------------------- */
.L_end:
.size gvint_asm, .L_end-gvint_asm
/* ==== concatenation artifact removed: hexagon/asm_src/prelu_d32.S begins here ==== */
/*
* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*----------------------------------------------------------------------
 * Scalar-register roles for prelu_hvx_d32.
 * The first six word arguments arrive in r0..r5; the remaining arguments
 * are read from the stack (r29+#0 ...) in the function prologue below.
 *--------------------------------------------------------------------*/
#define IN_OUT r1:0
#define OUT r0 // output pointer (d32 layout)
#define IN r1 // input pointer (d32 layout)
#define IN_NEXT_ROW r2 // byte pitch between input rows
#define IN_NEXT_D32 r3 // byte pitch between input d32 depth slices
#define OUT_NEXT_ROW IN_NEXT_ROW // assume they are the same for now
#define OUT_NEXT_D32 IN_NEXT_D32 // assume they are the same for now
#define QZERO_ALPHABUF r5:4
#define QZERO r5 // input quantized-zero byte (splatted to all lanes below)
#define ALPHABUF r4 // per-depth alpha fractions; one vector consumed per d32 slice
#define ALPHA_SHIFT r6 // from stack; prologue rewrites it to (7 - shift) for the vasr
#define RSHIFT7 r7 // constant 7: right shift after the 8x8->16 multiplies
#define D_D32_ITERS r9:8 // loaded as a pair from the stack
#define D_ITERS r9 // outermost count: number of d32 depth slices
#define D32_ITERS r8 // innermost count: vectors per row
#define NEXT_OUTER_IN r11 // input pointer for the next depth slice
#define NEXT_OUTER_OUT r10 // output pointer for the next depth slice
#define NEXT_IN r13 // input pointer for the next row
#define NEXT_OUT r12 // output pointer for the next row
#define H_ITERS r14 // rows per depth slice
#define SHRINK r15 // positive-side rescale factor byte (splatted below)
#define SHRINK_H_ITERS r15:14
#define OUT_QZERO r28 // output quantized-zero byte
/*---- HVX vector-register roles ----*/
#define VQZERO v0 // input zero-point in every byte lane
#define INVALS v1 // current input vector
#define NEGOFFS v2 // sat_u8(qzero - in): distance below zero-point, 0 for positives
//#define ZOFFS_OUT v3
#define PRODUCTS v5:4 // 16-bit products of the negative path (also alpha prescale)
#define PRODUCTSHI v5
#define PRODUCTSLO v4
#define ALPHAS v6 // per-depth alphas, prescaled by SHRINK in the prologue
#define DONEVALS v7 // final output bytes
#define POSOFFS_OUT v8 // positive path after >>7 and +out_qzero
#define NEGOFFS_OUT v9 // negative path after >>7
#define POSPRODS v11:10 // 16-bit products of the positive path
#define POSPRODSHI v11
#define POSPRODSLO v10
#define POSOFFS v12 // sat_u8(in - qzero): distance above zero-point, 0 for negatives
#define VZERO v13
#define VOUT_QZERO v14 // output zero-point in every byte lane
#define VSHRINK v15 // SHRINK in every byte lane
#define VALPHA_SHRINK v15 // NOTE(review): unused alias of VSHRINK in the live code
/*----------------------------------------------------------------------
 * prelu_hvx_d32 -- HVX PReLU on 8-bit quantized activations, d32 layout.
 *
 * Per byte x:  pos = sat_u8(x - qzero)        (0 when x is below zero-point)
 *              neg = sat_u8(qzero - x)        (0 when x is above zero-point)
 *              out = sat_u8(out_qzero + rnd(pos*shrink >> 7))
 *                    - rnd(neg*alpha' >> 7)   (byte subtract, wrapping)
 * where alpha' = sat_i8(rnd(alpha*shrink >> (7 - alpha_shift))) is computed
 * once per d32 depth slice from the next ALPHABUF vector.
 *
 * Register args: OUT=r0, IN=r1, IN_NEXT_ROW=r2, IN_NEXT_D32=r3,
 * ALPHABUF=r4, QZERO=r5; stack: [sp+0]=d32_iters, [sp+4]=d_iters,
 * [sp+8]=h_iters, [sp+12]=shrink, [sp+16]=out_qzero, [sp+20]=alpha_shift.
 * NOTE(review): names/order reconstructed from the loads below and from the
 * commented prototype of the #if 0 "large" variant -- confirm vs. C caller.
 *
 * Loop nest: loop0 = vectors across a row (D32_ITERS),
 *            loop1 = rows (H_ITERS),
 *            manual outer loop over depth slices (D_ITERS) via .Lh_iter.
 * Leaf function: no stack frame is allocated.
 *--------------------------------------------------------------------*/
.text
.global prelu_hvx_d32
.type prelu_hvx_d32,@function
.p2align 6
prelu_hvx_d32:
{
D_D32_ITERS = memd(r29+#0) // r8 = vectors/row, r9 = depth-slice count
SHRINK_H_ITERS = memd(r29+#8) // r14 = row count, r15 = shrink byte
QZERO = vsplatb(QZERO)
RSHIFT7 = #7
}
{
OUT_QZERO = memw(r29+#16)
ALPHA_SHIFT = memw(r29+#20)
VZERO = vxor(VZERO,VZERO)
SHRINK = vsplatb(SHRINK)
}
{
//D32_ITERS = add(D32_ITERS,#1)
NEXT_OUTER_IN = add(IN,IN_NEXT_D32)
NEXT_OUTER_OUT = add(OUT,OUT_NEXT_D32)
OUT_QZERO = vsplatb(OUT_QZERO)
}
{
ALPHAS = vmem(ALPHABUF++#1) // alphas for the first depth slice
loop1(.Lh_iter,H_ITERS)
VQZERO = vsplat(QZERO)
ALPHA_SHIFT = sub(#7,ALPHA_SHIFT) // vasr amount for the alpha prescale
}
{
VSHRINK = vsplat(SHRINK)
NEXT_IN = add(IN,IN_NEXT_ROW)
NEXT_OUT = add(OUT,OUT_NEXT_ROW)
}
{
//p3=sp1loop0(.Looptop,D32_ITERS)
loop0(.Looptop,D32_ITERS)
p3 = cmp.eq(r0,r0) // p3 always true: leftover guard from the pipelined version
VOUT_QZERO = vsplat(OUT_QZERO)
}
// prescale this slice's alphas: alpha' = sat_i8(rnd(shrink*alpha >> (7-alpha_shift)))
{
PRODUCTS.h = vmpy(VSHRINK.ub,ALPHAS.b)
}
{
ALPHAS.b = vasr(PRODUCTSHI.h,PRODUCTSLO.h,ALPHA_SHIFT):rnd:sat
}
.falign
.Ld32_iter: // three names for the same address: hardware-loop heads and
.Lh_iter: // the outer depth-slice re-entry point
.Looptop:
{
INVALS.tmp = vmem(IN++#1) // get input data // 1st
NEGOFFS.ub = vsub(VQZERO.ub,INVALS.ub):sat // get negative offsets, pos-->0 // 1st
POSOFFS.ub = vsub(INVALS.ub,VQZERO.ub):sat // get pos offsets, neg-->0 // 1st
}
{
POSPRODS.uh = vmpy(POSOFFS.ub,SHRINK.ub) // scale to output // 1st
}
{
PRODUCTS.h = vmpy(NEGOFFS.ub,ALPHAS.b) // multiply by alpha & scale // 1st
}
POSOFFS_OUT.ub = vasr(POSPRODSHI.h,POSPRODSLO.h,RSHIFT7):rnd:sat
NEGOFFS_OUT.b = vasr(PRODUCTSHI.h,PRODUCTSLO.h,RSHIFT7):rnd:sat
POSOFFS_OUT.ub = vadd(VOUT_QZERO.ub,POSOFFS_OUT.ub):sat
{
DONEVALS.b = vsub(POSOFFS_OUT.b,NEGOFFS_OUT.b) // hopefully our math is right
//if (p3) vmem(OUT++#1) = DONEVALS.new
vmem(OUT++#1) = DONEVALS.new
}
// dead prototype of a software-pipelined inner loop (never compiled);
// a second copy of the same idea exists as prelu_hvx_large_d32 below
#if 0
{
INVALS.TMP = vmem(IN++#1) // 1st iter
ZOFFS_IN.b = vsub(VQZERO.b,INVALS.b) // 1st iter
POSOFFS2.h = vsub(INVALS.ub,VQZERO.ub) // 1st iter, DOUBLE RESOURCE
}
{
PRODUCTS.h = vmpy(ZOFFS_IN.ub,ALPHAS.b) // 1st iter, DOUBLE RESOURCE
POSOFFS.ub = vasr(POSOFFS2HI.h,POSOFFS2LO.h,POS_SHIFT_R):rnd:sat // 1st iter, negative values --> 0
}
ZOFFS_OUT.b = vasr(PRODUCTSHI.h,PRODUCTSLO.h,RSHIFT):rnd:sat //2nd
{
OUTVALS.b = vsub(VQZERO.b,ZOFFS_OUT.b) // 2nd
QSMALLS = vcmp.eq(VZERO.b,POSOFFS.b) // 2nd iter, > 0 offset?
POSOFFS.b = vadd(VQZERO.b,POSOFFS.b) // 2nd iter
}
DONEVALS = vmux(QSMALLS,OUTVALS,POSOFFS) // 2nd
if (p3) vmem(OUT++#1) = DONEVALS.new // 2nd
#endif
{ nop }:endloop0
// end of row: step both pointers to the next row, re-arm the inner loop
{
IN_OUT = combine(NEXT_IN,NEXT_OUT)
NEXT_IN = add(NEXT_IN,IN_NEXT_ROW)
NEXT_OUT = add(NEXT_OUT,OUT_NEXT_ROW)
loop0(.Looptop,D32_ITERS)
//p3=sp1loop0(.Looptop,D32_ITERS)
}:endloop1
// end of depth slice: advance to the next d32 slice and its alpha vector
{
IN_OUT = combine(NEXT_OUTER_IN,NEXT_OUTER_OUT)
NEXT_OUTER_IN = add(NEXT_OUTER_IN,IN_NEXT_D32)
NEXT_OUTER_OUT = add(NEXT_OUTER_OUT,OUT_NEXT_D32)
ALPHAS = vmem(ALPHABUF++#1)
}
{
NEXT_IN = add(IN,IN_NEXT_ROW)
NEXT_OUT = add(OUT,OUT_NEXT_ROW)
PRODUCTS.h = vmpy(VSHRINK.ub,ALPHAS.b) // start prescaling next slice's alphas
D_ITERS = add(D_ITERS,#-1)
}
{
loop1(.Looptop,H_ITERS) // same address as .Lh_iter
p0 = cmp.eq(D_ITERS,#0)
if (!p0.new) jump:t .Lh_iter
ALPHAS.b = vasr(PRODUCTSHI.h,PRODUCTSLO.h,ALPHA_SHIFT):rnd:sat
}
jumpr r31
.size prelu_hvx_d32,.-prelu_hvx_d32
/* NOTE(review): everything to the matching #endif is DEAD CODE -- an
 * unfinished software-pipelined "large" PReLU variant, excluded by #if 0.
 * Latent problems if it is ever re-enabled:
 *  - uses macros that are not defined in this file (POS_SHIFT_H_ITERS,
 *    POS_SHIFT, POS_SHIFT_R, RSHIFT, ZOFFS_IN, OUTVALS, QSMALLS,
 *    POSOFFS2/POSOFFS2HI/POSOFFS2LO; ZOFFS_OUT is commented out above,
 *    and "INVALS.TMP" has the wrong case for the .tmp qualifier);
 *  - the outer-loop back-branch targets .Lh_iter, which is the loop head
 *    of prelu_hvx_d32, not this function's .Ll_h_iter;
 *  - the final .size directive names prelu_hvx_d32 instead of
 *    prelu_hvx_large_d32 (and duplicates the earlier .size). */
#if 0
/* prelu_hvx_large_d32(out,in,in_next_row,in_next_d32,w_iters,d32_iters,height,alpha_frac_buf,qzero,shift) */
.text
.global prelu_hvx_large_d32
.type prelu_hvx_large_d32,@function
.p2align 6
prelu_hvx_large_d32:
{
D_D32_ITERS = memd(r29+#0)
POS_SHIFT_H_ITERS = memd(r29+#8)
QZERO = vsplatb(QZERO)
RSHIFT = #7
}
{
D32_ITERS = add(D32_ITERS,#1)
NEXT_OUTER_IN = add(IN,IN_NEXT_D32)
NEXT_OUTER_OUT = add(OUT,OUT_NEXT_D32)
VZERO = vxor(VZERO,VZERO)
}
{
ALPHAS = vmem(ALPHABUF++#1)
loop1(.Ll_h_iter,H_ITERS)
VQZERO = vsplat(QZERO)
}
{
p3=sp1loop0(.Looptop_l,D32_ITERS) // pipelined loop: p3 false on 1st iteration
NEXT_IN = add(IN,IN_NEXT_ROW)
NEXT_OUT = add(OUT,OUT_NEXT_ROW)
POS_SHIFT_R = POS_SHIFT
}
.falign
.Ll_d32_iter:
.Ll_h_iter:
.Looptop_l:
{
INVALS.TMP = vmem(IN++#1) // 1st iter
ZOFFS_IN.b = vsub(VQZERO.b,INVALS.b) // 1st iter, DOUBLE RESOURCE
POSOFFS2.h = vsub(INVALS.ub,VQZERO.ub) // 1st iter
ZOFFS_OUT.b = vasr(PRODUCTSHI.h,PRODUCTSLO.h,RSHIFT):rnd:sat //2nd
}
{
OUTVALS.b = vsub(VQZERO.b,ZOFFS_OUT.b) // 2nd
QSMALLS = vcmp.eq(VZERO.b,POSOFFS.b) // 2nd iter, > 0 offset?
POSOFFS.b = vadd(VQZERO.b,POSOFFS.b) // 2nd iter
}
{
PRODUCTS.h = vmpy(ZOFFS_IN.ub,ALPHAS.b) // 1st iter, DOUBLE RESOURCE
DONEVALS = vmux(QSMALLS,OUTVALS,POSOFFS) // 2nd
if (p3) vmem(OUT++#1) = DONEVALS.new // 2nd
POSOFFS.ub = vasr(POSOFFS2HI.h,POSOFFS2LO.h,POS_SHIFT_R):rnd:sat // 1st iter, negative values --> 0
}:endloop0
{
IN_OUT = combine(NEXT_IN,NEXT_OUT)
NEXT_IN = add(NEXT_IN,IN_NEXT_ROW)
NEXT_OUT = add(NEXT_OUT,OUT_NEXT_ROW)
//loop0(.Looptop,D32_ITERS)
p3=sp1loop0(.Looptop_l,D32_ITERS)
}:endloop1
{
IN_OUT = combine(NEXT_OUTER_IN,NEXT_OUTER_OUT)
NEXT_OUTER_IN = add(NEXT_OUTER_IN,IN_NEXT_D32)
NEXT_OUTER_OUT = add(NEXT_OUTER_OUT,OUT_NEXT_D32)
ALPHAS = vmem(ALPHABUF++#1)
}
{
NEXT_IN = add(NEXT_IN,IN_NEXT_ROW) // NOTE(review): live version restarts from IN/OUT here
NEXT_OUT = add(NEXT_OUT,OUT_NEXT_ROW)
}
{
loop1(.Looptop_l,H_ITERS)
D_ITERS = add(D_ITERS,#-1)
// packet reads the PRE-decrement D_ITERS, so cmp.eq(..,#1) is equivalent
// to the live version's post-decrement cmp.eq(..,#0)
p0 = cmp.eq(D_ITERS,#1)
if (!p0.new) jump:t .Lh_iter // BUG(latent): should be .Ll_h_iter
}
jumpr r31
.size prelu_hvx_d32,.-prelu_hvx_d32 // BUG(latent): wrong symbol name
#endif
/* ==== concatenation artifact removed: hexagon/asm_src/maxpool_3x3s2_d32.S begins here ==== */
/*
* Copyright (c) 2016,2017,2018 The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*======================================================================*/
/* FUNCTIONS : maxpool_slice_hvx_3x3_stride2 */
/* */
/* DESCRIPTION */
/* Get maximun on 3x3 with d32 format */
/* */
/* ARCHITECTURE : QDSP6V6 + HVX */
/*======================================================================*/
/* REVISION HISTORY: */
/* ================= */
/* */
/* Author Date Comments */
/* ------------------------------------------------------------- */
/*======================================================================*/
/* CYCLE-COUNT: */
/* -> */
/* */
/* MEMORY */
/* CODESIZE = bytes */
/* STACK = bytes */
/* ASSUMPTIONS */
/* arrays are 128 byte aligned */
/* C MODEL */
/*======================================================================*/
/*
int maxpool_slice_hvx_3x3_stride2(
uint8_t *out,
const uint8_t *in,
int32_t in_next_row,
int32_t out_next_row,
int32_t out_vectors_wide,
int32_t out_lines,
int32_t out_lalign);
*/
.file "maxpool_3x3_s2_d32.S"
/*======================================================================*/
/* Scalar-register roles.
 * Args: outptr=r0, inptr=r1, in_next_row=r2, then out_next_row and
 * out_vectors_wide are moved out of r3/r4 in the prologue; out_lines=r5;
 * out_lalign arrives on the stack at sp+#0.
 * NOTE(review): r10, r11, r14 and r15 each carry two names whose live
 * ranges are sequential (rtmp/pfptr, out_align/fr, l2param_l/pf_width,
 * l2param_h/pf_height) -- check before editing register assignments. */
#define outptr r0
#define inptr r1
#define in_outptr r1:0 // pair so one combine() can reset both pointers
#define in_next_row r2
#define tmp_outptr r3 // second output row within a pass
#define r_valign r4 // valign control (=96) for the .Ldo_inner96 path
#define out_lines r5 // halved in the prologue: two rows produced per pass
#define const32 r6
#define const_m32 r7
#define const_m32_32 r7:6
#define outer_next_outptr r8
#define outer_next_inptr r9
#define outer_next_in_outptr r9:8
#define rtmp r10
#define out_align r11
#define fr out_align // USR snapshot while spinning on l2fetch completion
#define out_vectors_wide r12
#define out_next_row r13
#define l2param_l r14
#define l2param_h r15
#define l2param r15:14 // l2fetch descriptor: 1 | row-pitch | width | height
#define pf_width r14 // alias of l2param_l
#define pf_height r15 // alias of l2param_h
#define c128 r28 // one vector (128 bytes) -- output column step
#define p_odd p1 // bit 5 of out_lalign: selects the lalign==96 path
#define pfptr r10 // alias of rtmp: prefetch pointer for the next pass
/*======================================================================*/
/* HVX vector roles: 5 consecutive input rows per pass feed two 3-row
 * vertical maxima (rows 0-2 and 2-4), which are then column-decimated. */
#define in00 v0
#define in10 v1
#define in20 v2
#define in30 v3
#define in40 v4
#define h0_0246 v8 // output row 0: horizontal maxima at even/odd columns
#define h0_1357 v9
#define h0_1357_0246 v9:8
#define h1_0246 v10 // output row 1 likewise
#define h1_1357 v11
#define h1_1357_0246 v11:10
#define vsum0L v12 // vertical maxima: Left/Middle/Right vector chunks
#define vsum1L v13
#define vsum0M v14
#define vsum1M v15
#define acc0 v18 // accumulators for the two output rows
#define acc1 v19
#define maxtmp v22
#define vXsum0L v24 // pre-valign ("X") vertical maxima, lalign==96 path
#define vXsum1L v25
#define vXsum0M v26
#define vXsum1M v27
/* NOTE CAREFULLY: alias INx8 to INx0 for stride2 */
#define vsum0R vsum0L
#define vsum1R vsum1L
#define vXsum0R vXsum0L
#define vXsum1R vXsum1L
/*======================================================================*/
/* On V60 the store-guard is compiled away, so the priming iteration of the
 * pipelined loop stores an uninitialized acc; this is later overwritten in
 * place because outptr only advances under p3 (see "if p3 outptr = add"). */
#if __HEXAGON_ARCH__ == 60
#define COND(_a)
#elif __HEXAGON_ARCH__ >= 62
#define COND(_a) IF (_a)
#endif
/*======================================================================*/
//
// last parameter out_lalign:
// = 0 means output[0] is based on in[0,1,2], output [1] on in[2,3,4] etc
// = 32 means output [0] is based on [1,2,3], output [1] on in[3,4,5] etc
// = 64 means output[3] is based on in[0,1,2], output [4] on in[2,3,4] etc
// = 96 means output[3] is based on in[1,2,3], output [4] on in[3,4,5] etc
//
// (currently this supports only 0 and 96; others are only needed when
// w_pad_before != 4, and the C code isn't set up to handle that yet.
/*======================================================================*/
/*----------------------------------------------------------------------
 * int maxpool_slice_hvx_3x3_stride2(uint8_t *out, const uint8_t *in,
 *        int32_t in_next_row, int32_t out_next_row,
 *        int32_t out_vectors_wide, int32_t out_lines, int32_t out_lalign)
 *
 * 3x3 max pooling, stride 2, on d32-format 8-bit data.
 * Each outer pass reads 5 input rows (advancing 4, so one row overlaps)
 * and produces 2 output rows; each inner iteration consumes two input
 * vectors per row and emits one output vector per row -- the stride-2
 * column decimation is done with vdeal (even/odd split) plus valign
 * (the shifted {2,4,6,8} chunk).
 * Only out_lalign 0 (fall-through) and 96 (.Ldo_inner96) are handled,
 * matching the header comment above.
 * The next pass's input strip is issued with l2fetch, and each pass first
 * spins until the previous l2fetch is complete (USR read is non-negative).
 * Always returns 0.
 *--------------------------------------------------------------------*/
.text
.global maxpool_slice_hvx_3x3_stride2
.type maxpool_slice_hvx_3x3_stride2,@function
maxpool_slice_hvx_3x3_stride2:
{
out_lines = asrrnd(out_lines,#1) // two output rows per outer pass
m0 = in_next_row //
out_vectors_wide = r4 // free r4 for r_valign
out_next_row = r3 // free r3
}{
out_align=memw(sp+#0) //
rtmp = asl(in_next_row,#2) //
c128 = #128 //
pf_width = asl(out_vectors_wide,#8) // 128*2*out_vectors_wide
}{
rtmp = sub(#128, rtmp) // m1 = next 128-byte chunk, back up 4 rows
p_odd = tstbit(out_align,#5) //
if !p_odd.new pf_width = add(pf_width,c128) // 128*2*out_vectors_wide (+ 128)
pf_height = #4 //
}{
m1 = rtmp //
const_m32_32 = combine(#-32,#32) //
l2param_l = combine(pf_width.l,pf_height.l) //
l2param_h = in_next_row //
}{
l2param_h.H = #1 //l2parm= 1|in_next_row|128*2*out_vectors_wide(+128)|4
loop1(.Louter_3x3s2,out_lines) //
outer_next_inptr = addasl(inptr,in_next_row,#2) //
nop //
}{
p0 = cmp.gt(out_lines,#1) //
if !p0.new l2param_l = #0 // last pass: zero-size prefetch
pfptr = add(outer_next_inptr,in_next_row) //
if (p_odd) jump:nt .Ldo_inner96 //
}
.balign 32
.Louter_3x3s2:
.wait_l2fetch:
{ fr = usr // wait_for_l2fetch()
}{
p0 = cmp.gt(fr,#-1) // spin while USR sign bit (fetch busy) is set
if (!p0.new) jump:nt .wait_l2fetch //
}{
l2fetch(pfptr,l2param) // start fetching the next 5-row strip
}{
in00 = vmem(inptr++m0) //
out_lines = add(out_lines,#-1) //
p3 = sp1loop0(.Linner_3x3s2,out_vectors_wide) // p3 false on priming iter
}{
// loop prologue: build the "Left" vertical maxima for both output rows
in10.tmp = vmem(inptr++m0) //
vsum0L.ub = vmax(in00.ub,in10.ub) //
nop //
}{
in20.cur = vmem(inptr++m0) //
vsum0L.ub = vmax(vsum0L.ub,in20.ub) // rows 0..2 done
outer_next_outptr = addasl(outptr,out_next_row,#1)//
nop //
}{
in30.tmp = vmem(inptr++m0) //
vsum1L.ub = vmax(in20.ub,in30.ub) //
}{
in40.tmp = vmem(inptr++m1) //
vsum1L.ub = vmax(vsum1L.ub,in40.ub) // rows 2..4 done
}
.balign 32
.Linner_3x3s2:
{
in00 = vmem(inptr++m0) //
acc0.ub = vmax(acc0.ub, maxtmp.ub) //[2]row 0 outputs.
tmp_outptr = add(outptr,out_next_row) //[2]
COND(p3) vmem(outptr+#0) = acc0.new //[2]
}{
in10.tmp = vmem(inptr++m0) //
vsum0M.ub = vmax(in00.ub,in10.ub) //
maxtmp = valign( vsum1R, h1_0246, const32 ) //[2] {2,4,6,8 }
}{
in20.cur = vmem(inptr++m0) //
vsum0M.ub = vmax(vsum0M.ub,in20.ub) //
acc1.ub = vmax(acc1.ub, maxtmp.ub) //[2]row 1 outputs.
COND(p3) vmem(tmp_outptr+#0) = acc1.new //[2]
}{
in30.tmp = vmem(inptr++m0) //
vsum1M.ub = vmax(in20.ub,in30.ub) //
if p3 outptr = add(outptr,c128) //[2]
}{
h0_1357_0246 = vdeal( vsum0M, vsum0L,const_m32) // split even/odd columns
in40.tmp = vmem(inptr++m1) //
vsum1M.ub = vmax(vsum1M.ub,in40.ub) //
}{
// have {0,2,4,6} and {1,3,5,7}; need {2,4,6,8}
acc0.ub = vmax(h0_1357.ub, h0_0246.ub ) //
in00 = vmem( inptr++m0) //
}{
h1_1357_0246 = vdeal( vsum1M, vsum1L,const_m32) //
in10.tmp = vmem(inptr++m0) //
vsum0R.ub = vmax(in00.ub,in10.ub) //
}{
in20.cur = vmem(inptr++m0) //
vsum0R.ub = vmax(vsum0R.ub,in20.ub) //
acc1.ub = vmax(h1_1357.ub, h1_0246.ub ) //
}{
in30.tmp = vmem(inptr++m0) //
vsum1R.ub = vmax(in20.ub,in30.ub) //
}{
maxtmp = valign( vsum0R, h0_0246, const32 ) // {2,4,6,8 }
in40.tmp = vmem(inptr++m1) //
vsum1R.ub = vmax(vsum1R.ub,in40.ub) //
}:endloop0
// loop epilogue: flush the final output vector of each row
{
acc0.ub = vmax(acc0.ub, maxtmp.ub) //[2]row 0 outputs.
tmp_outptr = add(outptr,out_next_row) //[2]
vmem(outptr+#0) = acc0.new //[2]
in_outptr = outer_next_in_outptr //
}{
maxtmp = valign( vsum1R, h1_0246, const32 ) //[2] {2,4,6,8 }
p0 = cmp.gt(out_lines,#1) //
outer_next_inptr += asl(in_next_row,#2) //
}{
acc1.ub = vmax(acc1.ub, maxtmp.ub) //[2]row 1 outputs.
vmem(tmp_outptr+#0) = acc1.new //[2]
if !p0 l2param_l = #0 //
pfptr = add(outer_next_inptr,in_next_row) //
}:endloop1
/*======================================================================*/
{
r0 = #0 //
jumpr r31 //
}
/*======================================================================*/
// out_lalign = 96
// this is like the align = 0 case, if we push 5 pixels into the start of each row first.
// this is done by finding vsumX[01][LMR] and then align M,L,
//
// before row loop:
//
// Lx = L= [x,x,x,x]
// in row loop:
//
// Mx = [0,1,2,3] + 8*i
// -> M = valign(Mx,Lx,96) = [x,0,1,2]
// Rx = [4,5,6,7] + 8*i
// -> R = valign(Rx,Mx,96) = [3,4,5,6]
// - use L,M,R as before;
// Lx <= Rx; L<=R (done by aliased regs)
//
//
//
/*======================================================================*/
.Ldo_inner96:
{
loop1(.Louter_3x3s2_x96,out_lines) //
r_valign = #96 //
}
.balign 32
.Louter_3x3s2_x96:
.wait_l2fetch_x96:
{ fr = usr // wait_for_l2fetch()
}{
p0 = cmp.gt(fr,#-1) //
if (!p0.new) jump:nt .wait_l2fetch_x96 //
}{
l2fetch(pfptr,l2param) //
}{
vXsum0L = #0 // left-edge history starts at zero
vXsum1L = #0 //
outer_next_outptr = addasl(outptr,out_next_row,#1)//
p3 = sp1loop0(.Linner_3x3s2_x96,out_vectors_wide) //
}{
vsum0L = vXsum0L //
vsum1L = vXsum1L //
nop //
}
.balign 32
.Linner_3x3s2_x96:
{
in00 = vmem(inptr++m0) //
acc1.ub = vmax(h1_1357.ub, h1_0246.ub ) //[2]
tmp_outptr = add(outptr,out_next_row) //[2]
maxtmp = valign( vsum0R, h0_0246, const32 ) //[2] {2,4,6,8 }
}{
in10 = vmem(inptr++m0) //
vsum1R = valign(vXsum1R,vXsum1M,r_valign) //[2]
acc0.ub = vmax(acc0.ub, maxtmp.ub) //[2]
#if __HEXAGON_ARCH__ == 60
vmem(outptr+#0) = acc0.new //[2]
#else
if p3 vmem(outptr++#1) = acc0.new //[2]
#endif
}{
in20 = vmem(inptr++m0) //
vXsum0M.ub = vmax(in00.ub,in10.ub) //
}{
vXsum0M.ub = vmax(vXsum0M.ub,in20.ub) // done
maxtmp = valign( vsum1R, h1_0246, const32 ) //[2] {2,4,6,8 }
}{
in30.cur = vmem(inptr++m0) //
vXsum1M.ub = vmax(in20.ub,in30.ub) //
acc1.ub = vmax(acc1.ub, maxtmp.ub) //[2]row 1 outputs.
#if __HEXAGON_ARCH__ == 60
vmem(tmp_outptr+#0) = acc1.new //[2]
#else
if p3 vmem(tmp_outptr+#0) = acc1.new //[2]
#endif
}{
vsum0M = valign(vXsum0M,vXsum0L,r_valign) // shift 96: borrow 4 px from left
in40.cur = vmem(inptr++m1) //
vXsum1M.ub = vmax(vXsum1M.ub,in40.ub) // done
}{
in00 = vmem(inptr++m0) //
#if __HEXAGON_ARCH__ == 60
if p3 outptr = add(outptr,c128) //[2]
#endif
}{
in10.cur = vmem(inptr++m0) //
vXsum0R.ub = vmax(in00.ub,in10.ub) //
vsum1M = valign(vXsum1M,vXsum1L,r_valign); //
}{
in20.cur = vmem(inptr++m0) //
vXsum0R.ub = vmax(vXsum0R.ub,in20.ub) // done
h0_1357_0246 = vdeal( vsum0M, vsum0L,const_m32) //
}{
in30.cur = vmem(inptr++m0) //
vXsum1R.ub = vmax(in20.ub,in30.ub) //
h1_1357_0246 = vdeal( vsum1M, vsum1L,const_m32) //
}{
in40.cur = vmem(inptr++m1) //
vXsum1R.ub = vmax(vXsum1R.ub,in40.ub) // done
vsum0R = valign(vXsum0R,vXsum0M,r_valign); //
acc0.ub = vmax(h0_1357.ub, h0_0246.ub ) //
}:endloop0
// loop epilogue: flush the final output vector of each row
{
acc1.ub = vmax(h1_1357.ub, h1_0246.ub ) //
tmp_outptr = add(outptr,out_next_row); //
maxtmp = valign( vsum0R, h0_0246, const32 ) // {2,4,6,8 }
out_lines = add(out_lines,#-1) //
}{
acc0.ub = vmax(acc0.ub, maxtmp.ub); // row 0 outputs.
vmem(outptr+#0) = acc0.new //
vsum1R = valign(vXsum1R,vXsum1M,r_valign); //
in_outptr = outer_next_in_outptr //
}{
maxtmp = valign( vsum1R, h1_0246, const32 ) // {2,4,6,8 }
outer_next_inptr += asl(in_next_row,#2) //
p0 = cmp.gt(out_lines,#1) //
if !p0.new l2param_l = #0 //
}{
acc1.ub = vmax(acc1.ub, maxtmp.ub); // row 1 outputs.
vmem(tmp_outptr+#0) = acc1.new //
pfptr = add(outer_next_inptr,in_next_row) //
}:endloop1
/*======================================================================*/
{
r0 = #0 //
jumpr r31 //
}
/*======================================================================*/
.size maxpool_slice_hvx_3x3_stride2,.-maxpool_slice_hvx_3x3_stride2
/*======================================================================*/
/* ==== concatenation artifact removed: hexagon/asm_src/gvconvsum2dbbw_h.S begins here ==== */
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*======================================================================*/
/* FUNCTIONS : gvmmpybbw_asm */
/* */
/* DESCRIPTION */
/* Perform gvm vector matrix multiply, result left at */
/* 32bits */
/* */
/* ARCHITECTURE : QDSP6V6 + HVX */
/*======================================================================*/
/* REVISION HISTORY: */
/* ================= */
/* */
/* Author Date Comments */
/* ------------------------------------------------------------- */
/* DJH 03/07/16 created */
/* DJH 05/10/16 added post add for x and y offset*/
/* DJH 07/10/16 rewrote pre-transpose */
/* DJH 09/16/16 fix over prefetch by 16 now 8 */
/*======================================================================*/
/* CYCLE-COUNT: */
/* -> K*N/256+11*N/4+24 */
/* */
/* MEMORY */
/* CODESIZE = 960 bytes */
/* STACK = 48 bytes */
/* ASSUMPTIONS */
/* y and z are 128 byte aligned */
/* x is 8byte aligned */
/* N%4=0 K%16=0 M%32=0 */
/* C MODEL */
/*======================================================================*/
#if 0
void gvmmpybbw_cn(uint8 * a, uint8 * b, int * c, int N, int M, int K) {
int i, j, k;
int32 sum;
uint8 a_val, b_val;
for (j=0; j < M; j++) {
for (i=0; i < N; i++) {
sum = 0;
for (k=0; k < K; k++) {
a_val = a[i*K+k];
b_val = b[k*M+j];
sum += a_val * b_val ;
}
c[i*M+j] = sum;
}
}
return;
}
#endif
/*=============================================================================*/
.text
.file "gvconvsum2dbbw_h.S"
.global gvconvsum2dbbw_asm
.balign 32
.type gvconvsum2dbbw_asm, @function
gvconvsum2dbbw_asm:
/*=============================================================================*/
/* Argument-register map for gvconvsum2dbbw_asm.
 * First six word args in r0..r5; the trailing digit in the comments below
 * is the incoming stack-argument slot, read via memw(sp+#n<<2) in the
 * prologue (offsets shift once allocframe has moved sp).
 * NOTE(review): r6 is deliberately reused for filt_width, filt_height and
 * zsum -- the live ranges are sequential; check before editing. */
#define ptr_x r0 //data
#define ptr_yi r1 //weights
#define ptr_z r2 //results
#define in_width r3 //(pad_x+in_width) * depth
#define out_width r4 //out_width
#define m r5 //is stride of the output matrix always mult of 32
#define stride_depth r8 //0 stride|depth between computations
#define filt_width r6 //1 depth*filt_width
#define filt_height r6 //2 filt_height lines per filter
#define out_height r9 //3 number of vertical lines to perform
#define ptr_datasum r10 //4
#define ptr_weightsum r11 //5
#define ptr_max r16 //6
#define in_offset r14 //7
#define zsum r6 //8
/*=============================================================================*/
#define filt_skip r13 //the skip back after the fot_width is done for next filt_y
#define stride3_1 r12 //used in prefetch
#define ptr_x0 r11
#define stride4 r13 //
#define stride r25
#define next_outputs r23 //jump to input ptr for next set of outputs
#define ptr_y r9 //
#define col_count r22
#define c4 r6
#define mstride r15
#define fetch_count r7
#define pre_x r28
#define PREFETCH 64
// r10 r7
#define sum1_sum0 r1:0
#define sum1 r1
#define sum0 r0
#define sum3_sum2 r5:4
#define sum3 r5
#define sum2 r4
#define sum5_sum4 r25:24
#define sum5 r25
#define sum4 r24
#define sum7_sum6 r27:26
#define sum7 r27
#define sum6 r26
#define MSTRIDE M0 //stride*depth
#define M4STRIDE_1 M1 //3*stride*depth-16 0-1-2-3
//01234567
#define x07x04x03x00 r21:20 //11-----1
#define x07x04 r21 //11-----1
#define x03x00 r20 //1------1
#define x0fx0cx0bx08 r15:14 //1111---1
#define x0fx0c r15 //1111---1
#define x0bx08 r14 //111----1
#define x17x14x13x10 r19:18 //11------
#define x17x14 r19 //11------
#define x13x10 r18 //1-------
#define x1fx1cx1bx18 r17:16 //1111----
#define x1fx1c r17 //1111----
#define x1bx18 r16 //111-----
#define x27x24x23x20 r21:20 //---111--
#define x27x24 r21 //---111--
#define x23x20 r20 //---11---
#define x2fx2cx2bx28 r19:18 //---1111-
#define x2fx2c r19 //---11111
#define x2bx28 r18 //---1111-
#define x37x34x33x30 r15:14 //----11--
#define x37x34 r15 //----11--
#define x33x30 r14 //----1---
#define x3fx3cx3bx38 r17:16 //----1111
#define x3fx3c r17 //----1111
#define x3bx38 r16 //----111-
/*=============================================================================*/
#define z0 v0 //
#define z1 v1 //
#define z1z0 v1:0 //
#define z2 v2 //
#define z3 v3 //
#define z3z2 v3:2 //
#define x0 v4 //
#define x1 v5 //
#define x2 v6 //
#define x3 v7 //
#define y0 v8 //
#define y1 v9 //
#define y2 v10 //
#define y3 v11 //
#define vwsum v15 //
#define maxomaxe v13:12 //
#define maxe v12 //
#define maxo v13 //
#define vc8000 v14 //
/*=============================================================================*/
{
stride_depth = memw(sp+#0<<2) //extract stride*depth
dcfetch(ptr_x) //
} {
filt_height = memw(sp+#2<<2) //extract filt_height
//out_height = memw(sp+#3<<2) //number of output lines
m = asl(m, #2) //in ints
dcfetch(ptr_x+#32) //
} {
ptr_weightsum = memw(sp+#5<<2) //ptr pre computed weight sum
allocframe(#72) // 20<<2
} {
memd(sp+#32) = r25:24 //
memd(sp+#0) = r17:16 //
stride = lsr(stride_depth, #16) //
} {
memd(sp+#16) = r21:20 //
memd(sp+#24) = r23:22 //
stride_depth = mpy(stride_depth.H, stride_depth.L)
} {
M0 = stride_depth //
memd(sp+#8) = r19:18 //
memd(sp+#40) = r27:26 //
} {
vwsum = vmem(ptr_weightsum+#0) //
stride3_1 = addasl(stride_depth, stride_depth,#1) //3*stride
r16 = ##0x80000001 //max negative
} {
stride3_1 = sub(#16, stride3_1) //
next_outputs = mpyi(filt_height, in_width)
vc8000 = vsplat(r16) //
memw(sp+#56) = out_width //
} {
filt_width = memw(sp+#21<<2) //extract filt_width*depth
M1 = stride3_1 // add to
} {
stride3_1 = add(stride3_1, #16) //used for line prefetch
stride4= asl(stride_depth, #1) //4-2*stride
memw(sp+#48) = ptr_x //
memw(sp+#52) = ptr_yi //
} {
memw(sp+#60) = m //
next_outputs = sub(next_outputs, stride4)
filt_skip = sub(filt_width, in_width)
filt_width = lsr(filt_width, #4) //filt_width / 16
} {
memw(sp+#64) = r28
ptr_max = memw(sp+#26<<2) //ptr pre computed max value in output
filt_width = add(filt_width, #-1)
p3 = cmp.gt(stride_depth, #96) //is !(D <= 96) heuristic to fix fall behind fetch
} {
if(p3) stride3_1 = sub(stride3_1, stride_depth) //used for line prefetch
maxe= vmem(ptr_max+#0)
in_width = mpyi(in_width, stride) //
memw(sp+#21<<2) = filt_width //extract filt_width*depth /16 - 1
}
/*============================================================================*/
.balign 32
.L_height:
{
ptr_x0 = memw(sp+#48) //
memw(sp+#23<<2) -= #1 //out_height = add(out_height, #-1) //
} {
col_count = memw(sp+#56) //out_width
memw(sp+#48) += in_width //ptr_x += in_width
pre_x = add(ptr_x0, #PREFETCH)
}
.balign 32
.L_width:
{
ptr_y = memw(sp+#52) //ptr_yi //[P, 0] initialize filter pointer
filt_height = memw(sp+#22<<2) //extract filt_height
fetch_count = #0
} {
y0 = vmem(ptr_y++#2) //[0, 0]32x4
z1z0 = vcombine(vwsum, vwsum) //[P, 0]
dcfetch(pre_x)
} {
loop1(.L_filt_height, filt_height) //[P, 0]for(filt_y=0; filt_y < n; filt_y+=1){
y1 = vmem(ptr_y+#-1) //[0, 1]32x4
z3z2 = vcombine(vwsum, vwsum) //[P, 0]
pre_x = add(pre_x, stride_depth)
} {
x0fx0cx0bx08 = memd(ptr_x0+#8) //[0, 2]
x07x04x03x00 = memd(ptr_x0++MSTRIDE) //[0, 2]
sum1_sum0 = combine(#0, #0) //[P, 0]
sum3_sum2 = combine(#0, #0) //[P, 0]
} {
sum5_sum4 = combine(#0, #0) //[P, 0]
sum7_sum6 = combine(#0, #0) //[P, 0]
x1fx1cx1bx18 = memd(ptr_x0+#8) //[0, 3]
x17x14x13x10 = memd(ptr_x0++MSTRIDE) //[0, 3]
}
.balign 32
.L_filt_height:
{
filt_width = memw(sp+#21<<2) //extract filt_width*depth /16 - 1
sum1_sum0 += vraddub(x0fx0cx0bx08, x07x04x03x00) //[0, 4]
dcfetch(pre_x)
fetch_count = add(fetch_count, #1)
} {
loop0(.L_filt_width, filt_width) //[P, 0]ki is k1/16 - 1
z0.uw += vrmpy(y0.ub, x03x00.ub) //[0, 4]
pre_x = add(pre_x, stride_depth)
p3 = cmp.eq(fetch_count, #2) //
} {
sum3_sum2 += vraddub(x1fx1cx1bx18, x17x14x13x10) //[0, 5]
z1.uw += vrmpy(y0.ub, x13x10.ub) //[0, 5]
y2 = vmem(ptr_y++#2) //[0, 5]32x4
if(p3) fetch_count = #0
} {
z0.uw += vrmpy(y1.ub, x07x04.ub) //[0, 6]
z1.uw += vrmpy(y1.ub, x17x14.ub) //[0, 6]
y3 = vmem(ptr_y+#-1) //[0, 6]32x4
if(p3) pre_x = add(pre_x, stride3_1)
} {
z0.uw += vrmpy(y2.ub, x0bx08.ub) //[0, 7]
z1.uw += vrmpy(y2.ub, x1bx18.ub) //[0, 7]
x2fx2cx2bx28 = memd(ptr_x0+#8) //[0, 7]
x27x24x23x20 = memd(ptr_x0++MSTRIDE) //[0, 7]
} {
z0.uw += vrmpy(y3.ub, x0fx0c.ub) //[0, 8]
z1.uw += vrmpy(y3.ub, x1fx1c.ub) //[0, 8]
x3fx3cx3bx38 = memd(ptr_x0+#8) //[0, 8]
x37x34x33x30 = memd(ptr_x0++M4STRIDE_1)//[0, 8]
} {
sum5_sum4 += vraddub(x2fx2cx2bx28, x27x24x23x20) //[0, 9]
z2.uw += vrmpy(y0.ub, x23x20.ub) //[0, 9]
}
.balign 32
.L_filt_width:
{
sum7_sum6 += vraddub(x3fx3cx3bx38, x37x34x33x30) //[0,10]
z3.uw += vrmpy(y0.ub, x33x30.ub) //[0,10]
y0 = vmem(ptr_y++#2) //[1, 0]32x4
dcfetch(pre_x)
} {
z2.uw += vrmpy(y1.ub, x27x24.ub) //[0,11]
z3.uw += vrmpy(y1.ub, x37x34.ub) //[0,11]
y1 = vmem(ptr_y+#-1) //[1, 1]32x4
pre_x = add(pre_x, stride_depth)
} {
z2.uw += vrmpy(y2.ub, x2bx28.ub) //[0,12]
z3.uw += vrmpy(y2.ub, x3bx38.ub) //[0,12]
x0fx0cx0bx08 = memd(ptr_x0+#8) //[1, 2]
x07x04x03x00 = memd(ptr_x0++MSTRIDE) //[1, 2]
} {
z2.uw += vrmpy(y3.ub, x2fx2c.ub) //[0,13]
z3.uw += vrmpy(y3.ub, x3fx3c.ub) //[0,13]
x1fx1cx1bx18 = memd(ptr_x0+#8) //[1, 3]
x17x14x13x10 = memd(ptr_x0++MSTRIDE) //[1, 3]
} {
sum1_sum0 += vraddub(x0fx0cx0bx08, x07x04x03x00) //[1, 4]
z0.uw += vrmpy(y0.ub, x03x00.ub) //[1, 4]
dcfetch(pre_x)
pre_x = add(pre_x, stride_depth)
} {
sum3_sum2 += vraddub(x1fx1cx1bx18, x17x14x13x10) //[1, 5]
z1.uw += vrmpy(y0.ub, x13x10.ub) //[1, 5]
y2 = vmem(ptr_y++#2) //[1, 5]32x4
fetch_count = add(fetch_count, #1)
} {
z0.uw += vrmpy(y1.ub, x07x04.ub) //[1, 6]
z1.uw += vrmpy(y1.ub, x17x14.ub) //[1, 6]
y3 = vmem(ptr_y+#-1) //[1, 6]32x4
p3 = cmp.eq(fetch_count #2)
} {
z0.uw += vrmpy(y2.ub, x0bx08.ub) //[1, 7]
z1.uw += vrmpy(y2.ub, x1bx18.ub) //[1, 7]
x2fx2cx2bx28 = memd(ptr_x0+#8) //[1, 7]
x27x24x23x20 = memd(ptr_x0++MSTRIDE) //[1, 7]
} {
z0.uw += vrmpy(y3.ub, x0fx0c.ub) //[1, 8]
z1.uw += vrmpy(y3.ub, x1fx1c.ub) //[1, 8]
x3fx3cx3bx38 = memd(ptr_x0+#8) //[1, 8]
x37x34x33x30 = memd(ptr_x0++M4STRIDE_1)//[1, 8]
} {
sum5_sum4 += vraddub(x2fx2cx2bx28, x27x24x23x20) //[1, 9]
z2.uw += vrmpy(y0.ub, x23x20.ub) //[1, 9]
if(p3) fetch_count = #0 //[1, 9]
if(p3) pre_x = add(pre_x, stride3_1) //[1, 9]
}:endloop0
{
sum7_sum6 += vraddub(x3fx3cx3bx38, x37x34x33x30) //[1,10]
z3.uw += vrmpy(y0.ub, x33x30.ub) //[1,10]
y0 = vmem(ptr_y++#2) //[0, 0]32x4
ptr_x0 = sub(ptr_x0, filt_skip) //[E, 0]move to next line ptr_y keeps going
} {
z2.uw += vrmpy(y1.ub, x27x24.ub) //[1,11]
z3.uw += vrmpy(y1.ub, x37x34.ub) //[1,11]
y1 = vmem(ptr_y+#-1) //[0, 1]32x4
dcfetch(ptr_x0+#PREFETCH)
} {
z2.uw += vrmpy(y2.ub, x2bx28.ub) //[1,12]
pre_x = add(ptr_x0, #PREFETCH)
fetch_count = #0
} {
z3.uw += vrmpy(y2.ub, x3bx38.ub) //[1,12]
x0fx0cx0bx08 = memd(ptr_x0+#8) //[0, 2]
x07x04x03x00 = memd(ptr_x0++MSTRIDE) //[0, 2]
pre_x = add(pre_x, stride_depth)
} {
z2.uw += vrmpy(y3.ub, x2fx2c.ub) //[1,13]
z3.uw += vrmpy(y3.ub, x3fx3c.ub) //[1,13]
x1fx1cx1bx18 = memd(ptr_x0+#8) //[0, 3]
x17x14x13x10 = memd(ptr_x0++MSTRIDE) //[0, 3]
}:endloop1
{
ptr_x0 = sub(ptr_x0, next_outputs) //
in_offset = memw(sp+#27<<2) //+18+7
zsum = memw(sp+#28<<2) //+18+8res as zsum
} {
sum0 = zsum //
sum1 = add(sum0, sum1) //
ptr_datasum = memw(sp+#24<<2) //data sum ptr
} {
pre_x = add(ptr_x0, #PREFETCH) //pre_x, next_outputs) //
sum0 += mpyi(in_offset, sum1) //
mstride = memw(sp+#60) // result matrix stride copied into separete reg
} {
memw(ptr_datasum++#1<<2) = sum0 //
x0 = vsplat(sum0) //
} {
z0.w = vadd(z0.w, x0.w) //
vmem(ptr_z+#0):nt = z0.new //[E, ]
ptr_z = add(ptr_z, mstride) //
p0 = cmp.gt(col_count, #1) //
} {
maxe.w = vmax(maxe.w, z0.w) //
sum2 = zsum //
sum3 = add(sum2, sum3) //
dcfetch(ptr_x0)
} {
sum2 += mpyi(in_offset, sum3) //
dcfetch(ptr_x0+#32)
} {
if(p0)memw(ptr_datasum++#1<<2) = sum2 //
x1 = vsplat(sum2) //
} {
z1.w = vadd(z1.w, x1.w) //
if(p0)vmem(ptr_z+#0):nt = z1.new //[E, ]
if(p0)ptr_z = add(ptr_z, mstride) //
p1 = cmp.gt(col_count, #2) //
} {
if(!p0) z1 = vc8000 //
sum4 = zsum //
sum5 = add(sum4, sum5) //
} {
sum4 += mpyi(in_offset, sum5) //
} {
maxe.w = vmax(maxe.w, z1.w) //
if(p1)memw(ptr_datasum++#1<<2) = sum4 //
x2 = vsplat(sum4) //
} {
z2.w = vadd(z2.w, x2.w) //
if(p1)vmem(ptr_z+#0):nt = z2.new //[E, ]
if(p1)ptr_z = add(ptr_z, mstride) //
p0 = cmp.gt(col_count, #3) //
} {
if(!p1) z2 = vc8000 //
sum6 = zsum //
sum7 = add(sum6, sum7) //
} {
sum6 += mpyi(in_offset, sum7) //
} {
maxe.w = vmax(maxe.w, z2.w) //
if(p0)memw(ptr_datasum++#1<<2) = sum6 //
x3 = vsplat(sum6) //
} {
z3.w = vadd(z3.w, x3.w) //
if(p0)vmem(ptr_z+#0):nt = z3.new //[E, ]
if(p0)ptr_z = add(ptr_z, mstride) //
} {
if(!p0) z3 = vc8000 //
col_count = add(col_count, #-4) //
memw(sp+#24<<2) = ptr_datasum //data sum ptr
} {
maxe.w = vmax(maxe.w, z3.w) //
p2 = cmp.gt(col_count, #0) //
if(p2.new) jump:t .L_width //
}//end cols per line
{
out_height = memw(sp+#23<<2) //
} {
p1 = cmp.eq(out_height, #0) //
if(!p1.new) jump:t .L_height //
}//end lines per block
{
loop0(.L_peak, #5) //[P, 0]
c4 = #4 //
ptr_max = memw(sp+#26<<2) //ptr pre computed max value in output
}
.L_peak:
{
maxomaxe=vshuff(maxe,maxe,c4) //[0, 0]
} {
maxe.w = vmax(maxo.w, maxe.w) //[0, 1]
c4 = add(c4, c4) //[0, 1]
}:endloop0
{ vmem(ptr_max+#0) = maxe //[E, 0]
}
/*=============================================================================*/
{ r17:16 = memd(sp+#0) //restore stack
r19:18 = memd(sp+#8) //Q
} {
r21:20 = memd(sp+#16) //Q
r23:22 = memd(sp+#24) //Q
} {
r25:24 = memd(sp+#32) //Q
r27:26 = memd(sp+#40) //Q
} {
r28 = memw(sp+#64)
dealloc_return //Q
}
.L_end:
/*=============================================================================*/
//NOTE(review): the symbol below ('gvconvsum2dbbw_asm') does not match this
//file's documented entry point (gvconv2db2b2b2u_d32_asm, per the file header).
//The defining label is above this chunk's view - verify it matches, otherwise
//this .size directive annotates the wrong (or an undefined) symbol.
	.size	gvconvsum2dbbw_asm, .L_end-gvconvsum2dbbw_asm
|
XiaoMi/nnlib | 14,469 | hexagon/asm_src/ivint_h.S | /*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
* FUNCTIONS : ivint_asm
*
* DESCRIPTION
* Permfrm, 2d integration of a known depth 4 image. Assumption is
* input width is padded to multiple of 32. Input width == output width
*
* ARCHITECTURE : QDSP6V60 + HVX
*
* REVISION HISTORY:
* =================
*
* Author Date Comments
* -------------------------------------------------------------
* DJH 09/12/17 created
*
* CYCLE-COUNT: Integrates 32 depth 4 elements every 7 packets 18.3 bytes / packet
*
* MEMORY
* CODESIZE = 320 bytes
* STACK = 0 bytes
* ASSUMPTIONS
* Aligned to 128 bytes, width % 128 = 0
*/
#if 0
/*
 * Scalar reference model for ivint_asm: 2-D integral image of a depth-4
 * input.  For every pixel, the 4 depth bytes are summed (each scaled by
 * filt_offset); out_sum[j][i] is the inclusive running sum of those pixel
 * sums over all rows <= j and columns <= i within a row (row-wise prefix
 * sum plus the previous output row).
 *
 * in_data     : in_width * in_height pixels, 4 bytes (depth) per pixel
 * out_sum     : in_width * in_height output words
 * filt_offset : per-byte scale factor applied before accumulation
 */
void ivint_cn(
    uint8_t * in_data, int * out_sum, int in_width, int in_height, int filt_offset
) {
    int i,j,k;
    int sum, sumo;
    int in_depth = 4;
    /* Row scratch of depth-collapsed sums.  FIX: the original cast the
       malloc result to (int), truncating the pointer on 64-bit hosts;
       cast to (int *) and size by sizeof(int) instead of a literal 4. */
    int *tmp_buf = (int *)malloc(sizeof(int)*in_width);
    for(j=0; j < in_height; j++)
    {
        /* collapse depth: tmp_buf[i] = filt_offset * (sum of 4 bytes) */
        for(i=0; i < in_width; i++)
        {
            sum = 0;
            for(k=0; k < in_depth; k++)
            {
                sum += filt_offset*in_data[in_depth*in_width*j+in_depth*i+k];
            }
            tmp_buf[i] = sum;
        }
        /* horizontal prefix sum; add previous output row for 2-D integral */
        sum = 0;
        for(i=0; i < in_width; i++)
        {
            sum += tmp_buf[i];
            if(j==0) sumo = sum; else sumo = sum + out_sum[(j-1)*in_width + i];
            out_sum[in_width*j+i] = sumo;
        }
    }
    free(tmp_buf);
    return;
}
#endif
/* ---------------------------------------------------------------- */
.text
.global ivint_asm
.balign 32
.type ivint_asm, @function
ivint_asm:
/* ---------------------------------------------------------------- */
//calling registers
#define in_ptr r0 //
#define out_ptr r1 //
#define in_width r2 //
#define in_height r3 //
#define filt_offset r4 //
#define cntrl r5 //
//scaler registers
#define e1 r6 //
#define e2 r7 //
#define e3 r8 //
#define e4 r9 //
#define e5 r10 //
#define out_ptr_1 r11 //
#define width_bytes r14 //
#define ne5 r13 //
#define lcount r12 //
#define ne5_lcount r13:12//
//vector registers
#define vq1 q0 //
#define vq2 q1 //
#define vq3 q2 //
#define vq4 q3 //
#define vq5 q3 //
#define preds v18 //
#define perm1 v0 //
#define perm2 v1 //
#define perm3 v2 //
#define perm4 v3 //
#define perm5 v4 //
#define perm6 v5 //
#define delta4 v6 //
#define delta8 v7 //
#define delta16 v8 //
#define delta32 v9 //
#define delta64 v10 //
#define delta128 v11 //
#define vzero v12 //
#define intw31w00 v13 //
#define intx31x00 v14 //
#define inty31y00 v15 //
#define d31d00 v16 //
#define prev_line v17 //
/* ---------------------------------------------------------------- */
{
perm1 = vmem(cntrl+#0) //[P, 0]stage 1 of integration add even to odd
filt_offset = vsplatb(filt_offset) //[P, 0]replicate filter offset
e1 = ##0x01010101 //[P, 0]entry 0 of predicates
} {
d31d00.tmp = vmem(in_ptr++#1) //[P, 1]first 32 depths of data
d31d00.uw = vrmpy(d31d00.ub, filt_offset.ub) //[P, 1]32 sums of 4
width_bytes = asl(in_width, #2) //[P, 1]convert to bytes depth = 4
} {
delta128 = #0 //[P, 2]carry between 32 length blocks
in_width = lsr(in_width, #5) //[P, 2]in_width/32
preds = vmem(cntrl+#6) //[P, 2]vector of predicate cntrls
e2 = add(e1, e1) //[P, 2]entry 1 of predictes
} {
in_height = mpyi(in_height, in_width) //[P, 3]total iterations of fused loop
vq1 = vand(preds, e1) //[P, 3]set up vec redicate 1
delta4 = vdelta(d31d00, perm1) //[0, 0]perform 1st shuffle even, even+odd
intw31w00 = d31d00 //[0, 0]lifetime renewal
} {
p3 = sp1loop0(.L_height, in_height) //[P, 4]set up loop firast iteration prolog
perm2 = vmem(cntrl+#1) //[P, 4]2nd cntrl of permute file
e3 = add(e2, e2) //[P, 4]3rd entry ni preidcates
if(vq1) intw31w00.w += delta4.w //[0, 1]groups of 1word 1_1_
} {
out_ptr_1 = sub(out_ptr, width_bytes) //[P, 5]previous line
vq2 = vand(preds, e2) //[P, 5]2nd predicate pairs of words
e4 = add(e3, e3) //[P, 5]4th preicate address
d31d00 = vmem(in_ptr++#1) //[0, 2]2nd 32 depth of data
} {
perm3 = vmem(cntrl+#2) //[P, 6]3rd permute groups of 8 words
vq3 = vand(preds, e3) //[P, 6]set up 3rd vector predicate
e5 = add(e4, e4) //[P, 6]5th predicate entry
delta8 = vdelta(intw31w00, perm2) //[0, 3]spermute pairs of words
} {
lcount = add(in_width, #1) //[P, 7]set up inner width counter
perm4 = vmem(cntrl+#3) //[P, 7]4th entryof permute controls
ne5 = not(e5) //[P, 7]make ne5 not e5
if(vq2) intw31w00.w += delta8.w //[0, 4]groups of 2words 11__11__
} {
out_ptr_1 = sub(out_ptr, width_bytes) //[P, 8]previous line
vzero = #0 //[P, 8]set up a vector 0
perm5 = vmem(cntrl+#4) //[P, 8]5th permute entry groups of 16 words
d31d00.uw = vrmpy(d31d00.ub, filt_offset.ub) //[0, 5]32 sums of 4
} {
perm6 = vmem(cntrl+#5) //[P, 9]6th entry groups of 32 words
out_ptr_1 = add(out_ptr_1, #-128) //[P, 9]correct for pipeline
intx31x00 = intw31w00 //[0, 6]break lifetime for sp loop
delta16 = vdelta(intw31w00, perm3) //[0, 6]permute groupes of 8 words
}
/* -------------------------------------------------------------- */
#if 0
{
if(vq3) intx31x00.w += delta16.w //[0, 7]groups of 4words 1111____1111____
delta4 = vdelta(d31d00, perm1) //[1, 0]
intw31w00 = d31d00 //[1, 0]
} {
if(vq1) intw31w00.w += delta4.w //[1, 1]groups of 1word 1_1_
} {
delta32 = vdelta(intx31x00, perm4) //[0, 9]
d31d00 = vmem(in_ptr++#1) //[1, 2]
} {
vq4 = vand(preds, e4) //[0,10]
delta8 = vdelta(intw31w00, perm2) //[1, 3]
} {
if(vq4) intx31x00.w += delta32.w //[0,11]groups of 8words 11111111________
if(vq2) intw31w00.w += delta8.w //[1, 4]groups of 2words 11__11__
} {
d31d00.uw = vrmpy(d31d00.ub, filt_offset.ub) //[1, 5]32 sums of 4
} {
inty31y00 = intx31x00 //[0,13]
intx31x00 = intw31w00 //[1, 6]
delta16 = vdelta(intw31w00, perm3) //[1, 6]
}
#endif
/* -------------------------------------------------------------- */
.balign 32
.L_height:
{
prev_line = vmem(out_ptr_1++#1) //[0,14]
if(vq3) intx31x00.w += delta16.w //[1, 7]groups of 4words 1111____1111____
delta4 = vdelta(d31d00, perm1) //[2, 0]
intw31w00 = d31d00 //[2, 0]
} {
p2 = cmp.eq(e5,ne5) //[0,15]enable p2
delta64 = vdelta(inty31y00, perm5) //[0,15]
vq5 = vand(preds, e5) //[0,15]
if(vq1) intw31w00.w += delta4.w //[2, 1]groups of 1word 1_1_
} {
lcount = add(lcount, #-1) //[0,16]
if(vq5) inty31y00.w += delta64.w //[0,16]groups of16words 111--111___--___
delta32 = vdelta(intx31x00, perm4) //[1, 9]
d31d00 = vmem(in_ptr++#1) //[2, 2]
} {
if(!p2) prev_line = vzero //[0,17]
delta128.w = vadd(inty31y00.w, delta128.w) //[0,17]add previous last value
vq4 = vand(preds, e4) //[1,10]
delta8 = vdelta(intw31w00, perm2) //[2, 3]
} {
prev_line.w = vadd(prev_line.w, delta128.w) //[0,18]
if(p3)vmem(out_ptr++#1) = prev_line.new //[0,18]
if(vq4) intx31x00.w += delta32.w //[1,11]groups of 8words 11111111________
if(vq2) intw31w00.w += delta8.w //[2, 4]groups of 2words 11__11__
} {
p0 = cmp.eq(lcount, #0) //[0,19]
if(p0.new) ne5_lcount = combine(e5, in_width) //[0,19]
delta128 = vdelta(delta128, perm6) //[0,19]full replication of last word
d31d00.uw = vrmpy(d31d00.ub, filt_offset.ub) //[2, 5]32 sums of 4
} {
if(p0) delta128 = vzero //[0,20]
inty31y00 = intx31x00 //[1,13]
intx31x00 = intw31w00 //[2, 6]
delta16 = vdelta(intw31w00, perm3) //[2, 6]
}:endloop0
/* -------------------------------------------------------------- */
#if 0
{
prev_line = vmem(out_ptr_1++#1) //[1,14]
if(vq3) intx31x00.w += delta16.w //[2, 7]groups of 4words 1111____1111____
} {
p2 = cmp.eq(e5,ne5) //[1,15]enable p2
delta64 = vdelta(inty31y00, perm5) //[1,15]
vq5 = vand(preds, e5) //[1,15]
} {
lcount = add(lcount, #-1) //[1,16]
if(vq5) inty31y00.w += delta64.w //[1,16]groups of16words 111--111___--___
delta32 = vdelta(intx31x00, perm4) //[2, 9]
} {
if(!p2) prev_line = vzero //[1,17]
delta128.w = vadd(inty31y00.w, delta128.w) //[1,17]add previous last value
vq4 = vand(preds, e4) //[2,10]
} {
prev_line.w = vadd(prev_line.w, delta128.w) //[1,18]
vmem(out_ptr++#1) = prev_line.new //[1,18]
if(vq4) intx31x00.w += delta32.w //[2,11]groups of 8words 11111111________
} {
p0 = cmp.eq(lcount, #0) //[1,19]
if(p0.new) ne5_lcount = combine(e5, in_width) //[1,19]
delta128 = vdelta(delta128, perm6) //[1,19]full replication of last word
} {
if(p0) delta128 = vzero //[1,20]
inty31y00 = intx31x00 //[2,13]
}
#endif
/* -------------------------------------------------------------- */
 {
    prev_line = vmem(out_ptr_1++#1)              //[2,14]load previous output line block
 } {
    p2 = cmp.eq(e5,ne5)                          //[2,15]conditionally enable p2 (true once lcount wrapped)
    delta64 = vdelta(inty31y00, perm5)           //[2,15]replicate element 31 across
    vq5 = vand(preds, e5)                        //[2,15]extract 5th predicate
 } {
    if(vq5) inty31y00.w += delta64.w             //[2,16]groups of16words 111--111___--___
 } {
    if(!p2) prev_line = vzero                    //[2,17]first image row: no previous line to add
    delta128.w = vadd(inty31y00.w, delta128.w)   //[2,17]add carry from previous 32-wide block
 } {
    prev_line.w = vadd(prev_line.w, delta128.w)  //[2,18]previous row + running row sum
    vmem(out_ptr++#1) = prev_line.new            //[2,18]store final 32 integrals
 } {
    jumpr r31                                    //[E, 0]return to caller
 }
/* -------------------------------------------------------------- */
.L_end:
/*======================================================================*/
.size ivint_asm, .L_end-ivint_asm
|
XiaoMi/nnlib | 17,391 | hexagon/asm_src/dwconv2dbbb_s2_d32_v60_h.S | /*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
* FUNCTIONS : gvconv2dbbb_v60_asm
*
* DESCRIPTION
* Perform 2d convolution using elements along depth, do only simple
* convolution.
* Sums are scaled and saturated to 8bits. Max and Min accumulations are kept.
*
* ARCHITECTURE : QDSP6V60 + HVX
*
* REVISION HISTORY:
* =================
*
* Author Date Comments
* -------------------------------------------------------------
* DJH 07/ 6/17 created
*
* CYCLE-COUNT:
*
* MEMORY
* CODESIZE = 928 bytes
* STACK = 80 bytes
* ASSUMPTIONS
*/
/*=============================================================================*/
.text
.file "dwconv2dbbb_s2_d32_v60_h.S"
.global dwconv2dbbb_s2_v60_asm
.balign 32
.type dwconv2dbbb_s2_v60_asm, @function
dwconv2dbbb_s2_v60_asm:
/*=============================================================================*/
//stride assumed 1 filt width assumed 3 - stride 2 requires new function
#define ptr_xi r0 //data
#define ptr_wi r1 //weights
#define ptr_zi r2 //results
#define next_in_width_depth r3 //width*depth*(stride==1)
#define next_out_width_depth r4 //next output line amount in bytes
#define next_in_width_32 r5 //width*32*(stride==1)
#define next_out_width_32 r16 //0next output line amount in bytes
#define in_depth r17 //1 total in depth split into rows of depth 32
#define out_width r18 //2is amount of work to be done
#define out_height r19 //3 number of vertical lines to perform
#define filt_height r20 //4 filt_height lines per filter
#define ptr_max r21 //5 maximum and minum buffer
#define recip_level r22 //6 255 / (MAX - MIN) - used to scale to bytes
#define filt_sumi r23 //7 gemsumb
#define stride_height r24 //8 vertical strideing any number
#define zshift r26 //9 shift correction for small accs
#define padding r27 //10 padding = 1 then shift 8 pad else shift 0
//-----------------------------------------------------------------
#define c4 r6 //
#define s16 r9 //const = 16
#define in_wide_deep_high_256 r8 //width*depth*filt_height - 256
#define depth r10 //current depth used
#define ptr_x0 r11 //
#define ptr_x1 r12 //
#define ptr_z0 r13 //
#define ptr_z1 r14 //
#define ptr_w r15 //
#define filt_sum r22 //
#define col_count r25 //
#define out_wide_deep_128 r7 //advance ptr 128 along and pack to current line start
#define PV(VSRC) .word (0x1DFFE020+VSRC) //debug vec reg
//-----------------------------------------------------------------
#define vrecip v0
#define woffset v1
#define s00 v2
#define s01 v3
#define s10 v4
#define s11 v5
#define s00_s v7
#define s01_s v8
#define s10_s v9
#define s11_s v17
#define d1_d0 v11:10
#define d3_d2 v13:12
#define d0 v10
#define d1 v11
#define d2 v12
#define d3 v13
#define d1d0 v10
#define d3d2 v12
#define d3_d0 v12
#define y0 v21 //
#define y1 v24 //
#define y2 v16 //
#define x0 v10 //
#define x1 v10 //
#define x2 v10 //
#define z3210 v26 //
#define z5432 v28 //
#define z7654 v29 //
#define zba98 v9 //
#define z9876 v6 //
#define z54__ v19 //
#define z__76 v20 //
#define z5476 v27 //
#define w_210 v22 //
#define ww210 v18 //
#define u_210 v23 //
#define maxo_maxe v31:30
#define mino_mine v15:14
#define maxe v30
#define mine v14
#define maxo v31
#define mino v15
#define SSR <<1:rnd:sat:shift //simplify mpy instruction
/*=============================================================================*/
{ allocframe(#72) // 0th entry on stack is (72+8)/4 =20 ints
maxe = #0 //
} {
memd(sp+#0) = r17:16 //
memd(sp+#8) = r19:18 //
} {
memd(sp+#16) = r21:20 //
memd(sp+#24) = r23:22 //
s16= #16 //
c4 = #-4 //
} {
memd(sp+#32) = r25:24 //
memd(sp+#40) = r27:26 //
r23 = ##0x7fffffff //max pos
} {
mine = vsplat(r23) //
zshift = memw(sp+#29<<2) //1 - #8 2 - 0
} {
maxe.w = vsub(maxe.w, mine.w) //
next_out_width_32 = memw(sp+#20<<2) //
in_depth = memw(sp+#21<<2) //
} {
out_width = memw(sp+#22<<2) //
out_height = memw(sp+#23<<2) //
} {
recip_level = memw(sp+#26<<2) //
padding = memw(sp+#30<<2) //1 - #8 2 - 0
} {
vrecip = vsplat(recip_level) //
ptr_max = memw(sp+#25<<2) //
filt_sumi = memw(sp+#27<<2) //
} {
out_wide_deep_128=add(next_out_width_depth,#-128) //
filt_height = memw(sp+#24<<2) //
stride_height = memw(sp+#28<<2) //skip n vert lines
} {
in_wide_deep_high_256=add(next_in_width_depth, #-256) //
in_depth = lsr(in_depth, #5) // 1/32
filt_height = add(filt_height, #-1) //
}
.balign 32
.L_height:
{
col_count = out_width //
ptr_z0 = ptr_zi //
ptr_zi=add(ptr_zi,next_out_width_depth) //
} {
ptr_x0 = ptr_xi //
ptr_xi+=mpyi(next_in_width_depth,stride_height) //
}
.balign 32
.L_width:
{
loop1(.L_depth, in_depth) //
filt_sum = filt_sumi //
ptr_w = ptr_wi //restart filter stream
col_count = add(col_count, #-4) //
} {
x0.tmp = vmem(ptr_x0+#0) //[0,0]
y0.b = vshuff(x0.b) //[0,0]
ptr_x1 = ptr_x0 //[P,0]
}
/* --------------------------------------------------------------------------- */
.balign 32
.L_depth:
{
x1.tmp = vmem(ptr_x1+#1) //[0,1]
y1.b = vshuff(x1.b) //[0,1]
ptr_x0 = add(ptr_x0, next_in_width_32) //[P,1]
} {
x2.tmp = vmem(ptr_x1+#2) //[0,2]
y2.b = vshuff(x2.b) //[0,2]
ptr_z1 = ptr_z0 //[P,2]
ptr_z0 = add(ptr_z0, next_out_width_32) //[P,2]
} {
z3210.b = vshuff(y0.b) //[0,3]x3210
ptr_x1 =add(ptr_x1, next_in_width_depth) //[0,3]move to next pt in same depth position
woffset = vmem(filt_sum++#1) //[P,3]
loop0(.L_vloop, filt_height) //[P,3]can have a filter of Nx3 stride = 1
} {
u_210.tmp = vmem(ptr_w++#1) //[0,4]
w_210.w = vasl(u_210.w, padding) //[0,4]
z7654.b = vshuff(y1.b) //[0,4]x7654
s00 = woffset //[P,4]filter offset * xoffset and bias
} {
zba98.b = vshuff(y2.b) //[0,5]
s01 = woffset //[P,5]filter offset * xoffset and bias
s10 = woffset //[P,5]filter offset * xoffset and bias
s11 = woffset //[P,5]filter offset * xoffset and bias
}
/* --------------------------------------------------------------------------- */
.balign 32
.L_vloop:
{ s00.w += vrmpy(z3210.ub, w_210.b) //[0,6]filter even output
z54__.w = vasl(z7654.w, s16) //[0,6]
x0.tmp = vmem(ptr_x1+#0) //[1,0]
y0.b = vshuff(x0.b) //[1,0]
} {
s10.w += vrmpy(z7654.ub, w_210.b) //[0,7]z5432
z__76.uw = vlsr(z7654.uw, s16) //[0,7]
x1.tmp = vmem(ptr_x1+#1) //[1,1]
y1.b = vshuff(x1.b) //[1,1]
} {
ww210 = w_210 //[0,8]
z5432.h = vshuffo(z54__.h, z3210.h) //[0,8]
x2.tmp = vmem(ptr_x1+#2) //[1,2]
y2.b = vshuff(x2.b) //[1,2]
} {
z9876.h = vshuffe(zba98.h, z__76.h) //[0,9]
z3210.b = vshuff(y0.b) //[1,3]x3210
ptr_x1 =add(ptr_x1, next_in_width_depth) //[1,3]move to next pt in same depth position
} {
s01.w += vrmpy(z5432.ub, ww210.b) //[0,10]filter even output
u_210.tmp = vmem(ptr_w++#1) //[1,4]
w_210.w = vasl(u_210.w, padding) //[1,4]
z7654.b = vshuff(y1.b) //[1,4]x7654
} {
s11.w += vrmpy(z9876.ub, ww210.b) //[0,11]filter even output
zba98.b = vshuff(y2.b) //[1,5]
}:endloop0
/* --------------------------------------------------------------------------- */
//loop drain: finish the last filter row, then start scaling/min-max.
//FIX: removed four leftover debug packets PV(2)/PV(4)/PV(3)/PV(5) that
//emitted the "debug vec reg" trace word (see the PV macro above) on every
//4-output column iteration; they are not part of the computation.
    { s00.w += vrmpy(z3210.ub, w_210.b)          //[1,6]filter even output
      z54__.w = vasl(z7654.w, s16)               //[1,6]
    } {
      s10.w += vrmpy(z7654.ub, w_210.b)          //[1,7]z5432
      z__76.uw = vlsr(z7654.uw, s16)             //[1,7]
      z5432.h = vshuffo(z54__.h, z3210.h)        //[1,7]
    } {
      z9876.h = vshuffe(zba98.h, z__76.h)        //[1,8]
      s00_s.w = vasl(s00.w, zshift)              //[E,8]
      mine.w = vmin(mine.w, s00.w)               //[E,8]
      maxe.w = vmax(maxe.w, s00.w)               //[E,8]
    } {
      s01.w += vrmpy(z5432.ub, w_210.b)          //[1,9]filter even output
      s10_s.w = vasl(s10.w, zshift)              //[W,9]
      maxe.w = vmax(maxe.w, s10.w)               //[E,9]
    } {
      s11.w += vrmpy(z9876.ub, w_210.b)          //[1,10]filter even output
      mine.w = vmin(mine.w, s10.w)               //[E,10]
      maxe.w = vmax(maxe.w, s01.w)               //[E,10]
    } {
      s01_s.w = vasl(s01.w, zshift)              //[E,11]
      d0.w = vmpye(s00_s.w, vrecip.uh)           //[E,11]
      mine.w = vmin(mine.w, s01.w)               //[E,11]
    }
/* --------------------------------------------------------------------------- */
{ s11_s.w = vasl(s11.w, zshift) //[E,12]
d0.w += vmpyo(s00_s.w, vrecip.h):SSR //[E,12]
maxe.w = vmax(maxe.w, s11.w) //[E,12]
} {
d1.w = vmpye(s01_s.w, vrecip.uh) //[E,13]
mine.w = vmin(mine.w, s11.w) //[E,13]
} {
d1.w += vmpyo(s01_s.w, vrecip.h):SSR //[E,14]
} {
d2.w = vmpye(s10_s.w, vrecip.uh) //[E,15]
} {
d2.w += vmpyo(s10_s.w, vrecip.h):SSR //[E,16]
d1d0.h = vpacke(d1.w, d0.w) //[E,16]
} {
d3.w = vmpye(s11_s.w, vrecip.uh) //[E,17]
}
{
d3.w += vmpyo(s11_s.w, vrecip.h):SSR //[E,18]
} {
x0.tmp = vmem(ptr_x0+#0) //[0,0]
y0.b = vshuff(x0.b) //[0,0]
ptr_x1 = ptr_x0 //[P,0]
} {
d3d2.h = vpacke(d3.w, d2.w) //[E,20]
} {
} {
d3_d0.ub = vpack(d3d2.h,d1d0.h):sat //[E,22]
vmem(ptr_z1+#0) = d3_d0.new //[E,22]
}:endloop1 //end depth
/* --------------------------------------------------------------------------- */
{ ptr_x0=sub(ptr_x0,in_wide_deep_high_256) //next inputs
ptr_z0=sub(ptr_z0,out_wide_deep_128) //next output
p0 = cmp.eq(col_count, #0) //
if(!p0.new) jump:t .L_width //
}
/* --------------------------------------------------------------------------- */
//height-loop backedge.  NOTE(review): within a packet the compare reads the
//PRE-decrement value of out_height (Hexagon packet semantics), so the loop
//exits on the iteration entered with out_height==1, i.e. the body runs the
//full original out_height times - confirm against the V60 PRM if modifying.
 { out_height = add(out_height, #-1)             //
   p0 = cmp.eq(out_height, #1)                   //
   if(!p0.new) jump:t .L_height                  //
 }
/* --------------------------------------------------------------------------- */
{
loop0(.L_peak, #4) //
maxo_maxe = vdeal(maxe, maxe, c4) //
}
.L_peak:
{
maxe.w = vmax(maxe.w, maxo.w) //
mino_mine = vdeal(mine, mine, c4) //
} {
mine.w = vmin(mine.w, mino.w) //
} {
maxo_maxe = vdeal(maxe, maxe, c4) //
}:endloop0
{
maxe.w = vmax(maxe.w, maxo.w) //
vmem(ptr_max+#0) = maxe.new //
mino_mine = vdeal(mine, mine, c4) //
} {
mine.w = vmin(mine.w, mino.w) //
vmem(ptr_max+#1) = mine.new //
}
/* --------------------------------------------------------------------------- */
{ r17:16 = memd(sp+#0) //restore stack
r19:18 = memd(sp+#8) //Q
} {
r21:20 = memd(sp+#16) //Q
r23:22 = memd(sp+#24) //Q
} {
r25:24 = memd(sp+#32) //Q
r27:26 = memd(sp+#40) //Q
} {
dealloc_return //Q
}
.L_end:
.size dwconv2dbbb_s2_v60_asm, .L_end-dwconv2dbbb_s2_v60_asm
/*=============================================================================*/
|
XiaoMi/nnlib | 11,790 | hexagon/asm_src/gemaddvvm_h.S |
/*
* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
*/
/*======================================================================*/
/* FUNCTIONS : gemaddvvm_asm */
/* */
/* DESCRIPTION */
/* Add y row to each row of matrix z, add column values */
/* x to all columns of z. */
/* */
/* ARCHITECTURE : QDSP6V6 + HVX */
/*======================================================================*/
/* REVISION HISTORY: */
/* ================= */
/* */
/* Author Date Comments */
/* ------------------------------------------------------------- */
/* DJH 07/09/16 created */
/*======================================================================*/
/* CYCLE-COUNT: */
/* */
/* -> 5*N/128+6 */
/* */
/* MEMORY */
/* CODESIZE = 240 bytes */
/* ASSUMPTIONS */
/* z output and input data is 128byte aligned and multiple of 32 */
/* y input data is 128byte aligned and multiple of 32 */
/* x is aligned to 8bytes */
/* C MODEL */
/*======================================================================*/
#if 0
void gemaddvmm_cn (int *x, int *y, int *z, int N, int M, int * maxmin)
{
int i, j;
for (i=0; i < N; i++) {
for (j=0; j < 32; j++) {
z[i*M+j] += x[i] + y[j] ;
      maxmin[1] = (z[i*M+j] > maxmin[1]) ? z[i*M+j] : maxmin[1];
      maxmin[0] = (z[i*M+j] < maxmin[0]) ? z[i*M+j] : maxmin[0];
}
}
return;
}
#endif
/*======================================================================*/
.text
.file "gemaddvvm_h.S"
.global gemaddvvm_asm
.balign 32
.type gemaddvvm_asm, @function
gemaddvvm_asm:
/*======================================================================*/
#define ptr_x0 r0 //row sums
#define ptr_y0 r1 //column sums
#define ptr_z0 r2 //product accumulator
#define n r3 //size of array
#define m r4 //size of array
#define ptr_maxmin r5 //
#define reset r9 //
#define sum1sum0 r11:10 //
#define sum1 r11 //
#define sum0 r10 //
#define ptr_z1 r8 //
#define c4 r6 //
#define mstride M0 //
/*======================================================================*/
#define x0 v0 //
#define z0 v1 //
#define z1 v2 //
#define x2 v3 //
#define z2 v4 //
#define z3 v5 //
#define y0 v6 //
#define maxomaxe v9:8 //
#define maxo v9 //
#define maxe v8 //
#define max v8 //
#define minomine v11:10 //
#define mino v11 //
#define mine v10 //
#define min v10 //
/*======================================================================*/
{
y0 = vmem(ptr_y0+#0) //[P, 0]
n = lsr(n, #1) //[P, 0]
m = asl(m, #2) //[P, 0]
} {
dcfetch(ptr_x0+#1<<5) //[P, 1]
mstride = m //[P, 1]
n = add(n, #-1) //[P, 1]
max = vmem(ptr_maxmin+#1) //[P, 1]
} {
min = vmem(ptr_maxmin+#0) //[P, 1]
ptr_z1 = ptr_z0 //[P, 2]
sum1sum0 = memd(ptr_x0++#1<<3)//[0, 0]
} {
reset = memw(sp+#0) //[P, 2]
x0 = vsplat(sum0) //[0, 1]
z0.tmp = vmem(ptr_z0++mstride)//[0, 1]
z0.w = vadd(y0.w, z0.w) //[0, 1]
} {
p2 = !cmp.eq(reset, #0) //reset 0 accumulate
x2 = vsplat(sum1) //[0, 2]
z1.w = vadd(x0.w, z0.w) //[0, 2]
vmem(ptr_z1++mstride)= z1.new //[0, 2]
} {
c4 = #4 //
if(p2) max = z1 //[1, 6]
if(p2) min = z1 //[1, 6]
loop0(.L_loopN, n) //[P, 5]
} {
z2.tmp = vmem(ptr_z0++mstride)//[0, 3]
z2.w = vadd(x2.w, z2.w) //[0, 3]
dcfetch(ptr_x0+#3<<5) //[0, 3]
min.w = vmin(min.w, z1.w) //[0, 3]
}
#if 0
{
max.w = vmax(max.w, z1.w) //[0, 4]
z3.w = vadd(y0.w, z2.w) //[0, 4]
vmem(ptr_z1++mstride)= z3.new //[0, 4]
sum1sum0 = memd(ptr_x0++#1<<3)//[1, 0]
} {
max.w = vmax(max.w, z3.w) //[0, 5]
x0 = vsplat(sum0) //[1, 1]
z0.tmp = vmem(ptr_z0++mstride)//[1, 1]
z0.w = vadd(y0.w, z0.w) //[1, 1]
} {
min.w = vmin(min.w, z3.w) //[0, 6]
x2 = vsplat(sum1) //[1, 2]
z1.w = vadd(x0.w, z0.w) //[1, 2]
vmem(ptr_z1++mstride)= z1.new //[1, 2]
} {
z2.tmp = vmem(ptr_z0++mstride)//[1, 3]
z2.w = vadd(x2.w, z2.w) //[1, 3]
dcfetch(ptr_x0+#3<<5) //[1, 3]
min.w = vmin(min.w, z1.w) //[1, 3]
}
#endif
.balign 32
.L_loopN:
{
max.w = vmax(max.w, z1.w) //[1, 4]
z3.w = vadd(y0.w, z2.w) //[1, 4]
vmem(ptr_z1++mstride)= z3.new //[1, 4]
sum1sum0 = memd(ptr_x0++#1<<3)//[2, 0]
} {
min.w = vmin(min.w, z3.w) //[1, 5]
x0 = vsplat(sum0) //[2, 1]
z0.tmp = vmem(ptr_z0++mstride)//[2, 1]
z0.w = vadd(y0.w, z0.w) //[2, 1]
} {
max.w = vmax(max.w, z3.w) //[1, 6]
x2 = vsplat(sum1) //[2, 2]
z1.w = vadd(x0.w, z0.w) //[2, 2]
vmem(ptr_z1++mstride)= z1.new //[2, 2]
} {
z2.tmp = vmem(ptr_z0++mstride)//[2, 3]
z2.w = vadd(x2.w, z2.w) //[2, 3]
dcfetch(ptr_x0+#3<<5) //[2, 3]
min.w = vmin(min.w, z1.w) //[2, 3]
}:endloop0
{
max.w = vmax(max.w, z1.w) //[2, 4]
z3.w = vadd(y0.w, z2.w) //[2, 4]
vmem(ptr_z1++mstride)= z3.new //[2, 4]
} {
loop0(.L_peak, #5)
min.w = vmin(min.w, z3.w) //[2, 5]
}
.L_peak:
{
maxomaxe = vshuff(maxe, maxe, c4)
} {
maxe.w = vmax(maxo.w, maxe.w)
minomine = vshuff(mine, mine, c4)
} {
c4 = add(c4, c4)
mine.w = vmin(mino.w, mine.w)
}:endloop0
{ vmem(ptr_maxmin+#1) = max //[E, 0]
} {
vmem(ptr_maxmin+#0) = min //[E, 1]
}{
jumpr r31 //[E, 1]
}
/*======================================================================*/
.L_end:
.size gemaddvvm_asm, .L_end-gemaddvvm_asm
/*======================================================================*/
        .global gemaddvvm_asm1
        .balign 32
        .type gemaddvvm_asm1, @function
gemaddvvm_asm1:
/*======================================================================*/
/* Same inner computation as gemaddvvm_asm above                        */
/* (z[i][0..31] += x[i] + y[0..31]) but with no max/min tracking, no    */
/* 'reset' stack argument, and no final lane reduction.                 */
/* Register and vector #defines are shared with gemaddvvm_asm.          */
/* n is again halved (two rows per iteration), so an even row count is  */
/* assumed -- TODO(review) confirm with callers.                        */
/*======================================================================*/
{
  y0 = vmem(ptr_y0+#0)          //[P, 0]column sums, loaded once
  n = lsr(n, #1)                //[P, 0]two rows per iteration
  m = asl(m, #2)                //[P, 0]row stride words -> bytes
} {
  dcfetch(ptr_x0+#1<<5)         //[P, 1]prefetch row sums
  mstride = m                   //[P, 1]
  n = add(n, #-1)               //[P, 1]first row pair handled in prolog
} {
  ptr_z1 = ptr_z0               //[P, 2]
  sum1sum0 = memd(ptr_x0++#1<<3)//[0, 0]
} {
  x0 = vsplat(sum0)             //[0, 1]
  z0.tmp = vmem(ptr_z0++mstride)//[0, 1]
  z0.w = vadd(y0.w, z0.w)       //[0, 1]
} {
  x2 = vsplat(sum1)             //[0, 2]
  z1.w = vadd(x0.w, z0.w)       //[0, 2]
  vmem(ptr_z1++mstride)= z1.new //[0, 2]
} {
  c4 = #4                       //
  loop0(.L1_loopN, n)           //[P, 5]
} {
  z2.tmp = vmem(ptr_z0++mstride)//[0, 3]
  z2.w = vadd(x2.w, z2.w)       //[0, 3]
  dcfetch(ptr_x0+#3<<5)         //[0, 3]
}
        .balign 32
.L1_loopN:
{
  z3.w = vadd(y0.w, z2.w)       //[1, 4]
  vmem(ptr_z1++mstride)= z3.new //[1, 4]
  sum1sum0 = memd(ptr_x0++#1<<3)//[2, 0]
} {
  x0 = vsplat(sum0)             //[2, 1]
  z0.tmp = vmem(ptr_z0++mstride)//[2, 1]
  z0.w = vadd(y0.w, z0.w)       //[2, 1]
} {
  x2 = vsplat(sum1)             //[2, 2]
  z1.w = vadd(x0.w, z0.w)       //[2, 2]
  vmem(ptr_z1++mstride)= z1.new //[2, 2]
} {
  z2.tmp = vmem(ptr_z0++mstride)//[2, 3]
  z2.w = vadd(x2.w, z2.w)       //[2, 3]
  dcfetch(ptr_x0+#3<<5)         //[2, 3]
}:endloop0
{ z3.w = vadd(y0.w, z2.w)       //[2, 4]
  vmem(ptr_z1++mstride)= z3.new //[2, 4]
} {
  jumpr r31                     //[E, 1]return to caller
}
/*======================================================================*/
.L1_end:
        .size gemaddvvm_asm1, .L1_end-gemaddvvm_asm1
|
XiaoMi/nnlib | 15,018 | hexagon/asm_src/repstream2_h.S | /*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
Memory
CODESIZE 304 bytes
STACK 16 bytes
Description
Stride 1 or 2 Shuffle the stream with itself to feed the new vrmpy ops in V65
and while we are at it, do the alignment to save ops
*/
/* --------------------------------------------------------------------------- */
        .text
        .global repstream2_asm
        .balign 32
        .type repstream2_asm, @function
repstream2_asm:
/* --------------------------------------------------------------------------- */
/* Aligns an input activation stream and shuffles it with itself (per the      */
/* Description above) so the paired vrmpy ops introduced in V65 can consume    */
/* it directly, writing into a circular buffer [cbuf_base, cbuf_base+cbuf_size)*/
/* that wraps at .L_rows epilog.  p0 selects the stride_width == 1 path; only  */
/* strides 1 and 2 are handled.  The #else branch below is the retained older, */
/* non-circular version kept for reference.                                    */
/* --------------------------------------------------------------------------- */
#if 1
/* --------------------------------------------------------------------------- */
#define ptr_x0          r0  //input data raw
#define ptr_z1          r1  //output cbuf pointer aligned/shuffled data
#define next_in_width   r2  //width of padded input
#define in_depth        r3  //input depth
#define buf_fill        r4  //number of lines to fill
#define rpad_lpad       r5  //right and left pad on input requirement packed
#define stride_width    r13 //stride_width
#define cbuf_base       r14 //base of the circular buffer
#define buf_height      r15 //number of total logical lines
#define inzero          r12 //activation zero value
/* --------------------------------------------------------------------------- */
#define cbuf_eob        r16 //end of circ buf
#define cbuf_size       r17 //size of circular buffer
#define width           r10 //width in 128byte block
#define width_cnt       r4  //width left in 128byte block
#define rpad            r11 //right pad used to minimize stray maxes
#define lpad            r6  //left pad that gets removed
#define cm4             r7  //shuffle ints
#define ptr_x1          r8  //temp input ptr
#define buf_width       r9  //width of circ buffer 64*(next_in_width+3-lpad+rpad)&(-4)
/* --------------------------------------------------------------------------- */
#define x3x2x1x0        v0  //input data
#define x7x6x5x4        v1  //next input data
#define y3y2y1y0        v4  //aligned input data
#define y7y6y5y4        v5  //delayed aligned input data
#define ybyay9y8        v6  //delayed by 2 aligned data
#define z73z62          v3  //shuffled delayed input
#define z51z40          v2  //shuffled delayed input
#define z73z62_z51z40   v3:2 //shuffled delayed input
#define vin_zero        v7
#define PV(VSRC) .word (0x1DFFE020+VSRC) //debug vec reg
#define PS(SSRC) .word (0x1DFFE100+SSRC) //debug vec reg
/* --------------------------------------------------------------------------- */
{ allocframe(#56)                              //0th stack arg at (56+8)/4=16 ints
} {
  memd(sp+#0<<2) = r17:16                      //save callee-saved pairs
  memd(sp+#2<<2) = r19:18                      //
} {
  width = add(next_in_width, #3)               //[S, 0]round to nearest 4
  inzero = memw(sp+#19<<2)                     //[S, 0]
} {
  width = sub(width.L, rpad_lpad.L)            //[S, 1]next_in_width-lpad+3
  inzero = vsplatb(inzero)                     //[S, 2]
} {
  rpad = lsr(rpad_lpad, #18)                   //[S, 1]next_in_width-lpad+3+rpad
  width = lsr(width, #2)                       //[S, 2]number of 128byte blks to fetch
  buf_height = memw(sp+#18<<2)                 //[S, 0]
} {
  cbuf_size = mpyi(buf_height, in_depth)       //
  rpad = add(rpad, width)                      //
  buf_fill = mpyi(buf_fill, in_depth)          //
} {
  vin_zero = vsplat(inzero)                    //[S, 3]
  cbuf_size = mpyi(cbuf_size, rpad)            //
  cm4 = #-4                                    //[S, 1]shuffle ints
} {
  cbuf_size = asl(cbuf_size, #3)               //
  lpad = asl(rpad_lpad, #5)                    //[S, 1]alignment % 128
  stride_width = memw(sp+#16<<2)               //[S, 0] stride_width
  cbuf_base = memw(sp+#17<<2)                  //[S, 0]
} {
  cbuf_eob = add(cbuf_base, cbuf_size)         //
  p0 = cmp.eq(stride_width, #1)                //[S, 2]is stride = 1? (or 2)
  buf_fill = lsr(buf_fill, #5)                 //
} {
  cbuf_eob = add(cbuf_eob, #-4)                //
  loop1(.L_rows, buf_fill)                     //[S, 3]loop over num lines
}
/* --------------------------------------------------------------------------- */
        .balign 32
.L_rows:
{ p3 =sp1loop0(.L_cols, rpad)                  //[P, 0]inner loop width
  width_cnt = add(width, #-1)                  //[P, 0]
  x3x2x1x0 = vmem(ptr_x0+#0)                   //[P, 0]load 1st 128
  x7x6x5x4 = vin_zero                          //[P, 0]
} {
  ptr_x1 = add(ptr_x0, #128)                   //[P, 1]
  p1 = cmp.gt(width_cnt, #0)                   //[P, 1]
  width_cnt = add(width_cnt, #-1)              //[P, 1]
} {
  if(p1) x7x6x5x4.tmp = vmem(ptr_x1++#1):nt    //[P, 2]stride = 1 data
  y3y2y1y0 = valign(x7x6x5x4,x3x2x1x0, lpad)   //[P, 2]aligned data next 128
  x3x2x1x0 = x7x6x5x4                          //[P, 2]update pipe
  x7x6x5x4 = vin_zero                          //[P, 2]
} {
  p1 = cmp.gt(width_cnt, #0)                   //[P, 3]
  width_cnt = add(width_cnt, #-1)              //[P, 3]
  if(p0) jump .L_cols                          //[P, 3]skip 1 load
  if(p0) ybyay9y8 = y3y2y1y0                   //[P, 3]update 1 stage pipe y0 = y2
} {
  if(p1) x7x6x5x4.tmp = vmem(ptr_x1++#1):nt    //[P, 4]stride = 2 data
  y7y6y5y4 = valign(x7x6x5x4,x3x2x1x0, lpad)   //[P, 4]aligned data next 128
  x3x2x1x0 = x7x6x5x4                          //[P, 4]update pipe
  x7x6x5x4 = vin_zero                          //[P, 4]
} {
  p1 = cmp.gt(width_cnt, #0)                   //[P, 5]
  width_cnt = add(width_cnt, #-1)              //[P, 5]
}
.L_cols:
{
  if( p0) y3y2y1y0 = ybyay9y8                  //[0, 0]update 1 stage pipe y0 = y2
  if(p1) x7x6x5x4.tmp = vmem(ptr_x1++#1):nt    //[0, 0]
  ybyay9y8 = valign(x7x6x5x4,x3x2x1x0, lpad)   //[0, 0]aligned data next 128
  x3x2x1x0 = x7x6x5x4                          //[0, 0]update pipe
} {
  if(p3) vmem(ptr_z1++#1) = z73z62             //[0, 1]empty pipe
  p1 = cmp.gt(width_cnt, #0)                   //[0, 1]
  width_cnt = add(width_cnt, #-1)              //[0, 1]
  x7x6x5x4 = vin_zero                          //[0, 1]
} {
  z73z62_z51z40= vshuff(ybyay9y8,y3y2y1y0,cm4) //[0, 2]shuffle up for new vrmpy
  vmem(ptr_z1++#1) = z51z40.new                //[0, 2]empty pipe
  if(!p0) y3y2y1y0 = y7y6y5y4                  //[0, 2]update 2 stage shift reg
  if(!p0) y7y6y5y4 = ybyay9y8                  //[0, 2]y0 = y1 || y2 = y2
}:endloop0
/* --------------------------------------------------------------------------- */
{ vmem(ptr_z1++#1) = z73z62                    //[E, 0]empty pipe
  ptr_x0 = addasl(ptr_x0, next_in_width, #5)   //[E, 0]update input next row
} {
  p2 = cmp.gt(ptr_z1, cbuf_eob)                //if circ buf write passed end of buf
  if(p2.new) ptr_z1 = sub(ptr_z1, cbuf_size)   //then subtract buf size take to 1st row
}:endloop1
{
  r17:16 = memd(sp+#0<<2)                      //restore callee-saved pairs
  r19:18 = memd(sp+#2<<2)                      //
} {
  dealloc_return                               //[T, 0]return to caller
}
/* --------------------------------------------------------------------------- */
#else
//older previous version
/* --------------------------------------------------------------------------- */
#define ptr_x0          r0  //input data raw
#define ptr_z1          r1  //output pointer aligned/shuffled data
#define next_in_width   r2  //width of padded input
#define buf_height      r3  //number of total lines
#define rpad_lpad       r4  //right and left pad on input requirement packed
#define stride          r5  //stride_width
#define inzero          r12
#define width           r10 //width in 128byte block
#define width_cnt       r4  //width left in 128byte block
#define rpad            r11
#define lpad            r6
#define cm4             r7  //shuffle ints
#define ptr_x1          r8  //temp input ptr
#define in_width_32     r9  //total width in bytes of input
/* --------------------------------------------------------------------------- */
#define x3x2x1x0        v0  //input data
#define x7x6x5x4        v1  //next input data
#define y3y2y1y0        v4  //aligned input data
#define y7y6y5y4        v5  //delayed aligned input data
#define ybyay9y8        v6  //delayed by 2 aligned data
#define z73z62          v3  //shuffled delayed input
#define z51z40          v2  //shuffled delayed input
#define z73z62_z51z40   v3:2 //shuffled delayed input
#define vin_zero        v7
#define PV(VSRC) .word (0x1DFFE020+VSRC) //debug vec reg
#define PS(SSRC) .word (0x1DFFE100+SSRC) //debug vec reg
/* --------------------------------------------------------------------------- */
{ in_width_32 = asl(next_in_width, #5)         //[S, 0] distance to next row
  inzero = memw(sp+#0<<2)                      //[S, 0]
  width = add(next_in_width, #3)               //[S, 0]round to nearest 4
  rpad = lsr(rpad_lpad, #18)                   //[S, 0]extract right pad
} {
  width = sub(width.L, rpad_lpad.L)            //[S, 1]next_in_width-lpad+3
  cm4 = #-4                                    //[S, 1]shuffle ints
  lpad = asl(rpad_lpad, #5)                    //[S, 1]alignment % 128
} {
  inzero = vsplatb(inzero)                     //[S, 2]
  p0 = cmp.eq(stride, #1)                      //[S, 2]is stride = 1? (or 2)
  width = lsr(width, #2)                       //[S, 2]number of 128byte blks to fetch
} {
  vin_zero = vsplat(inzero)                    //[S, 3]
  loop1(.L_rows, buf_height)                   //[S, 3]loop over num lines
  rpad = add(rpad, width)                      //[S, 3]account for prolog
}
/* --------------------------------------------------------------------------- */
        .balign 32
.L_rows:
{ p3 =sp1loop0(.L_cols, rpad)                  //[P, 0]inner loop width
  width_cnt = add(width, #-1)                  //[P, 0]
  x3x2x1x0 = vmem(ptr_x0+#0)                   //[P, 0]load 1st 128
  x7x6x5x4 = vin_zero                          //[P, 0]
} {
  ptr_x1 = add(ptr_x0, #128)                   //[P, 1]
  p1 = cmp.gt(width_cnt, #0)                   //[P, 1]
  width_cnt = add(width_cnt, #-1)              //[P, 1]
} {
  if(p1) x7x6x5x4.tmp = vmem(ptr_x1++#1):nt    //[P, 2]stride = 1 data
  y3y2y1y0 = valign(x7x6x5x4,x3x2x1x0, lpad)   //[P, 2]aligned data next 128
  x3x2x1x0 = x7x6x5x4                          //[P, 2]update pipe
  x7x6x5x4 = vin_zero                          //[P, 2]
} {
  p1 = cmp.gt(width_cnt, #0)                   //[P, 3]
  width_cnt = add(width_cnt, #-1)              //[P, 3]
  if(p0) jump .L_cols                          //[P, 3]skip 1 load
  if(p0) ybyay9y8 = y3y2y1y0                   //[P, 3]update 1 stage pipe y0 = y2
} {
  if(p1) x7x6x5x4.tmp = vmem(ptr_x1++#1):nt    //[P, 4]stride = 2 data
  y7y6y5y4 = valign(x7x6x5x4,x3x2x1x0, lpad)   //[P, 4]aligned data next 128
  x3x2x1x0 = x7x6x5x4                          //[P, 4]update pipe
  x7x6x5x4 = vin_zero                          //[P, 4]
} {
  p1 = cmp.gt(width_cnt, #0)                   //[P, 5]
  width_cnt = add(width_cnt, #-1)              //[P, 5]
}
.L_cols:
{
  if( p0) y3y2y1y0 = ybyay9y8                  //[0, 0]update 1 stage pipe y0 = y2
  if(p1) x7x6x5x4.tmp = vmem(ptr_x1++#1):nt    //[0, 0]
  ybyay9y8 = valign(x7x6x5x4,x3x2x1x0, lpad)   //[0, 0]aligned data next 128
  x3x2x1x0 = x7x6x5x4                          //[0, 0]update pipe
} {
  if(p3) vmem(ptr_z1++#1) = z73z62             //[0, 1]empty pipe
  p1 = cmp.gt(width_cnt, #0)                   //[0, 1]
  width_cnt = add(width_cnt, #-1)              //[0, 1]
  x7x6x5x4 = vin_zero                          //[0, 1]
} {
  z73z62_z51z40= vshuff(ybyay9y8,y3y2y1y0,cm4) //[0, 2]shuffle up for new vrmpy
  vmem(ptr_z1++#1) = z51z40.new                //[0, 2]empty pipe
  if(!p0) y3y2y1y0 = y7y6y5y4                  //[0, 2]update 2 stage shift reg
  if(!p0) y7y6y5y4 = ybyay9y8                  //[0, 2]y0 = y1 || y2 = y2
}:endloop0
/* --------------------------------------------------------------------------- */
{ vmem(ptr_z1++#1) = z73z62                    //[E, 0]empty pipe
  ptr_x0 = add(ptr_x0, in_width_32)            //[E, 0]update input next row
}:endloop1
{ jumpr r31                                    //[T, 0]return to caller
}
#endif
/* --------------------------------------------------------------------------- */
.L_end:
        .size repstream2_asm, .L_end-repstream2_asm
|
XiaoMi/nnlib | 20,469 | hexagon/asm_src/gvconv2dbbb_d32_s1_h_v66.S | /*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*======================================================================*/
 /*======================================================================*/
/* */
/* DESCRIPTION */
 /*     Perform 2d convolution with input depth to output               */
/* max, min computed and output scaled to 8bits */
/* */
/* ARCHITECTURE : QDSP6V6 + HVX */
/*======================================================================*/
/* REVISION HISTORY: */
/* ================= */
/* */
/* Author Date Comments */
/* ------------------------------------------------------------- */
/* DJH 04/21/17 created */
/* DJH 05/12/17 update api precomputed filt_offset */
/* DJH 05/16/17 Hoisted loop0 around to prolog and */
/* epilog of loop1 */
 /*     DJ            05/17/17     specialized version with hstride = 1 */
/*======================================================================*/
#if 0
#endif
/*=============================================================================*/
        .text
        .file "gvconv2dbbb_d32_s1_h_v66.S"
        .global gvconv2dbbbs1_v66_asm
        .balign 32
        .type gvconv2dbbbs1_v66_asm, @function
gvconv2dbbbs1_v66_asm:
/*=============================================================================*/
/* 2-d convolution over d32-format input, horizontal stride 1, V66 variant.    */
/* Input activations are staged through the V66 Z buffer (z = vmem(...)) and   */
/* consumed by the vrmpyz MACs, producing 4 output pixels x 32 depths per      */
/* inner pass.  Accumulators start from the precomputed filter sums (wsum),    */
/* results are scaled by recipvec (vmpye/vmpyo pair), packed/saturated to u8   */
/* and stored with vlalign for the requested output alignment.                 */
/* Running 32-word max is stored to vmem(ptr_max+#0), min to vmem(ptr_max+#1)  */
/* after a 4-pass vshuff lane reduction.                                       */
/* Stack args (see loads below): strides, depths, filter dims, out_height,     */
/* filtsum/suma pointers, max pointer, recip_level, out_align.                 */
/*=============================================================================*/
/*=============================================================================*/
#define ptr_xi                 r0     //data aligned 128
#define ptr_wi                 r1     //weights aligned 128
#define ptr_zi                 r2     //results aligned 128
#define in_width               r3     //(pad_l+in_width+pad_r) => 4 %4
#define out_width_stride_depth r4     //value in bytes to get to next full out row
#define out_width              r5     //out_width_pad
#define stride_h_w             r26    //0 stride_height|stride_width
#define in_depth               r27    //1 %32
#define filt_width             r17    //2 >= 1
#define filt_height            r8     //3 >= 1filt_height lines per filter
#define out_height             r9     //4 >= 1 number of vertical lines to perform
#define ptr_filtsum            r10    //5 aligned 128
#define ptr_suma               r11    //6 not used in this architecture
#define next_suma              r13    //7 not used in this architecture
#define ptr_max                r12    //8 aligned 128
#define recip_level            r14    //9 recip is 31bit unsigned 0x7f800000000LL / max
#define out_align              r1     //10 0, 32, 64, 96
/*=============================================================================*/
#define c4                     r3     //
#define stride_h               r26    //0 stride_height|stride_width
#define in_width_stride_h_depth r28   //in_width * stride_h * in_depth for next output
#define ptr_x0                 r6     //r6 spare
#define ptr_x1                 r7     //
#define ptr_x1_ptr_x0          r7:6   //
#define stride_w               r18    //stride width =1
#define next_outputs           r19    //jump to input ptr for next set of outputs
#define ptr_w                  r20    //
#define in_width_32            r22    //
#define ptr_x2                 r23    //
#define ptr_z                  r24    //
#define col_count              r25    //
#define col_count_ptr_z        r25:24 //packed together to save a slot
#define cm4_c128               r11:10 //combine constants -4, 128
#define filt_cnt               r18
#define ptr_x0_                r12
#define ptr_x1_                r13    //15 spare 14 spare
#define ptr_x1_ptr_x0_         r13:12
#define z_ptr                  r3
#define c8_c96                 r15:14
/*=============================================================================*/
#define PV32(VSRC) .word (0x1DFFE020+VSRC)
#define s0        v0     //
#define s1        v1     //
#define s1s0      v1:0   //
#define s2        v2     //
#define s3        v3     //
#define s3s2      v3:2   //
#define s3s2s1s0  v3:0   //
#define w0        v21    //
#define x0        v4     //
#define x1        v5     //
#define x2        v6     //
#define x3        v7     //
#define x3210     v6     //
#define x3_prev   v16    //previous value
#define xout      v17    //realigned out
#define y0        v8     //
#define y1        v9     //
#define y2        v10    //
#define y3        v11    //
#define wsum      v14    //initialized to in_offset*wsum + bias offset
#define maxomaxe  v13:12 //
#define maxe      v12    //
#define maxo      v13    //
#define minomine  v19:18 //
#define mine      v18    //
#define mino      v19    //
#define biasvec   v16    //
#define recipvec  v15    //
#define RSS <<1:rnd:sat:shift //unverbose the instruction
/*=============================================================================*/
{ allocframe(#56)                              //0th entry on stack is (56+8)/4 =16 ints
} {
  memd(sp+#0) = r17:16                         //
  memd(sp+#8) = r19:18                         //
} {
  memd(sp+#16) = r21:20                        //
  memd(sp+#24) = r23:22                        //
  r20 = ##0x7fffffff                           //
} {
  memd(sp+#32) = r25:24                        //
  memd(sp+#40) = r27:26                        //
  mine = vsplat(r20)                           //min starts at INT_MAX
  maxe = #0                                    //
} {
  maxe.w = vsub(maxe.w, mine.w)                //max starts at -INT_MAX
  stride_h_w = memw(sp+#16<<2)                 //extract strides h + w
  in_depth = memw(sp+#17<<2)                   //
  ptr_w = ptr_wi                               //[P,0]ptr_y=ptr_yi initialize filter pointer
} {
  filt_height = memw(sp+#19<<2)                //extract filt_height
  filt_width = memw(sp+#18<<2)                 //extract filt_width
} {
  filt_height = mpy(filt_height.L,in_depth.L)  //filt_height*in_depth
  out_height = memw(sp+#20<<2)                 //number of output lines
  recip_level = memw(sp+#25<<2)                //
} {
  filt_height = lsr(filt_height, #5)           //filt_height * in_depth / 32
  ptr_filtsum = memw(sp+#21<<2)                //ptr pre computed weight sum
  recipvec = vsplat(recip_level)               //
  memw(sp+#52) = ptr_wi                        //save wi for someone else
} {
  memw(sp+#48) = r28
  in_width_32 = asl(in_width, #5)              //32 * in_width d32 line
  in_width_stride_h_depth = mpy(stride_h_w.H, in_depth.L)
} {
  next_outputs=mpyi(filt_height,in_width)      //filt_height*in_width*in_depth
  out_align = memw(sp+#26<<2)                  //
  wsum = vmem(ptr_filtsum+#0)                  //
  filt_height = mpyi(filt_height, filt_width)  //2d filter
} {
  next_outputs = add(next_outputs, #-4)        //1,2
  loop0(.L_filt, filt_height)                  //[P,0]for(filt_y=0;filt_y<height*in_depth/32;filt_y+=1)
  c8_c96 = combine(#8, #96)                    //
} {
  cm4_c128 = combine(#-4, #128)                //combine 2 constants
  next_outputs = asl(next_outputs, #5)         //(flt_hight*in_width*in_depth/32-4*stride)*32
  in_width_stride_h_depth=mpyi(in_width,in_width_stride_h_depth) //total vertical stride bytes
}
/*=============================================================================*/
{ ptr_x0 = ptr_xi                              //ptr_xi
  out_height = add(out_height, #-1)            //
  ptr_z = add(ptr_zi, #-128)                   //
  filt_cnt = filt_width
} {
  p2 = !cmp.eq(r0, r0)                         //force p2 off
  col_count = out_width                        //out_width
  ptr_xi= add(ptr_xi,in_width_stride_h_depth)  //ptr_x+=in_width*stride_h*in_depth)
  p0 = cmp.eq(out_align, #0)                   //if no alignment enable store
} {
  ptr_x1 = add(ptr_x0, #100)                   //[Pheight]setup initial pointer
  z = vmem(ptr_x0+#0)                          //[Pheight]load 0-127 into V66 Z buffer
  s3s2 = vcombine(wsum, wsum)                  //[P, 0]initialize accumulators
  s1s0 = vcombine(wsum, wsum)                  //[P, 0]initialize accumulators
}
/*=============================================================================*/
        .balign 64
.L_height:
.L_width:
.L_filt:
{
  w0.tmp = vmem(ptr_w++#1)                     //[1, 6]load weights
  s3s2s1s0.w += vrmpyz(w0.b, ptr_x1.ub++)      //[1, 6]perform mac across 4 streams with same weights
} {
  w0.tmp = vmem(ptr_w++#1)                     //[1, 6]load weights
  s3s2s1s0.w += vrmpyz(w0.b, ptr_x1.ub++)      //[1, 6]perform mac across 4 streams with same weights
} {
  w0.tmp = vmem(ptr_w++#1)                     //[1, 6]load weights
  s3s2s1s0.w += vrmpyz(w0.b, ptr_x1.ub++)      //[1, 6]perform mac across 4 streams with same weights
} {
  w0.tmp = vmem(ptr_w++#1)                     //[1, 7]load weights
  s3s2s1s0.w += vrmpyz(w0.b, ptr_x1.ub++)      //[1, 7]perform mac across 4 streams with same weights
  filt_cnt = add(filt_cnt, #-1)
} {
  w0.tmp = vmem(ptr_w++#1)                     //[1, 0]load weights
  s3s2s1s0.w += vrmpyz(w0.b, ptr_x1.ub++)      //[1, 0]perform mac across 4 streams with same weights
  p3 = cmp.eq(filt_cnt, #0)                    //[0, 0]ki is k1/32 - 0
  if(p3.new) ptr_x0 = add(ptr_x0, in_width_32) //[0, 0]move to next line ptr_y keeps going
} {
  w0.tmp = vmem(ptr_w++#1)                     //[0, 1]load weights
  s3s2s1s0.w += vrmpyz(w0.b, ptr_x1.ub++)      //[0, 1]perform mac across 4 streams with same weights
  ptr_x1_ptr_x0_= vaddw(ptr_x1_ptr_x0,c8_c96)  //[0, 1]ptr_x1_=add(ptr_x1,#8)||ptr_x0_=add(ptr_x0, #96)
  if(p3) filt_cnt = filt_width                 //[0, 1]
} {
  w0.tmp = vmem(ptr_w++#1)                     //[0, 2]load weights
  s3s2s1s0.w += vrmpyz(w0.b, ptr_x1.ub++)      //[0, 2]perform mac across 4 streams with same weights
  z_ptr = mux(p3, ptr_x0, ptr_x1_)             //[0, 2]
  ptr_x1_ = mux(p3, ptr_x0_, ptr_x1_)          //[0, 2]
} {
  w0.tmp = vmem(ptr_w++#1)                     //[0, 3]load weights
  s3s2s1s0.w += vrmpyz(w0.b, ptr_x1.ub )       //[0, 3]perform mac across 4 streams with same weights
  z = vmem(z_ptr+#0)                           //[0, 3]load 0-127 bytes into z buffer
  ptr_x1 = add(ptr_x1_, #4)                    //[0, 3]reset ptr for next row of filter taps
}:endloop0
/*=============================================================================*/
{ maxe.w = vmax(maxe.w, s0.w)                  //[E0, 0]see if s0 is max
  mine.w = vmin(mine.w, s0.w)                  //[E0, 0]see if s0 is min
  loop0(.L_filt, filt_height)                  //[E0, 0]for(filt_y=0;filt_y<height*in_depth/32;filt_y++){
  x1.h = vpacke(y1.w, y0.w)                    //[E1, 0]packe low 16bits together
} {
  ptr_x0 = sub(ptr_x0, next_outputs)           //[E0, 1]reset data ptr to next 4
  y0.w = vmpye(s0.w, recipvec.uh)              //[E0, 1](s2 * recip + rnd)>>31
  x3.h = vpacke(y3.w, y2.w)                    //[E1, 1]pack low 16bits together
  col_count_ptr_z=vaddw(col_count_ptr_z,cm4_c128)//count -=4 ptr_z += 128
} {
  maxe.w = vmax(maxe.w, s3.w)                  //[E0, 2]
  mine.w = vmin(mine.w, s3.w)                  //[E0, 2]see if z0 is max
  y0.w+= vmpyo(s0.w, recipvec.h):RSS //<<1:rnd:sat:shift //[E0, 2]
  filt_cnt = filt_width
} {
  y1.w = vmpye(s1.w, recipvec.uh)              //[E0, 3](s2 * recip + rnd)>>31
  maxe.w = vmax(maxe.w, s1.w)                  //[E0, 3]
  x3210.ub = vpack(x3.h, x1.h):sat             //[E1, 3]#sat8 <0, >255 and pack low 8bits
  ptr_x1 = add(ptr_x0, #100) //4               //setup initial pointer
} {
  y1.w+= vmpyo(s1.w, recipvec.h):RSS //<<1:rnd:sat:shift //[E0, 4](s2 * recip + rnd)>>31
  mine.w = vmin(mine.w, s1.w)                  //[E0, 4]see if z0 is max
  maxe.w = vmax(maxe.w, s2.w)                  //[E0, 4]
  p1 = cmp.eq(col_count, #0)                   //[E0, 4]compare for branch
} {
  y2.w = vmpye(s2.w, recipvec.uh)              //[E0, 5](s2 * recip + rnd)>>31
  mine.w = vmin(mine.w, s2.w)                  //[E0, 5]see if z0 is max
  s0 = wsum                                    //[E0, 5]initialize accumulator 0
  ptr_w = memw(sp+#52)                         //[E0, 5]ptr_w=ptr_wi init filter pointer
} {
  y2.w+= vmpyo(s2.w, recipvec.h):RSS //<<1:rnd:sat:shift //[E0, 6](s2 * recip + rnd)>>31
  s1 = wsum                                    //[E0, 6]initialize accumulator 1
  xout = vlalign(x3210,x3_prev,out_align)      //[E1, 6]
  if(p2)vmem(ptr_z+#-1) = xout.new             //[E1, 6]store 2nd 32bytes
} {
  y3.w = vmpye(s3.w, recipvec.uh)              //[E0, 7]#(s2 * recip + rnd)>>31
  x3_prev = x3210                              //[E1, 7]save data for next output align
  p2 = p0                                      //[E1, 7]update predicate pipeline
  p0 = cmp.eq(r0, r0)                          //[E1, 7]set to true
} {
  y3.w+= vmpyo(s3.w, recipvec.h):RSS //<<1:rnd:sat:shift //[E0, 8](s2 * recip + rnd)>>31
  s3s2 = vcombine(wsum, wsum)                  //[E0, 8]initialize accumulator 2,3
  if(!p1) jump:t .L_width                      //[E1, 8]
  z = vmem(ptr_x0+#0)                          //pre load 0-127 for next row of filter
}//cols per line kernel loop width
/*=============================================================================*/
{ x1.h = vpacke(y1.w, y0.w)                    //[E1, 0]#>>16
  p1 = cmp.eq(out_height, #0)                  //EE
  p2 = !cmp.eq(r0, r0)                         //[Pheight]force p2 off
} {
  x3.h = vpacke(y3.w, y2.w)                    //[E1, 1]#sat8 <0, >255
  out_height = add(out_height, #-1)            //Prolog width
  col_count = out_width                        //[Pheight]out_width
} {
  ptr_z = add(ptr_z, #128)                     //[E1, 3]unconditional ptr increment
  ptr_x0 = ptr_xi                              //Prolog width ptr_xi
  p0 = cmp.eq(out_align, #0)                   //[Pheight]if no alignment enable store
} {
  ptr_xi = add(ptr_xi,in_width_stride_h_depth) //[Pheight]ptr_x+=in_width*stride_h*in_depth)
  x3210.ub = vpack(x3.h, x1.h):sat             //[E1, 3]#sat8 <0, >255
  filt_cnt = filt_width
} {
  ptr_zi = add(ptr_zi, out_width_stride_depth) //EEnext out line for this depth segment
  ptr_x1 = add(ptr_x0, #100) //4               //[Pheight]setup initial pointer
  z = vmem(ptr_x0+#0)                          //[Pheight]load 0-127
} {
  xout = vlalign(x3210, x3_prev, out_align)    //[E1, 6]
  vmem(ptr_z+#-1) = xout.new                   //[E1, 6]store 2nd 32bytes
  ptr_z = add(ptr_zi, #-128)                   //
  if(!p1) jump:t .L_height                     //EE
}//end lines per block//last cols per line
/*=============================================================================*/
{ c4 = #4                                      //constant
  ptr_max = memw(sp+#24<<2)                    //ptr pre computed max value in output
} {
  loop0(.L_minmax, #4)                         //[P, 0]
  maxomaxe=vshuff(maxe,maxe,c4)                //[0, 0]find max among 32values
}
/*=============================================================================*/
.L_minmax:
{ minomine=vshuff(mine,mine,c4)                //[0, 1]find min among 32values
  maxe.w = vmax(maxo.w, maxe.w)                //[0, 1]
  c4 = add(c4, c4)                             //[0, 1]
} {
  mine.w = vmin(mino.w, mine.w)                //[0, 2]
  maxomaxe=vshuff(maxe,maxe,c4)                //[1, 0]
}:endloop0
{ minomine=vshuff(mine,mine,c4)                //[1, 1]
  maxe.w = vmax(maxo.w, maxe.w)                //[1, 1]
  vmem(ptr_max+#0) = maxe.new                  //[E, 0]32max
} {
  mine.w = vmin(mino.w, mine.w)                //[1, 2]
  vmem(ptr_max+#1) = mine.new                  //[E, 0]32min
}
/*=============================================================================*/
{ r17:16 = memd(sp+#0)                         //restore r16,r17 from stack
  r19:18 = memd(sp+#8)                         //restore r18,r19
} {
  r21:20 = memd(sp+#16)                        //restore r20,r21
  r23:22 = memd(sp+#24)                        //restore r22,r23
} {
  r25:24 = memd(sp+#32)                        //restore r24,r25
  r27:26 = memd(sp+#40)                        //restore r26,r27
} {
  r28 = memw(sp+#48)
} {
  dealloc_return                               //restore frame and return
}
.L_end:
/*=============================================================================*/
        .size gvconv2dbbbs1_v66_asm, .L_end-gvconv2dbbbs1_v66_asm
/*=============================================================================*/
|
XiaoMi/nnlib | 14,160 | hexagon/asm_src/from_d32_h.S | /*
* Copyright (c) 2017, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
* FUNCTION
* from_d32 convert d32 image of padded_rounded_width,height,depth to
* regular non d32 format each slice of depth for each point on own line.
*
* input 0000
* 1111
* 2222
* 3333
* 4444
* 5555
* 6666
 * output 0-6  0123456012345601234560123456  <-how depth is transformed
*
* *depth multiple of 32
* *input width multiple of 4 and aligned to 128
*
* CODESIZE
* 240 bytes
*
* C MODEL
*/
#if 0
void from_d32(
uint8_t * data_d32, int in_width_pad, uint8_t * data, int width, int height, int depth)
{
int i,j,k;
for(i=0; i < height; i++)
{
for(j=0; j < width; j++)
{
for(k=0; k < depth; k++)
{
data[(i*width+j)*depth+k] = data_d32[(i*depth+(k/32)*32)*in_width_pad+j*32+(k%32)];
}
}
}
return;
}
#endif
/* ---------------------------------------------------------------- */
.text
.global from_d32_asm
.balign 32
.type from_d32_asm, @function
from_d32_asm:
/* ---------------------------------------------------------------- */
#define ptr_ini r0 //ptr to depth 32 data
#define next_width_d32 r1 //(pad+rnd(width,4))*32
#define out_ptri r2 //normal row
#define width r3 //output width after conversion
#define height r4 //number rows
#define depth r5 //normal depth mult of 32
/* ---------------------------------------------------------------- */
#define depth_count r18 // number of bytes left in depth
#define depth_iters r8 //round up to neaest 4
#define horz_iters r13 //width iteration round up to 4
#define ptr_in0 r17 //temp d32 in ptr
#define out_ptr0 r16 //normal temp output ptr
#define ptr_in0out_ptr0 r17:16 //packet data
#define ptr_in1 r11 //temp input ptr depth loop
#define out_ptr1 r10 //temp output ptr depth loop
#define out_width_depth r15 //size of normla output row
#define out_ptr2 r20 //tmp output ptr sub depth loop
#define c32 r6 //const for shuffling groups of 32
#define c64 r7 //const for shuffling groups of 64
#define in_width_depth r9 //size of padded d32 row
#define mdst r19 //tmp outpt rnded to 128
#define dalign r14 //ditance from aligned ptr start to end of data
#define width_count r12 //width counter pts left
#define scratch r21 //temp buffer on stack
#define max_width r22 //1,2,3,4 depths left
/* ---------------------------------------------------------------- */
#define x3333 v11 //dr32 row 3
#define x2222 v10 //dr32 row 2
#define x1111 v9 //dr32 row 1
#define x0000 v8 //dr32 row 0
#define x11010_x01010 v5:4 //0 and 1 shuffled
#define x11010 v5 //0 and 1 shuffled
#define x01010 v4 //0 and 1 shuffled
#define x13232_x03232 v7:6 //2 and 3 shuffled
#define x13232 v7 //2 and 3 shuffled
#define x03232 v6 //2 and 3 shuffled
#define x13210_x03210 v1:0 //0,1 and 2,3 shuffled
#define x13210 v1 //0,1 and 2,3 shuffled
#define x03210 v0 //0,1 and 2,3 shuffled
#define x33210_x23210 v3:2 //0,1 and 2,3 shuffled
#define x33210 v3 //0,1 and 2,3 shuffled
#define x23210 v2 //0,1 and 2,3 shuffled
#define qprolog q0 //1st part of last depth fragment store
#define qepilog q1 //2nd part of last depth fragment store
/* ---------------------------------------------------------------- */
#define PS(SSRC) .word (0x1DFFE100+SSRC) //debug sca reg
#define PV(VSRC) .word (0x1DFFE020+VSRC) //debug vec reg
/* ---------------------------------------------------------------- */
{ allocframe(#640) //allocate scratch
c32 = #-32 //shuffle 32bytes
c64 = #-64 //shuffle 64bytes
} {
sp = and(sp, #-128) //align stack to 128 bytes
out_width_depth = mpyi(width, depth) //size of normal row
depth_iters = lsr(depth, #5) //depth / 32
} {
memd(sp+#0) = r17:16 //save regs
memd(sp+#8) = r19:18 //save regs
in_width_depth=mpyi(next_width_d32,depth_iters)//size of padded row
depth_iters = lsr(depth_iters, #2) //depth /128
} {
memd(sp+#16) = r21:20 //save regs
memd(sp+#24) = r23:22 //save regs
horz_iters = add(width, #3) //round to 4
} {
horz_iters = lsr(horz_iters, #2) //round up to mult of 4
M0 = depth //incrment ptr
}
/* ---------------------------------------------------------------- */
.balign 32
.L_loop_height:
{ height = add(height, #-1) //decrement height count
ptr_in0out_ptr0 = combine(ptr_ini, out_ptri) //next row ptrs
loop1(.L_loop_width, horz_iters) //loop for width
width_count = add(width, #4) //set up width counter
}
/* ---------------------------------------------------------------- */
.balign 32
.L_loop_width:
{ out_ptr1 = out_ptr0 //next depth ptr
ptr_in1 = ptr_in0 //next input depth ptr
loop0(.L_loop_depth4, depth_iters) //loop of depth / 128 itns
depth_count = depth //loop counter for conditional store
} {
p0 = cmp.eq(depth_iters, #0) //depth < 4 ?
if(p0.new) jump:nt .L_epi_only //if depth < 4then skip to last chunk loop
width_count = add(width_count, #-4) //used up 4 more width elements
}
/* ---------------------------------------------------------------- */
.balign 32
.L_loop_depth4: //do multiples of 4 of depth
{ out_ptr2 = out_ptr1 //set up 1st ptr
x0000 = vmem(ptr_in1+#0) //load row 0
ptr_in1 = add(ptr_in1, next_width_d32) //next d32 row
} {
x1111.tmp = vmem(ptr_in1+#0) //load row 1
x11010_x01010 = vshuff(x1111, x0000, c32) //shuffle rows 0 and 1
ptr_in1 = add(ptr_in1, next_width_d32) //next d32 row
} {
x2222 = vmem(ptr_in1+#0) //load row 2
ptr_in1 = add(ptr_in1, next_width_d32) //next d32 row
} {
x3333.tmp = vmem(ptr_in1+#0) //load row 3
x13232_x03232 = vshuff(x3333, x2222, c32) //shuffle rows 2 and 3
ptr_in1 = add(ptr_in1, next_width_d32) //next d32 row
} {
x13210_x03210 = vshuff(x03232,x01010,c64) //shuffle 0,1,2,3
out_ptr1 = add(out_ptr1, #128) //increment 128 for next tiome
} {
x33210_x23210 = vshuff(x13232,x11010,c64) //shuffle 0,1,2,3
depth_count = add(depth_count, #-128) //used up 128 worth of input
} {
vmemu(out_ptr2++M0) = x03210 //save 128bytes of depth 0
p0 = cmp.gt(width_count, #1) //are there >= 2?
} {
if(p0) vmemu(out_ptr2++M0) = x13210 //save 128bytes of depth 1
p0 = cmp.gt(width_count, #2) //are there >= 3?
} {
if(p0) vmemu(out_ptr2++M0) = x23210 //save 128bytes of depth 2
p0 = cmp.gt(width_count, #3) //are there >= 4?
} {
if(p0) vmemu(out_ptr2+#0) = x33210 //save 128bytes of depth 3
}:endloop0
.balign 32
.L_epi_only:
{ p0 = cmp.eq(depth_count, #0) //if out of depths skip
if(p0.new) jump:nt .L_skip_epi //
}
/* ----------------------------------------------------------- */
{ x0000 = vmem(ptr_in1+#0) //get last block of depth
ptr_in1 = add(ptr_in1, next_width_d32) //inc input ptr
scratch = add(sp, #128) //set up scratch buffer ptr
} {
x1111.tmp = vmem(ptr_in1+#0) //get last block of depth
x11010_x01010 = vshuff(x1111, x0000, c32) //shuffle 0 and 1
ptr_in1 = add(ptr_in1, next_width_d32) //inc input ptr
} {
x2222 = vmem(ptr_in1+#0) //get last block of depth
ptr_in1 = add(ptr_in1, next_width_d32) //inc input ptr
max_width = width_count //copy width_count
p0 = cmp.gt(width_count, #4) //if > 4 set to 4
} {
x3333.tmp = vmem(ptr_in1+#0) //get last block of depth
x13232_x03232 = vshuff(x3333, x2222, c32) //shuffle 2 and 3
ptr_in1 = add(ptr_in1, next_width_d32) //inc input ptr
} {
x13210_x03210 = vshuff(x03232, x01010, c64) //shuffle 0,1,2,3
vmem(scratch+#0) = x03210.new //store 1st depth chunk
if(p0) max_width = #4 //4 until width < 4
} {
vmem(scratch+#1) = x13210 //spec. store 2nd depth chunk
loop0(.L_last_depths, max_width) //excute 1,2,3 or 4
} {
x33210_x23210 = vshuff(x13232, x11010, c64) //shuffle 0,1,2,3
vmem(scratch+#2) = x23210.new //spec. store 3rd depth chunk
} {
vmem(scratch+#3) = x33210 //spec. store 4tt depth chunk
}
.balign 32
.L_last_depths:
{ mdst = and(out_ptr1, #127) //ptr to mod 128
x03210 = vmem(scratch++#1) //read last bit of depth from scratch
} {
qprolog = vsetq(out_ptr1) //find dist to ptr
dalign = add(mdst, depth_count) //dist to end of data
mdst = sub(#0, out_ptr1) //create left rotate
} {
x03210 = vror(x03210, mdst) //rotate left by ptr
} {
qepilog = vsetq(dalign) //do mask for 2nd store
p1 = cmp.gt(dalign, #127) //is it a double store?
if(p1.new) jump:nt .L_gt1280 //skip over logic for 1 part store
} {
qprolog = or(qprolog, !qepilog) //compound 2 masks
qepilog = and(qprolog, !qprolog) //cancel 2nd mask
}
.L_gt1280:
{ if( qepilog) vmem(out_ptr1+#1) = x03210 //cond store 2nd part
} {
if(!qprolog) vmem(out_ptr1++M0) = x03210 //store 1st part, in ptr
}:endloop0
/* ----------------------------------------------------------- */
.balign 32
.L_skip_epi:
{ ptr_in0 = add(ptr_in0, #128) //advace in d32 by 128
out_ptr0 = addasl(out_ptr0, depth, #2) //advance input by 4 depths
}:endloop1
/* ---------------------------------------------------------------- */
{ out_ptri = add(out_ptri, out_width_depth) //next output line
ptr_ini = add(ptr_ini, in_width_depth) //next input d32 line block
p0 = cmp.eq(height, #0) //next width
if(!p0.new) jump:t .L_loop_height //jump
}
/* ---------------------------------------------------------------- */
{ r17:16 = memd(sp+ #0) //retieve regs
r19:18 = memd(sp+ #8) //retieve regs
} {
r21:20 = memd(sp+#16) //retieve regs
r23:22 = memd(sp+#24) //retieve regs
} {
dealloc_return //return stack and out
}
.L_end:
/*=============================================================================*/
.size from_d32_asm, .L_end-from_d32_asm
/*=============================================================================*/
|
XiaoMi/nnlib | 4,954 | hexagon/asm_src/vmemcpy128_h.S | /*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
*/
.global vmemcpy_128
.type vmemcpy_128, @function
.balign 32
vmemcpy_128:
/* ============================================================================ */
#define dst r0 //dest ptr
#define src r1 //src ptr
#define length r2 //num bytes
#define kernel r3
#define rest r4
/* ============================================================================ */
#define x0 v0
#define y0 v1
/* ============================================================================ */
{ kernel = lsr(length, #(7+2)) // (length>>7)/4
rest = extractu(length,#2,#7) // (length>>7)%4
}{
p1 = cmp.gt(rest,#0) //
p0 = cmp.gt(kernel,#0) //
if (!p0.new) jump:nt .remain //
}{
loop0(.L_copy_loop, kernel) //
nop; nop; nop //
}
/* ============================================================================ */
.balign 32
.L_copy_loop:
{ x0.tmp = vmem(src++#1):nt //load next bloc
y0 = x0 //
vmem(dst++#1) = y0.new //store out
nop //
}{
x0.tmp = vmem(src++#1):nt //load next bloc
y0 = x0 //
vmem(dst++#1) = y0.new //store out
nop //
}{
x0.tmp = vmem(src++#1):nt //load next bloc
y0 = x0 //
vmem(dst++#1) = y0.new //store out
nop //
}{
x0.tmp = vmem(src++#1):nt //load next bloc
y0 = x0 //
vmem(dst++#1) = y0.new //store out
nop //
}:endloop0
.remain:
{ if (!p1) jumpr r31 //
p2 = cmp.gt(rest,#1) //
p3 = cmp.gt(rest,#2) //
}{
x0.tmp = vmem(src+#0):nt //load next bloc
y0 = x0 //
vmem(dst+#0) = y0.new //store out
if (!p2) jumpr r31 //
}{
x0.tmp = vmem(src+#1):nt //load next bloc
y0 = x0 //
vmem(dst+#1) = y0.new //store out
if (!p3) jumpr r31 //
}{
x0.tmp = vmem(src+#2):nt //load next bloc
y0 = x0 //
vmem(dst+#2) = y0.new //store out
jumpr r31 //
}
.L_end:
/*==============================================================================*/
.size vmemcpy_128, .L_end-vmemcpy_128
|
XiaoMi/nnlib | 15,869 | hexagon/asm_src/inconv2db2b2b2_d32_v60_h.S | /*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
.text
.file "inconv2dbbb_d32_v60_h.S"
.global inconv2db2b2b2_v60_asm
.balign 32
.type inconv2db2b2b2_v60_asm, @function
/* parameters:
* r0 const uint8_t * in_bufe
* r1 const uint8_t * in_bufo
* r2 const uint8_t * weights
* r3 uint16_t * out_bufe
* r4 int in_width
* r5 int out_width_stride_depth
* PARMW(0) int out_width
* PARMW(1) int stride_h_w
* PARMW(2) int in_depth
* PARMW(3) int filt_width
* PARMW(4) int filt_height
* PARMW(5) int out_height
* PARMW(6) const int32_t * bias_add
* PARMW(7) const int32_t *suma
* PARMW(8) int32_t next_suma_row
* PARMW(9) int32_t * ptr_minmax
* PARMW(10) int32_t recip
* PARMW(11) int recip_shift
*/
inconv2db2b2b2_v60_asm:
/*=============================================================================*/
#define in_bufe r0
#define in_bufo r1
#define in_bufo_in_bufe r1:0
#define weights r2
#define out_bufe r3
#define in_width r4
#define out_width_stride_depth r5
#define recip_shift r6
#define recip recip_shift
#define c8 r7
#define xl03x00 r8
#define sum0 xl03x00
#define sum1 xl03x00
#define sum2 xl03x00
#define sum3 xl03x00
#define xl13x10 r9
#define xl13x10_xl03x00 r9:8
#define xl23x20 r10
#define xl33x30 r11
#define xl33x30_xl23x20 r11:10
#define xh03x00 r12
#define xh13x10 r13
#define xh13x10_xh03x00 r13:12
#define xh23x20 r14
#define xh33x30 r15
#define xh33x30_xh23x20 r15:14
#define in_width_depth r16
#define in_depth r17
#define stride_h_w r18
#define stride_h stride_h_w
#define in_width_stride_depth r19
#define next_outputs r20
#define filt_height r21
#define next_suma_row r22
#define bias_add r23
#define suma bias_add
#define ptr_xl0 r24
#define stride_w ptr_xl0
#define ptr_xh0 r25
#define ptr_xh0_ptr_xl0 r25:24
#define ptr_w r26
#define filt_width r27
#define out_x4 r28
#define out_height r30
#define active_sum r31
/*=============================================================================*/
#define shl0 v0
#define shl1 v1
#define shl2 v2
#define shl3 v3
#define shh0 v4
#define shh1 v5
#define shh2 v6
#define shh3 v7
#define w0l v8
#define w0h v9
#define wsum v10
#define recipvec v11
#define out0 v12
#define out1 v13
#define out2 v14
#define out3 v15
#define sy0 v16
#define sy1 v17
#define sy2 v18
#define sy3 v19
#define max_val v20
#define min_val v21
#define vzero v22
#define c80 v23
/*=============================================================================*/
#define SS (13*8)
#define SS8 (13*8+8)
#define PARMW(n) sp+#(SS8+4*(n))
// TODO: prefetch
// TODO: min/max
{
allocframe(#SS) //
memd(R29+#0*8-SS8) = R17:16 //
r6 = ##0x7fffffff //
} {
memd(sp+#1*8) = r19:18 //
memd(sp+#2*8) = r21:20 //
min_val = vsplat(r6) //
vzero = #0 //
} {
memd(sp+#3*8) = r23:22 //
memd(sp+#4*8) = r25:24 //
max_val.w = vsub(vzero.w,min_val.w) //
r6 = #0x80 //
} {
memd(sp+#5*8) = r27:26 //
memd(R29+#6*8) = R31:30 //
c80 = vsplat(r6) //
out_width_stride_depth = add(out_width_stride_depth,out_width_stride_depth)//
} {
in_depth = memw(PARMW(2)) //
stride_h_w = memw(PARMW(1)) //
c8 = #8 //
} {
in_width_depth = mpyi(in_depth,in_width) //
stride_w = zxth(stride_h_w) //
filt_height = memw(PARMW(4)) //
recip = memw(PARMW(10)) //
} {
stride_h = asrh(stride_h_w) //
next_outputs = mpyi(stride_w,#-16) //
next_suma_row = memw(PARMW(8)) //
bias_add = memw(PARMW(6)) //
} {
in_width_stride_depth = mpyi(in_width_depth, stride_h)//
next_outputs += mpyi(in_width_depth,filt_height)//
wsum = vmem(bias_add+#0) //
active_sum = memw(PARMW(7)) //
} {
filt_width = memw(PARMW(3)) //
recipvec = vsplat(recip) //
in_width_depth = add(in_width_depth,#-3*4) //
out_x4 = memw(PARMW(0)) //
} {
in_width_depth -= mpyi(filt_width,#4) //
memw(sp+#56) = out_bufe //
stride_w = mpyi(stride_w,#4) //
out_x4 = add(out_x4,#3) //
} {
out_height = memw(PARMW(5)) //
out_x4 = and(out_x4,#-4) //
m0 = stride_w //
stride_w = neg(stride_w) //
} {
recip_shift = memw(PARMW(11)) //
next_suma_row += mpyi(out_x4,stride_w) //
}
/* ---------------------------------------------------------------------------- */
.balign 32
.L_height:
{
out_bufe = memw(sp+#56) //
memw(sp+#56) += out_width_stride_depth //
ptr_xh0_ptr_xl0 = in_bufo_in_bufe //
} {
out_x4 = memw(PARMW(0)) //
out_height = add(out_height, #-1) //
in_bufe = add(in_bufe,in_width_stride_depth) //
in_bufo = add(in_bufo,in_width_stride_depth) //
}
/* ---------------------------------------------------------------------------- */
.balign 32
.L_width:
{
sum0 = memw(active_sum++M0) //
ptr_w = weights //
} {
shh0 = vsplat(sum0) //
sum1 = memw(active_sum++M0) //
} {
shh1 = vsplat(sum1) //
sum2 = memw(active_sum++M0) //
shl0 = c80 //
shl1 = c80 //
} {
shh2 = vsplat(sum2) //
sum3 = memw(active_sum++M0) //
shl2 = c80 //
shl3 = c80 //
} {
shh3 = vsplat(sum3) //
loop1(.L_filt_height,filt_height) //
} {
shh0.w = vadd(shh0.w,wsum.w) //
shh1.w = vadd(shh1.w,wsum.w) //
shh2.w = vadd(shh2.w,wsum.w) //
shh3.w = vadd(shh3.w,wsum.w) //
}
/* ---------------------------------------------------------------------------- */
.balign 32
.L_filt_height:
.falign
{
xl13x10_xl03x00 = memd(ptr_xl0++#8) //
xh13x10_xh03x00 = memd(ptr_xh0++#8) //
} {
xl23x20 = memw(ptr_xl0++#4) //
xh23x20 = memw(ptr_xh0++#4) //
p3 = sp1loop0(.L_filt_width,filt_width) //
}
/* ---------------------------------------------------------------------------- */
.balign 32
.L_filt_width:
{
w0h.cur = vmem(ptr_w+#1) //
shl0.uw += vrmpy(w0h.ub,xl03x00.ub) //
shl1.uw += vrmpy(w0h.ub,xl13x10.ub) //
xl13x10_xl03x00 = combine(xl23x20,xl13x10) //
} {
w0l.cur = vmem(ptr_w++#2) //
shl0.uw += vrmpy(w0l.ub,xh03x00.ub) //
shl1.uw += vrmpy(w0l.ub,xh13x10.ub) //
xl33x30 = memw(ptr_xl0++#4) //
} {
shl2.uw += vrmpy(w0h.ub,xl23x20.ub) //
shl3.uw += vrmpy(w0h.ub,xl33x30.ub) //
xl23x20 = xl33x30 //
} {
shh0.uw += vrmpy(w0h.ub,xh03x00.ub) //
shh1.uw += vrmpy(w0h.ub,xh13x10.ub) //
xh13x10_xh03x00 = combine(xh23x20, xh13x10) //
xh33x30 = memw(ptr_xh0++#4) //
} {
shl2.uw += vrmpy(w0l.ub,xh23x20.ub) //
shh2.uw += vrmpy(w0h.ub,xh23x20.ub) //
xh23x20 = xh33x30 //
} {
shl3.uw += vrmpy(w0l.ub,xh33x30.ub) //
shh3.uw += vrmpy(w0h.ub,xh33x30.ub) //
}:endloop0
/* ---------------------------------------------------------------------------- */
{
ptr_xl0 = add(ptr_xl0, in_width_depth) //
ptr_xh0 = add(ptr_xh0, in_width_depth) //
}:endloop1
/* ---------------------------------------------------------------------------- */
{
ptr_xl0 = sub(ptr_xl0, next_outputs) //
ptr_xh0 = sub(ptr_xh0, next_outputs) //
shh0.w += vasr(shl0.w,c8) //
out_x4 = add(out_x4, #-4) //
} {
shh1.w += vasr(shl1.w,c8) //
p1 = cmp.gt(out_x4, #0) //
max_val.w = vmax(max_val.w,shh0.w) //
min_val.w = vmin(min_val.w,shh0.w) //
} {
out0.w = vasl(shh0.w,recip_shift) //
max_val.w = vmax(max_val.w,shh1.w) //
min_val.w = vmin(min_val.w,shh1.w) //
} {
out1.w = vasl(shh1.w,recip_shift) //
} {
shh2.w += vasr(shl2.w,c8) //
sy0.w = vmpye(out0.w,recipvec.uh) //
} {
shh3.w += vasr(shl3.w,c8) //
sy1.w = vmpye(out1.w,recipvec.uh) //
max_val.w = vmax(max_val.w,shh2.w) //
} {
out2.w = vasl(shh2.w,recip_shift) //
sy0.w+= vmpyo(out0.w, recipvec.h):<<1:rnd:sat:shift//
min_val.w = vmin(min_val.w,shh2.w) //
} {
out3.w = vasl(shh3.w,recip_shift) //
max_val.w = vmax(max_val.w,shh3.w) //
min_val.w = vmin(min_val.w,shh3.w) //
} {
sy1.w+= vmpyo(out1.w, recipvec.h):<<1:rnd:sat:shift//
} {
sy2.w = vmpye(out2.w,recipvec.uh) //
} {
sy3.w = vmpye(out3.w,recipvec.uh) //
} {
sy2.w+= vmpyo(out2.w, recipvec.h):<<1:rnd:sat:shift//
} {
sy3.w+= vmpyo(out3.w, recipvec.h):<<1:rnd:sat:shift//
} {
sy0.uh = vpack(sy1.w,sy0.w):sat //
vmem(out_bufe++#1) = sy0.new //
} {
sy2.uh = vpack(sy3.w,sy2.w):sat //
vmem(out_bufe++#1) = sy2.new //
if(p1) jump:t .L_width //
} //end cols per line
/* ---------------------------------------------------------------------------- */
{
active_sum = add(active_sum, next_suma_row) //
p1 = cmp.gt(out_height, #0) //
if(p1.new) jump:t .L_height //
} //end lines per block
/* ---------------------------------------------------------------------------- */
{
r0 = memw(PARMW(9)) // ptr_minmax
} {
v0 = vmem(r0+#0) //
} {
v1 = vmem(r0+#1) //
max_val.w = vmax(max_val.w,v0.w) //
} {
min_val.w = vmin(min_val.w,v1.w) //
r7 = #64 //
loop0(.minmax_lp,#5) //
}
.falign
.minmax_lp:
{
v0 = vror(max_val,r7) //
} {
v1 = vror(min_val,r7) //
r7 = asr(r7,#1) //
} {
max_val.w = vmax(max_val.w,v0.w) //
min_val.w = vmin(min_val.w,v1.w) //
}:endloop0
{
vmem(r0+#0) = max_val //
R17:16 = memd(R29+#0*8) // restore callee-saved registers
} {
vmem(r0+#1) = min_val //
R19:18 = memd(R29+#1*8) // restore callee-saved registers
} {
R21:20 = memd(R29+#2*8) // restore callee-saved registers
R23:22 = memd(R29+#3*8) // restore callee-saved registers
} {
R25:24 = memd(R29+#4*8) // restore callee-saved registers
R31:30 = memd(R29+#6*8) // restore callee-saved registers
} {
R27:26 = memd(R29+#5*8) // restore callee-saved registers
DEALLOC_RETURN // return
}
.L_end:
/*=============================================================================*/
.size inconv2db2b2b2_v60_asm, .L_end-inconv2db2b2b2_v60_asm
/*=============================================================================*/
|
XiaoMi/nnlib | 9,678 | hexagon/asm_src/biasadd_relu_requant_nonalign_hvx.S |
/*
* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
*/
#if 0
static inline void biasadd_relu_requant_hvx(
uint8_t *out,
const int32_t *tmp_out,
const int32_t *biasbuf,
const uint32_t num_patches,
const uint32_t depth,
const uint32_t fixed_recip_level_size)
{
int32_t sum;
int32_t i,j;
int32_t outval;
/* do 4 vectors */
/* multiply */
/* pack odd halves */
/* saturate and pack */
/* deal */
/* deal */
/* store a vector */
for (j = 0; j < num_patches; j++) {
for (i = 0; i < depth; i++) {
sum = biasbuf[i] + tmp_out[j*depth+i];
outval = sum * fixed_recip_level_size + (1<<15);
outval >>= 16;
if (outval < 0) outval = 0;
if (outval > 255) outval = 255;
*out++ = outval;
}
}
}
#endif
/*==============================================================================*/
.global biasadd_relu_requant_nonaligned_hvx
.type biasadd_relu_requant_nonaligned_hvx, @function
.balign 32
biasadd_relu_requant_nonaligned_hvx:
/* ============================================================================ */
#define outptr r0
#define acc_buf r1
#define bias_buf0 r2 //aligned
#define num_patches r3 //
#define depth r4
#define fixed_recip_level r5
#define l2cntrl_hi r17
#define l2cntrl_lo r16
#define l2cntrl r17:16
#define l2addr r18
#define stride r6
#define write_cnt r7
#define nbias r8
#define bias_buf r9 //aligned
#define round r10
#define width r11 //write width
#define dalign r12
#define mdsto r13
#define sel0 r14
#define sel1 r15
#define sel r5
#define bias_data0 v0
#define bias_data1 v1
#define bias_data2 v2
#define bias_data3 v3
#define acc_data0 v4
#define acc_data1 v5
#define acc_data2 v6
#define acc_data3 v7
#define out0 v8
#define out1 v9
#define out2 v10
#define out3 v11
#define out32 v12
#define out10 v13
#define out3210 v14
#define scale v15
#define sum0 v16
#define sum1 v17
#define sum2 v18
#define sum3 v19
#define vpredp v20
#define vprede v21
#define qprolog q0
#define qepilog q1
#define tqprolog q2
#define tqepilog q3
#define d0 v22
/* ============================================================================ */
{
allocframe(#32)
nbias = add(depth, #127)
sel0 = ##0x01010101
} {
memd(sp+#0) = r17:16
memd(sp+#8) = r19:18
sel1 = add(sel0, sel0)
write_cnt = depth
} {
stride = and(depth, #127)
scale = vsplat(fixed_recip_level)
l2cntrl_hi =asl(depth, #2)
l2cntrl_lo = #1
} {
l2cntrl_lo = combine(l2cntrl_hi.L, l2cntrl_lo.L)
p2 = cmp.eq(stride, #0)
stride = add(stride, #-128)
nbias = lsr(nbias, #7)
} {
l2addr = addasl(acc_buf, depth, #5)
if(p2) stride = #0
loop1(.L_loop1, num_patches)
} {
round = ##0x00008000
}
l2fetch(l2addr, l2cntrl) //
/*==============================================================================*/
.balign 32
.L_loop1:
{
bias_buf = bias_buf0 //
loop0(.L_loop0, nbias) //
acc_data0 = vmemu(acc_buf++#1) //[0,0]
} {
bias_data0.tmp = vmem(bias_buf++#1) //[0,1]
sum0.w = vadd(acc_data0.w, bias_data0.w)//[0,1]
l2addr = addasl(l2addr, depth, #2) //
} {
out0 = vsplat(round) //[0,2]
acc_data1 = vmemu(acc_buf++#1) //[0,2]
}
/*==============================================================================*/
.balign 32
.L_loop0:
{
dalign = and(outptr, #127) //C
out0.w += vmpyie(sum0.w,scale.uh) //[0,3]
bias_data1.tmp = vmem(bias_buf++#1) //[0,3]
sum1.w = vadd(acc_data1.w, bias_data1.w)//[0,3]
} {
width = write_cnt //C
out1 = vsplat(round) //[0,4]
acc_data2 = vmemu(acc_buf++#1) //[0,4]
} {
p0 = cmp.gt(write_cnt, #127) //C
out1.w += vmpyie(sum1.w,scale.uh) //[0,5]
bias_data2.tmp = vmem(bias_buf++#1) //[0,5]
sum2.w = vadd(acc_data2.w, bias_data2.w)//[0,5]
} {
if(p0 ) width = #128 //C
out2 = vsplat(round) //[0,6]
acc_data3 = vmemu(acc_buf++#1) //[0,6]
} {
out10.h = vpacko(out1.w, out0.w) //[0,7]
out2.w += vmpyie(sum2.w,scale.uh) //[0,7]
bias_data3.tmp = vmem(bias_buf++#1) //[0,7]
sum3.w = vadd(acc_data3.w, bias_data3.w)//[0,7]
} {
out3 = vsplat(round) //[0,8]
qprolog = vsetq(outptr) //C
dalign = add(dalign, width ) //C
} {
mdsto = sub(#0, outptr) //C
sel = sel0 //C
qepilog = vsetq(dalign) //C
out3.w += vmpyie(sum3.w,scale.uh) //[0,9]
} {
p1 = !cmp.gt(dalign, #127) //C is block less than 128 bytes
if(p1.new) sel = sel1 //C
acc_data0 = vmemu(acc_buf++#1) //[1,0]
} {
tqprolog = or(qprolog, !qepilog) //C
write_cnt = add(write_cnt, #-128) //C
out32.h = vpacko(out3.w, out2.w) //[0,11]
} {
vprede = vand(qepilog, sel0) //C
vpredp = vand(qprolog, sel0) //C
bias_data0.tmp = vmem(bias_buf++#1) //[1,1]
sum0.w = vadd(acc_data0.w, bias_data0.w)//[1,1]
} {
vpredp|= vand(tqprolog, sel1) //C
out3210.ub = vpack(out32.h, out10.h):sat//[0,12]
} {
qprolog = vand(vpredp, sel) //C
acc_data1 = vmemu(acc_buf++#1) //[1,2]
out0 = vsplat(round) //[1,2]
} {
d0 = vror(out3210, mdsto) //C
qepilog = vand(vprede sel) //C
} {
if( qepilog) vmem(outptr+#1) = d0 //C
} {
if(!qprolog) vmem(outptr++#1) = d0 //C
}:endloop0
/*==============================================================================*/
{
acc_buf = addasl(acc_buf, stride, #2)
outptr = add(outptr, stride)
write_cnt = depth
}{
acc_buf = add(acc_buf, #-256)
l2fetch(l2addr, l2cntrl) //
}:endloop1
/*==============================================================================*/
{
r17:16 = memd(sp+#0)
r19:18 = memd(sp+#8)
} {
dealloc_return
}
.L_end:
/*==============================================================================*/
.size biasadd_relu_requant_nonaligned_hvx, .L_end-biasadd_relu_requant_nonaligned_hvx
|
XiaoMi/nnlib | 7,608 | hexagon/asm_src/gvmaddvvm_h.S | /*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*======================================================================*/
/* FUNCTIONS : gvmaddvvm_asm */
/* */
/* DESCRIPTION */
/* Add y row to each row of matrix z, add column values */
/* x to all columns of z. */
/* */
/* ARCHITECTURE : QDSP6V6 + HVX */
/*======================================================================*/
/* REVISION HISTORY: */
/* ================= */
/* */
/* Author Date Comments */
/* ------------------------------------------------------------- */
/* DJH 07/09/16 created */
/*======================================================================*/
/* CYCLE-COUNT: */
/* */
/* -> 5*N/128+6 */
/* */
/* MEMORY */
/* CODESIZE = 240 bytes */
/* ASSUMPTIONS */
/* z output and input data is 128byte aligned and multiple of 32 */
/* y input data is 128byte aligned and multiple of 32 */
/* x is aligned to 8bytes */
/* C MODEL */
/*======================================================================*/
#if 0
void gvmaddvmm_cn (int *x, int *y, int *z, int N, int M, int * maxmin)
{
int i, j;
for (i=0; i < N; i++) {
for (j=0; j < 32; j++) {
z[i*M+j] += x[i] + y[j] ;
           maxmin[1] = (z[i*M+j] > maxmin[1]) ? z[i*M+j] : maxmin[1];
           maxmin[0] = (z[i*M+j] < maxmin[0]) ? z[i*M+j] : maxmin[0];
}
}
return;
}
#endif
/*======================================================================*/
        .text
        .file "gvmaddvvm_h.S"
        .global gvmaddvvm_asm
        .balign 32
        .type gvmaddvvm_asm, @function
/*======================================================================*/
/* gvmaddvvm_asm(int *x, int *y, int *z, int n, int m,                  */
/*               int *ptr_max, int reset)                               */
/*                                                                      */
/* For each of the n rows of z: add the splatted per-row word x[i] and  */
/* the 32-word column vector *y into the 32 words of the row, store the */
/* row back in place (non-temporal), and fold every result into a       */
/* running vector max.  The max is then shuffle-reduced so all 32       */
/* lanes hold the global max, and written back to *ptr_max.             */
/*                                                                      */
/* In:  r0   = x, per-row addends (word array, read sequentially)       */
/*      r1   = y, per-column addend vector (128B aligned)               */
/*      r2   = z, accumulator matrix (rows 128B aligned)                */
/*      r3   = n, number of rows                                        */
/*      r4   = m, row stride in words (scaled to bytes below)           */
/*      r5   = ptr_max, running-max vector (128B aligned)               */
/*      sp+0 = reset (7th arg); nonzero restarts the max at INT_MIN     */
/*             instead of continuing from *ptr_max                      */
/* NOTE(review): the C model above also tracks a running min            */
/*      (maxmin[0]); this routine only produces the max -- confirm      */
/*      callers do not expect a min slot to be updated.                 */
/* Clobbers: r6-r8, r10, r11, p2, M0, v0-v7, loop0 registers.           */
/*======================================================================*/
gvmaddvvm_asm:
/*======================================================================*/
#define ptr_x0        r0      //row sums (per-row addends x[])
#define ptr_y0        r1      //column sums (per-column addend vector)
#define ptr_z0        r2      //product accumulator (read pointer)
#define n             r3      //number of rows
#define m             r4      //row stride, words then bytes
#define ptr_max       r5      //running max vector
#define reset         r6      //7th arg: restart max when nonzero
#define ptr_z1        r8      //product accumulator (write pointer)
#define neg           r11     //0x80000000 = INT_MIN
#define sum0          r10     //current x[i]
#define c4            r7      //vshuff granularity, doubles per pass
#define mstride       M0      //row stride in bytes as modifier reg
/*======================================================================*/
#define x0            v0      //splatted x[i]
#define z0            v1      //z row + y
#define z1            v2      //finished row: z + y + x[i]
#define y0            v4      //column addend vector
#define vneg          v3      //all lanes INT_MIN
#define maxomaxe      v7:6    //shuffle pair for log2 max reduction
#define maxo          v7      //odd half of shuffle
#define maxe          v6      //even half / reduction result
#define max           v5      //running vector max
/*======================================================================*/
        .balign 32
        {
           y0 = vmem(ptr_y0+#0)           //[P, 0]load column addends
           m = asl(m, #2)                 //[P, 0]word stride -> byte stride
           neg = ##0x80000000             //[P, 0]INT_MIN
        } {
           ptr_z1 = ptr_z0                //[P, 1]separate write pointer
           vneg = vsplat(neg)             //[P, 1]
           reset = memw(sp+#0)            //[P, 1]7th arg lives on the stack
           n = add(n, #-1)                //[P, 1]one row is done outside the loop
        } {
           dcfetch(ptr_x0+#1<<5)          //[P, 2]prefetch x[]
           max = vmem(ptr_max+#0)         //[P, 2]carry in caller's running max
           p2 = !cmp.eq(reset, #0)        //[P, 2]reset 0 accumulate
           c4 = #4                        //[P, 2]first shuffle granularity (bytes)
        } {
           mstride = m                    //[P, 3]
           z1 = vneg                      //[P, 3]so the first vmax is a no-op
           if(p2) max = vneg              //[P, 3]most negative value
           sum0 = memw(ptr_x0++#1<<2)     //[0, 0]x[0]
        } {
           loop0(.L_loopN, n)             //[P, 4]
           x0 = vsplat(sum0)              //[0, 1]
           z0.tmp = vmem(ptr_z0++mstride) //[0, 1]
           z0.w = vadd(y0.w, z0.w)        //[0, 1]z + y
        }
        .balign 32
.L_loopN:
        {
           max.w = vmax(max.w, z1.w)      //[0, 2]fold previous row into max
           z1.w = vadd(x0.w, z0.w)        //[0, 2]+ x[i]
           vmem(ptr_z1++mstride):nt= z1.new //[0, 2]store row, non-temporal
           sum0 = memw(ptr_x0++#1<<2)     //[1, 0]next x[i]
        } {
           dcfetch(ptr_x0+#2<<5)          //[0, 3]
           x0 = vsplat(sum0)              //[1, 1]
           z0.tmp = vmem(ptr_z0++mstride) //[1, 1]
           z0.w = vadd(y0.w, z0.w)        //[1, 1]
        }:endloop0
        {
           max.w = vmax(max.w, z1.w)      //[1, 2]
           z1.w = vadd(x0.w, z0.w)        //[1, 2]last row
           vmem(ptr_z1+#0):nt= z1.new     //[1, 2]
           loop0(.L_peak, #5)             //[P, 0]5 = log2(32 words) reduce steps
        } {
           maxe.w = vmax(max.w, z1.w)     //[E, 0]include the last row
        }
.L_peak:
        {
           maxomaxe=vshuff(maxe,maxe,c4)  //[0, 0]swap 4/8/16/32/64-byte groups
        } {
           maxe.w = vmax(maxo.w, maxe.w)  //[0, 1]pairwise max of the halves
           c4 = add(c4, c4)               //[0, 1]double granularity next pass
        }:endloop0
        { vmem(ptr_max+#0) = maxe         //[E, 0]every lane now = global max
        }{
           jumpr r31                      //[E, 0]return
        }
/*======================================================================*/
.L_end:
        .size gvmaddvvm_asm, .L_end-gvmaddvvm_asm
|
XiaoMi/nnlib | 9,847 | hexagon/asm_src/vmemcpy_h.S | /*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
*/
        .global vmemcpy_asm
        .type vmemcpy_asm, @function
        .balign 32
vmemcpy_asm:
/* ============================================================================ */
/* void *vmemcpy_asm(void *dst, void *src, int length)                          */
/* HVX vector memcpy for arbitrary alignment and length.                        */
/* Returns dst + length in r0 (note: libc memcpy returns dst).                  */
/* Full algorithm description in the block comment below.                       */
/* Clobbers: r1, r4-r8, r11-r13, p0-p3, q0, q1, v0-v2, loop0 registers.         */
/* ============================================================================ */
#define dst          r7     //dest ptr (moved from r0)
#define src          r1     //src ptr
#define length       r2     //num bytes
/* ============================================================================ */
#define srcalign     r4     // src & 127
#define dstalign     r5     // dst & 127
#define mid          r6     // srcalign - dstalign, valign shift amount
#define end          r0     // put 'dst+length' in r0, so always ready to return
#define temp         r8     // scratch (v60 path only)
#define nv_dst_m2    r11    // # of dest vectors, -2
#define nv_src_m2    r12    // # of src vectors, -2
#define len_min_129  r13    // length - 129
#define x0           v0     // previous source vector
#define x1           v1     // current source vector
#define y0           v2     // aligned output vector
#define qprolog      q0     // byte-enable for first partial store
#define qepilog      q1     // byte-enable for last partial store
/*
 * void * vmemcpy( void * dst, void * src, int length )
 *
 * returns dst + length (note that memcpy returns dst).
 *
 * 'int length' because 'negative' or zero length values will result in no copies.
 * There are no restrictions on length or alignment of source/dest buffer; other than
 * that they must not overlap.
 *
 * This is written to only read the vectors that contain source bytes, and only write vectors
 * containing part of the dest buffer.
 * HOWEVER: if the 'length' parameter is <=0, the function will read one vector from src
 * and not perform any stores.
 *
 * To avoid reading vectors that do not contain source bytes, the v62+ variant uses conditional
 * vector loads; the v60 variant sometimes uses conditional branches, sometimes adjusts the source
 * pointer to re-read the first or last input vector.
 *
 * We always have 'prolog' and 'epilog' - first and last output vectors; each
 * stores at least one byte.
 *  - The case where there is only one output vector
 *    (and one or two input vectors) is handled separately.
 *  - The case where srcalign=dstalign is also handled separately.
 *  - For the general case,
 *     - write prolog (based on one or two input vectors)
 *       (one if srcalign < dstalign, 2 if srcalign > dstalign
 *     - inner loop (0 or more, each needing one new source vector)
 *     - epilog; may or may not need to read one more vector for this.
 *
 * The following are found:
 *    srcalign, dstalign:   src & 127, dst & 127
 *    mid   srcalign-dstalign      Used for 'valign' in loop
 *    nv_dst_m2 = (dstalign + length-129)>>7    2 less than # dest vectors needed
 *    nv_src_m2 = (srcalign + length-129)>>7    2 less than # src vectors needed
 *
 *  nv_dst_m2, nv_src_m2 can only differ by 1.
 *    mid
 *     0     nv_src_m2 == nv_dst_m2
 *    > 0    nv_src_m2 == nv_dst_m2 or nv_dst_m2+1
 *    < 0    nv_src_m2 == nv_dst_m2 or nv_dst_m2-1
 *
 * When nv_dst_m2 == -1, this is a special case (only 1 output vector); which is handled using
 * a single store, and 1 or 2 loads (two when nv_src_m2 == 0, which is only possible when srcalign > dstalign)
 *
 * When nv_dst_m2 >= 0, we have a special case for mid == 0 (both buffers have the same alignment); otherwise
 * the general case:
 *    - prolog reads one vector, and one more when mid > 0.
 *      nv_src_m2_adj = nv_src_m2 - (1 if mid>0; 0 otherwise)
 *    - middle loop: nv_dst_m2 times; each does one vec load and full store.
 *    - the epilog requires a vector load whenever nv_src_m2_adj >= nv_dst_m2.
 */
/* ============================================================================ */
        {
          dst = r0;                             // dst pointer, move to another reg
          srcalign = and(src,#127)
          dstalign = and(r0, #127)
          len_min_129 = add(length,#-129)
        } {
          end  = add(r0,length);                // this will be return value
          nv_dst_m2 = add(dstalign,len_min_129)
          mid = sub(srcalign,dstalign)          // difference in alignment
          qprolog =vsetq(dst)                   //qprolog vec predicate __|---
        } {
          nv_dst_m2 = asr( nv_dst_m2, #7)       // # of dest vecs-2  (>= -1)
          p1 = cmp.gt(dstalign,srcalign)        // is mid < 0?
#if __HEXAGON_ARCH__ >= 62
          qepilog =vsetq2(end)                  // v62+: vsetq2 handles the end==aligned case
          x0 = vmem(src++#1)
        }
#else
          temp = and( end,#127)
          qepilog =vsetq(end)
        } {
          p3 = cmp.eq(temp,#0)
          x0 = vmem(src++#1)
          if( !p3.new ) jump:t  .L_qset0
        } {
          qepilog = or(qepilog,!qepilog)        // must be all 1 if end is aligned
        }
.L_qset0:
#endif
        {
          nv_src_m2 = add(srcalign,len_min_129)
          p2 = cmp.eq(mid,#0)                   // src_align = dst_align?
          p0 = !cmp.gt(nv_dst_m2,#-1)           // true if only one output vector
          if(p0.new) jump:nt .L_dst1            // go handle 1-output-vector case
        }
////--> at least 2 output vectors: 1 prolog, nv_dst_m2 'middle', 1 epilog
        {
          loop0( .L_samealign_loop, nv_dst_m2 ) // set up 'same align' loop
          if(p2) jump:t .L_samealign            // go handle 'same alignment' case
#if __HEXAGON_ARCH__ >= 62
          x1 = x0;
#endif
        }
#if __HEXAGON_ARCH__ >= 62
        {
          loop0( .L_cpyloop, nv_dst_m2 )        // set up general case loop
          if(!p1) nv_src_m2 = add(nv_src_m2,#-128)   // adjust this...
          if(!p1) x1 = vmem(src++#1)            // conditionally load 2nd vector
        }
#else
        {
          if(!p1) src = add(src,#128)           // bump to next if we need extra load
          loop0( .L_cpyloop, nv_dst_m2 )        // set up general case loop
        } {
          if(!p1) nv_src_m2 = add(nv_src_m2,#-128)   // adjust this...
          x1 = vmem(src+#-1)                    // load next (or reload first)
        }
#endif
        {
          p2 = !cmp.gt(nv_dst_m2,#0)            // true if no loops
          nv_src_m2 = asr( nv_src_m2, #7)       // find # src vectors needed
          y0 = valign( x1,x0,mid)               // align data for first store
          x0 = x1;
        } {
          // store prolog, and skip loop where applicable
          p0 = cmp.gt( nv_dst_m2, nv_src_m2 )   // when true, don't read vector for epilog.
          if( !qprolog )vmem(dst++#1) = y0
          if( p2 ) jump:nt .L_endcpy
        }
        .balign 32
.L_cpyloop:
        {
          x1.cur = vmem(src++#1)
          y0 =valign( x1,x0,mid);
          x0 = x1;
        } {
          vmem(dst++#1) = y0;
        } :endloop0
.L_endcpy:
        // optional read to feed epilog
#if __HEXAGON_ARCH__ >= 62
        {
          if(!p0) x1 = vmem(src+#0)
        } {
          y0 =valign( x1,x0,mid);
        }
#else
        {
          if(p0) src = add(src,#-128)           // avoid over-read: back up one vec
        } {
          x1.cur = vmem(src+#0)
          y0 =valign( x1,x0,mid)
        }
#endif
        {
          if( qepilog) vmem(dst+#0) = y0;
          jumpr r31;
        }
/////////////////////////////////////
//
//  Same-alignment src & dest case
//
.L_samealign:
        {
          p2 = !cmp.gt(nv_dst_m2,#0)
          if(!qprolog) vmem(dst++#1) = x0;      // store first
          if(p2.new) jump:nt .L_samealign_0     // skip if no loops
          x0 = vmem(src++#1)
        }
// common alignment loop
        .balign 32
.L_samealign_loop:
        {
          vmem(dst++#1) = x0;
          x0 = vmem(src++ #1)
        } :endloop0
.L_samealign_0:
        {
          if(qepilog) vmem(dst) = x0;
          jumpr r31;
        }
/////////////////////////////////////
//   one vector output cases
//   Need to read an extra vector if (and only if) nv_src_m2 >=0.
//
//  Here we detect length <=0  (p1 and p0 will both be false).
//
.L_dst1:
        {
          p1 = cmp.gt(length,#0)                // any at all?
          p0 = cmp.gt(nv_src_m2,#-1)            // if srcalign + length-129 >= 0, need 2nd source vector
          x1 = x0
#if __HEXAGON_ARCH__ >= 62                      // use conditional vector load
        } {
          if( p0 ) x1 = vmem(src +#0)
        }
#else
          if( !p0.new) jump:t .L_dst1_skiprd
        } {
          x1 = vmem(src +#0)
        }
.L_dst1_skiprd:
#endif
        {
          if( !p1 ) jumpr r31                   // done if length <= 0
          qprolog = or(qprolog, !qepilog )      // combine epilog -> prolog
          y0 = valign( x1,x0, mid)
        } {
          if( !qprolog) vmem(dst) = y0;
          jumpr r31;
        }
.L_end:
/*==============================================================================*/
        .size vmemcpy_asm, .L_end-vmemcpy_asm
|
XiaoMi/nnlib | 12,626 | hexagon/asm_src/resizebilinear_d32_h.S | /*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
* FUNCTIONS : do_bilin_interp_x2_single_slice_HVX
*
* DESCRIPTION
* 2x bilinear interpolation resize
*
* ARCHITECTURE : QDSP6V60 + HVX
*
* REVISION HISTORY:
* =================
*
* Author Date Comments
* -------------------------------------------------------------
*
* CYCLE-COUNT:
*
* MEMORY
* CODESIZE = bytes
* STACK = bytes
* ASSUMPTIONS
*
*
* C MODEL
*/
/*=============================================================================*/
        .text
        .global do_bilin_interp_x2_single_slice_HVX
        .balign 32
        .type do_bilin_interp_x2_single_slice_HVX, @function
/*=============================================================================*/
/* do_bilin_interp_x2_single_slice_HVX(const uint8 *in_ptr,                    */
/*        int in_row_stride, int in_width, int in_height,                      */
/*        uint8 *out_ptr, int out_row_stride)                                  */
/*                                                                             */
/* 2x bilinear upscale of one d32 slice: each pair of input rows yields 4      */
/* output rows, and each input pixel pair is interpolated horizontally with    */
/* vavg.  The last input row/column is clamped (vplastline / qright mux).      */
/* NOTE(review): in_width is scaled by <<5 before vsetq, so it appears to be   */
/* counted in 32-byte d32 pixels, not bytes -- confirm against callers.        */
/* NOTE(review): on v60 COND() expands to nothing, so the [O2]/[2] stores      */
/* execute unconditionally on the first pass; this looks safe only because    */
/* those locations are re-stored later in the iteration -- confirm.            */
/* Clobbers: r6-r15, r28, p0-p3, q0, v0-v25, loop0/loop1 registers.            */
/*=============================================================================*/
do_bilin_interp_x2_single_slice_HVX:
/*=============================================================================*/
#if __HEXAGON_ARCH__ == 60
#define COND(_a)                      /* v60: predicate dropped, store always runs */
#elif __HEXAGON_ARCH__ >= 62
#define COND(_a) IF (_a)              /* v62+: conditional vector store */
#endif
/*=============================================================================*/
#define in_ptr         r0
#define in_row_stride  r1
#define in_width       r2             // width in 32-byte d32 pixels (see note)
#define in_height      r3
#define out_ptr        r4
#define out_row_stride r5
#define c_32           r6             // -32 (vshuff control)
#define c32            r7             // +32 (valign shift)
#define c32_32         r7:6
#define wloops         r8
#define in_heighteven  r9
#define vpin1          in_heighteven  // r9 reused as row-1 input pointer
#define vpin0          r10
#define vpout0         r11
#define vpout0_vpin0   r11:10
#define vpin2          r12
#define vpout1         r13
#define vpout2         r14
#define vpout3         r15
#define vpout3_2       r15:14
#define vplastline     r28            // clamp: pointer to last input row
/*=============================================================================*/
#define qright         q0             // lanes left of the right edge
#define vl0            v0
#define vl1            v1
#define vl2            v2
#define vr0            v3
#define vr1            v4
#define vr2            v5
#define v0_1           v6             // v6 is shared scratch; aliases below
#define v1_1           v0_1
#define v2_1           v0_1
#define vl0s1          v0_1
#define vl1s1          v0_1
#define vl2s1          v0_1
#define v0odd          v7
#define v1odd          v8
#define v2odd          v9
#define vout0_l        v10
#define vout0_h        v11
#define dvout0         v11:10
#define vout1_l        v12
#define vout1_h        v13
#define dvout1         v13:12
#define vout2_l        v14
#define vout2_h        v15
#define dvout2         v15:14
#define sv2_l          v16
#define sv2_h          v17
#define dv2            v17:16
#define vout1a         v18
#define vout1b         v19
#define vout3a         v20
#define vout3b         v21
#define vodd0          v22
#define vodd1          v23
#define vodd2          v24
#define vr             v25
#define vl0t           vr
/*=============================================================================*/
   {
     in_width = add(in_width,#-1)                      //
     in_heighteven = asr(in_height,#1)                 //row pairs
     vplastline = mpyi(in_height,in_row_stride)        //
     c32_32 = combine(#32,#-32)                        //c32=32, c_32=-32
   }{
     wloops = asr(in_width,#2)                         //
     vplastline = add(vplastline,in_ptr)               //
     vpout3_2 = combine(out_ptr,out_ptr)               //
     vpout1 = out_ptr                                  //
   }{
     in_width = asl(in_width,#5)                       //pixels -> bytes (d32)
     p1 = cmp.gt(in_heighteven,#0)                     //
     vplastline = sub(vplastline,in_row_stride)        //point at last row
     p0 = cmp.gt(r0,r0)                                //always false: gates 1st-pass stores
   }{
     qright = vsetq(in_width)                          //
     if !p1 jump .lpy_end                              //
     p1 = cmp.gt(wloops,#0)                            //
     loop1(.lpy,in_heighteven)                         //
   }
   .falign
.lpy:
   {
     vpout0_vpin0 = combine(out_ptr,in_ptr)            //
     vpin1 = add(in_ptr,in_row_stride)                 //
     vpin2 = addasl(in_ptr,in_row_stride,#1)           //
     if p0 vmem(vpout0+#0) = vout0_h                   //[O2]
   }{
     vl0 = vmem(vpin0++#1)                             //
     vpout1 = add(out_ptr,out_row_stride)              //
     p3 = sp1loop0(.lpx,wloops)
     COND(p0) vmem(vpout1+#0) = vout1b                 //[O2]
   }{
     vl1 = vmem(vpin1++#1)                             //
     vpin2 = min(vpin2,vplastline)                     //clamp at bottom edge
     vpout2 = add(vpout1,out_row_stride)               //
     COND(p0) vmem(vpout2+#0) = vout1_h                //[O2]
   }{
     vl2 = vmem(vpin2++#1)                             //
     vpout3 = add(vpout2,out_row_stride)               //
     if !p1 jump .lpx_end                              //
     COND(p0) vmem(vpout3+#0) = vout3b                 //[O2]
   }
   .falign
.lpx:
   {
     vr0.cur = vmem(vpin0++#1)                         //
     v0_1 = valign(vr0,vl0,c32)                        //row0 shifted 1 pixel
     COND(p3) vmem(vpout1+#0) = vout1b                 //[2]
     if p3 vpout1 = add(vpout1,#64)                    //[2]
   }{
     vr1.cur = vmem(vpin1++#1)                         //
     v1_1 = valign(vr1,vl1,c32)                        //
     v0odd.ub = vavg(vl0.ub,v0_1.ub)                   //horizontal interp, row0
     COND(p3) vmem(vpout3+#0) = vout3a                 //[2]
   }{
     vr2.cur = vmem(vpin2++#1)                         //
     v2_1 = valign(vr2,vl2,c32)                        //
     v1odd.ub = vavg(vl1.ub,v1_1.ub)                   //
     COND(p3) vmem(vpout3+#1) = vout3b                 //[2]
   }{
     v2odd.ub = vavg(vl2.ub,v2_1.ub)                   //
     dvout0 = vshuff(v0odd,vl0,c_32)                   //interleave even/odd pixels
     vmem(vpout0++#1) = vout0_l.new                    //
     vl0 = vr0                                         //
   }{
     dvout2 = vshuff(v1odd,vl1,c_32)                   //
     vmem(vpout2++#1) = vout2_l.new                    //
     vl1 = vr1                                         //
     if p3 vpout1 = add(vpout1,#64)                    //[2]
   }{
     dv2 = vshuff(v2odd,vl2,c_32)                      //
     vmem(vpout0++#1) = vout0_h                        //
     vl2 = vr2                                         //
   }{
     vmem(vpout2++#1) = vout2_h                        //
     vout1b.ub = vavg(vout2_h.ub,vout0_h.ub)           //vertical interp
     vout3a.ub = vavg(sv2_l.ub,vout2_l.ub)             //
     vout3b.ub = vavg(sv2_h.ub,vout2_h.ub)             //
   }{
     vout1a.ub = vavg(vout2_l.ub,vout0_l.ub)           //
     vmem(vpout1++#1) = vout1a.new                     //
     if p3 vpout3 = add(vpout3,#256)                   //[2]
   }:endloop0
.lpx_end:
   {
     if p3 vmem(vpout1++#1) = vout1b                   //[e]
     vl0s1 = vror(vl0,c32)                             //
     p0 = tstbit(in_width,#1+5)                        //
   }{
     if p3 vmem(vpout3++#1) = vout3a                   //[e]
     vr0 = vmux(qright,vl0s1,vl0)                      //clamp at right edge
     vl1s1 = vror(vl1,c32)                             //
   }{
     if p3 vmem(vpout3++#1) = vout3b                   //[e]
     vodd0.ub = vavg(vr0.ub,vl0.ub)                    //
     vr1 = vmux(qright,vl1s1,vl1)                      //
     vl2s1 = vror(vl2,c32)                             //
   }{
     vodd1.ub = vavg(vr1.ub,vl1.ub)                    //
     vr2 = vmux(qright,vl2s1,vl2)                      //
   }{
     dvout0 = vshuff(vodd0,vl0,c_32)                   //
     vodd2.ub = vavg(vr2.ub,vl2.ub)                    //
     vmem(vpout0++#1) = vout0_l.new                    //
   }{
     dvout1 = vshuff(vodd1,vl1,c_32)                   //
     vmem(vpout2++#1) = vout1_l.new                    //
     in_ptr += mpyi(in_row_stride,#2)                  //advance 2 input rows
     out_ptr += mpyi(out_row_stride,#4)                //advance 4 output rows
                                                       //
   }{
     dvout2 = vshuff(vodd2,vl2,c_32)                   //
     vout1a.ub = vavg(vout1_l.ub,vout0_l.ub):rnd       //
     vout1b.ub = vavg(vout1_h.ub,vout0_h.ub):rnd       //
     vmem(vpout1++#1) = vout1a.new                     //
   }{
     vout3a.ub = vavg(vout2_l.ub,vout1_l.ub):rnd       //
     vout3b.ub = vavg(vout2_h.ub,vout1_h.ub):rnd       //
     vmem(vpout3++#1) = vout3a.new                     //
   }:endloop1
.lpy_end:
   {
     if p0 vmem(vpout0+#0) = vout0_h                   //[e]
     vpin0 = in_ptr                                    //
     vpout0 = out_ptr                                  //
     p2 = tstbit(in_height,#0)                         //odd height -> 1 leftover row
   }{
     if p0 vmem(vpout1+#0) = vout1b                    //[e]
     vpout1 = add(out_ptr,out_row_stride)              //
   }{
     if p0 vmem(vpout2+#0) = vout1_h                   //[e]
   }{
     if p0 vmem(vpout3+#0) = vout3b                    //[e]
     if (!p2) jumpr r31                                //even height: done
   }{
     if !p1 jump .loddx_end                            //
     p3 = sp1loop0(.lodd,wloops)                       //
     vl0 = vmem(vpin0++#1)                             //
   }
   .falign
.lodd:
   {
     vr0.cur = vmem(vpin0++#1)                         //
     vl0s1 = valign(vr0,vl0,c32)                       //
     vl0t = vl0                                        //
     COND(p3) vmem(vpout0+#0) = vout0_h                //[2]
   }{
     v0odd.ub = vavg(vl0.ub,vl0s1.ub)                  //
     vl0 = vr0                                         //
     if p3 vmem(vpout1++#1) = vout0_l                  //[2]
   }{
     if p3 vmem(vpout1++#1) = vout0_h                  //[2]
     if p3 vpout0 = add(vpout0,#128)                   //[2]
   }{
     dvout0 = vshuff(v0odd,vl0t,c_32)                  //
     vmem(vpout0++#1) = vout0_l.new                    //
   }:endloop0
.loddx_end:
   {
     if p3 vmem(vpout0++#1) = vout0_h                  //[e]
     vl0s1 = vror(vl0,c32)                             //
   }{
     if p3 vmem(vpout1++#1) = vout0_l                  //[e]
     vr = vmux(qright,vl0s1,vl0)                       //
   }{
     if p3 vmem(vpout1++#1) = vout0_h                  //[e]
     vodd0.ub = vavg(vr.ub,vl0.ub)                     //
   }{
     dvout0 = vshuff(vodd0,vl0,c_32)                   //
     vmem(vpout0++#1) = vout0_l.new                    //
     p0 = tstbit(in_width,#1+5)                        //
   }{
     vmem(vpout1++#1) = vout0_l                        //last row duplicated
   }{
     if p0 vmem(vpout0++#1) = vout0_h                  //
   }{
     if p0 vmem(vpout1++#1) = vout0_h                  //
     jumpr r31                                         //
   }
.do_bilin_interp_x2_single_slice_HVX_end:
/*=============================================================================*/
      .size do_bilin_interp_x2_single_slice_HVX, .do_bilin_interp_x2_single_slice_HVX_end-do_bilin_interp_x2_single_slice_HVX
/*=============================================================================*/
/*=============================================================================*/
|
XiaoMi/nnlib | 10,035 | hexagon/asm_src/gemvmpybbw_h.S |
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
*/
/*======================================================================*/
/*  FUNCTIONS     : gemvmpybbw_asm                                      */
/* */
/* DESCRIPTION */
/* Perform gemm matrix vector multiply, result 32bits */
/* */
/* ARCHITECTURE : QDSP6V6 + HVX */
/*======================================================================*/
/* REVISION HISTORY: */
/* ================= */
/* */
/* Author Date Comments */
/* ------------------------------------------------------------- */
/* DJH 03/07/16 created */
/* DJH 05/10/16 added post add for x and y offset*/
/* DJH 07/10/16 rewrote to do mat-vec mult */
/*======================================================================*/
/* CYCLE-COUNT: */
/* -> 5*K/512+24 */
/* */
/* MEMORY */
/* CODESIZE = 1040 bytes */
/* STACK = 48 bytes */
/* ASSUMPTIONS */
/* ptr_be, ptr_c are 128 byte aligned */
/* ptr_a is 16 byte aligned */
/* K >= 16, K%16=0 */
/* 1<= M <=32 */
/* C MODEL */
/*======================================================================*/
#if 0
void gemvmpybbw_cn(
uint8 * a, int a_off, uint8 * b, int b_off, int * c, int M, int K) {
int i, j, k;
int32 sum;
uint8 a_val, b_val;
for (j=0; j < M; j++) {
sum = 0;
for (k=0; k < K; k++) {
a_val = a[k];
b_val = b[k*M+j];
sum += (a_val + a_off) * (b_val + b_off);
}
c[j] = sum;
}
  }
  return;
}
// actually done as..
sum += a_val * b_val;
asum += a_val
bsum += b_val
}
c[j] = sum + a_off*bsum + b_off*asum + K*a_off*b_off
}
#endif
/*=============================================================================*/
.text
.file "gemvmpybbw_h.S"
.global gemvmpybbw_asm
.balign 32
.type gemvmpybbw_asm, @function
gemvmpybbw_asm:
/*=============================================================================*/
#define ptr_a r0 //data
#define a_offset r1
#define ptr_be r2 //weights must be pre processwed and transposed
#define b_offset r3
#define ptr_c r4 //results
#define m r5 //can be < 32 will write less
#define k r6 //k % 16
/*=============================================================================*/
#define ki r7 //
#define ptr_bo r8 //
#defne c0101 r9
#define sum1sum0 r15:14
#define sum1 r15
#define sum0 r14
#define c0101 r16
#define mask r17
#define x7x4x3x0 r11:10 //
#define xfxcxbx8 r13:12 //
#define x3x0 r10 //
#define x7x4 r11 //
#define xbx8 r12 //
#define xfxc r13 //
/*=============================================================================*/
#define y0 v0 //
#define y1 v1 //
#define y2 v2 //
#define y3 v3 //
#define bsum v5
#define z0 v4 //
#define asum v6
#define va_offset v7
/*=============================================================================*/
{
k = memw(sp+#0)
allocframe(#16)
} {
memd(sp+#0) = r17:16
mask = #127
m = asl(m, #2) //ints
} {
q3 = vsetq(m) //
p0 = bitsclr(m, mask)
if(!p0.new) jump:nt .L_32 //
} {
q3 = vcmp.eq(v0.b, v0.b) //enable all bits
}
/*============================================================================*/
.balign 32
.L_32:
{
p1 = cmp.gt(k,#31) // must be >= 32 to need at least one loop
ptr_bo = add(ptr_be, #128) //[ , P]
ki = lsr(k, #4) //k / 16
} {
dcfetch(ptr_a+#4<<5) //[0, 0]prefetch next line
ki = add(ki, #-1) //
} {
c0101 = ##0x01010101
x7x4x3x0 = memd(ptr_a++#8) //[0, 1]
loop0(.L_loopK, ki) //[P, 9]ki is k1/4 - 2
} {
y0.tmp = vmem(ptr_be++#2) //[0, 2]32x4
z0.uw = vrmpy(y0.ub, x3x0.ub) //[0, 2]
bsum.uw = vrmpy(y0.ub, c0101.ub) //[0, 2]
sum0 = #0
} {
y1.tmp = vmem(ptr_bo++#2) //[0, 3]32x4
z0.uw += vrmpy(y1.ub, x7x4.ub) //[0, 3]
xfxcxbx8 = memd(ptr_a++#8) //[0, 3]
bsum.uw += vrmpy(y1.ub, c0101.ub) //[0, 3]
} {
y2.tmp = vmem(ptr_be++#2) //[0, 4]32x4
z0.uw += vrmpy(y2.ub, xbx8.ub) //[0, 4]
bsum.uw += vrmpy(y2.ub, c0101.ub) //[0, 4]
sum1 = #0
} {
if( !p1 ) jump:nt .L_loop0
}
/*============================================================================*/
.balign 32
.L_loopK:
{
y3.tmp = vmem(ptr_bo++#2) //[0, 5]32x4
z0.uw += vrmpy(y3.ub, xfxc.ub) //[0, 5]
bsum.uw += vrmpy(y3.ub, c0101.ub) //[0, 5]
dcfetch(ptr_a+#4<<5) //[1, 0]prefetch next line
} {
sum1sum0+=vraddub(xfxcxbx8,x7x4x3x0)//[0,6]
x7x4x3x0 = memd(ptr_a++#8) //[1, 1]
} {
y0.tmp = vmem(ptr_be++#2) //[1, 2]32x4
z0.uw += vrmpy(y0.ub, x3x0.ub) //[1, 2]
bsum.uw += vrmpy(y0.ub, c0101.ub) //[1, 2]
} {
y1.tmp = vmem(ptr_bo++#2) //[1, 3]32x4
z0.uw += vrmpy(y1.ub, x7x4.ub) //[1, 3]
xfxcxbx8 = memd(ptr_a++#8) //[1, 3]
bsum.uw += vrmpy(y1.ub, c0101.ub) //[1, 3]
} {
y2.tmp = vmem(ptr_be++#2) //[1, 4]32x4
z0.uw += vrmpy(y2.ub, xbx8.ub) //[1, 4]
bsum.uw += vrmpy(y2.ub, c0101.ub) //[1, 4]
}:endloop0
/*=============================================================================*/
.L_loop0:
{ y3.tmp = vmem(ptr_bo++#2) //[1, 5]32x4
z0.uw += vrmpy(y3.ub, xfxc.ub) //[1, 5]
bsum.uw += vrmpy(y3.ub, c0101.ub) //[1, 5]
} {
sum1sum0+=vraddub(xfxcxbx8,x7x4x3x0)//[1,6]
} {
sum0 = add(sum0, sum1)
} {
sum0 = mpyi(sum0, b_offset)
sum1 = mpyi(a_offset, b_offset)
} {
sum0 += mpyi(sum1, k)
a_offset = combine(a_offset.L, a_offset.L)
} {
asum = vsplat(sum0)
va_offset = vsplat(a_offset)
} {
z0.w = vadd(z0.w, asum.w)
bsum.w = vmpyio(bsum.w, va_offset.h)
} {
z0.w = vadd(z0.w, bsum.w)
r17:16 = memd(sp+#0)
} {
if(q3) vmem(ptr_c+#0) = z0 //[E,16]
} {
dealloc_return
}
.L_end:
/*=============================================================================*/
.size gemvmpybbw_asm, .L_end-gemvmpybbw_asm
|
XiaoMi/nnlib | 12,224 | hexagon/asm_src/fullconnlayerbatch1_h.S | /*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
* FUNCTIONS : fullconnlayerbatched
*
* DESCRIPTION
* perform matrix multiply on activation and output relu data and max/min
*
* ARCHITECTURE : QDSP6V60+ + HVX
*
* REVISION HISTORY:
* =================
*
* Author Date Comments
* -------------------------------------------------------------
* DJH 05/22/18 created
* DJH 10/21/18 stopped overreading past read arrays
* ------------------------------------------------------------- */
.text
.file "fullconnlayerbatch1_h.S"
.global fullconnlayerbatch1_asm
.balign 32
/*=============================================================================*/
/* fullconnlayerbatch1_asm                                                     */
/*   Fully connected layer, single batch.  The padded activation vector       */
/*   (in_depth_pad bytes) is dot-multiplied against the weight matrix,        */
/*   16 activation bytes per loop pass (4 x vrmpy against 4 weight vectors),  */
/*   into a vector of 32-bit accumulators.  The accumulators are left-        */
/*   shifted by recip_shift, scaled by recip_level, packed/saturated to      */
/*   8 bits and stored through a vector predicate that handles the 32-byte    */
/*   alignment of the output pointer.  Running max/min vectors at             */
/*   ptr_max+#0 / ptr_max+#1 are updated for output range tracking.           */
/*   Stack args (read as memw(sp+#k<<2) after allocframe(#56)):               */
/*     #16 recip_level, #17 bias_adjust, #18 actns_adjust,                    */
/*     #19 woffset,     #20 recip_shift.                                      */
/*=============================================================================*/
.type fullconnlayerbatch1_asm, @function
fullconnlayerbatch1_asm:
#define ptr_x0 r0 //pointer to activation batch
#define ptr_w r1 //pointer to weight chunk; output depth 32
#define ptr_zi r2 //pointer to output
#define in_depth_pad r3 //the number of elements in the input vector % 32 out depth by def 32
#define num_batches r4 //dummy input is 1
#define ptr_max r5 //the on going max and mins for the output
#define recip_level r6 //32bit coefficients 255 / (est_max - est_min)
#define bias_adjust r7 //typically sum of bias and sum of weights
#define actns_adjust r8 //used to adjust the product
#define woffset r10 //the byte offset from weight position
#define recip_shift r12 //shift for accumulator if outputs larger
#define n r3 //num iterations
#define cntrl r9 //control value for populating vpredicate table
#define sel r11 //choose entry to predicate table
#define xsum0 r8 //sum of activations batch 0
#define d07654_d03210 r13:12 //activation values
#define d07654 r13 //activation values
#define d03210 r12 //activation values
#define d0fedc_d0ba98 r15:14 //activation values
#define d0fedc r15 //activation values
#define d0ba98 r14 //activation values
#define fetch0 r6 //fetch ptr to even batch
#define out_ptr0 r2 //even output batch ptr
#define align0 r4 //output alignment
#define FETCH_INC #32 //
#define NULL #0 //NULL 000 ptr
#define RSS <<1:rnd:sat:shift //shorthand for the instruction modifier
#define PV(VSRC) .word (0x1DFFE020+VSRC)
#define PS(SSRC) .word (0x1DFFE100+SSRC) //debug reg
#define bias0 v12 //even bias values
#define sum00 v1 //even accumulator
#define weight00 v3 //weights from fc layer
#define weight01 v3 //weights from fc layer
#define weight02 v3 //weights from fc layer
#define weight03 v3 //weights from fc layer
#define min0 v4 //min of accumulator
#define max0 v5 //max of accumulator
#define max0_min0 v5:4
#define b0 v10 //even batch quantized outputs
#define vpred v0 //sum of vector predicates for cntrling output
#define wsum v8 //bias values
#define recipvec v9 //255/max quantized value
#define vshamt_vec v14 //splat quantized shift values
/* ----------------------------------------------------------------------- */
{ allocframe(#56) //reserve stack
n = lsr(in_depth_pad, #4) //loop count = padded depth / 16 bytes per pass
} {
max0 = vmem(ptr_max+#0 ) //load running max vector
cntrl = #32 //
n = add(n, #-1) //correct for pipeline
} {
recip_shift = memw(sp+#20<<2) //stack arg: accumulator shift
sel = ##0x01010101 //
q0 = vsetq(cntrl) //
} {
recip_level = memw(sp+#16<<2) //get quantize coeff
cntrl = #64 //
vpred = vand(q0, sel) //1___ v(32)
sel = add(sel, sel) //
} {
recipvec = vsplat(recip_level) //replicate to vector
bias_adjust = memw(sp+#17<<2) //
q1 = vsetq(cntrl) //
min0 = vmem(ptr_max+#1 ) //load running min vector
} {
wsum = vmem(bias_adjust+#0) //[P, ]
actns_adjust = memw(sp+#18<<2) //
q0 = and(q1, !q0) //
cntrl = #96 //
} {
vshamt_vec = vsplat(recip_shift) //
vpred |= vand(q0, sel) //_1__ v(64) & !v(32)
sel = add(sel, sel) //
q0 = vsetq(cntrl) //
} {
woffset = memw(sp+#19<<2) //
q1 = and(q0, !q1) //
dcfetch(ptr_x0+#0<<6) //[P, ]
} {
vpred |= vand(q1, sel) //__1_ v(96) & !v(64)
sel = add(sel, sel) //
q1 = not(q0) //
} {
vpred |= vand(q1, sel) //___1 !v(96)
sel = add(sel, sel) //
q1 = and(q0, !q0) //NOTE(review): q0 & !q0 is always false, so the
//next vand contributes nothing — looks vestigial
fetch0 = add(ptr_x0, FETCH_INC) //[
} {
loop0(.L_matmul32, n) //[P, ]
vpred |= vand(q1, sel) //sel = 0x10101010
xsum0 = memw(actns_adjust+#0<<2) //[P, ]batch 0 -sum
} {
d07654_d03210 = memd(ptr_x0++#1<<3) //[0, 0]read batch 0
sum00 = vsplat(xsum0) //[P, ]splat the sum of accs
} {
d0fedc_d0ba98 = memd(ptr_x0++#1<<3) //[0, 2]read batch 0
sum00.w = vadd(sum00.w, wsum.w) //[P, ]set up accumulator 0
out_ptr0 = add(ptr_zi, woffset) //add the output weights offset
}
/* ------------------------------------------------------------------------------ */
/* Main loop: each pass consumes 16 activation bytes and 4 weight vectors. */
.balign 32
.L_matmul32:
{ dcfetch(fetch0+#0<<6) //[0, 3]prefetch batch 0
fetch0 = add(fetch0, FETCH_INC) //[0, 3]increment fetch
weight00.tmp = vmem(ptr_w++#1) //[0, 4]read weights
sum00.uw += vrmpy(weight00.ub, d03210.ub) //[0, 4]do dotproduct of acts with matrix
} {
weight01.tmp = vmem(ptr_w++#1) //[0, 5]read weights
sum00.uw += vrmpy(weight01.ub, d07654.ub) //[0, 5]do dotproduct of acts with matrix
d07654_d03210 = memd(ptr_x0++#1<<3) //[1, 0]get batch input
} {
weight02.tmp = vmem(ptr_w++#1) //[0, 6]read weights
sum00.uw += vrmpy(weight02.ub, d0ba98.ub) //[0, 6]do dotproduct of acts with matrix
} {
weight03.tmp = vmem(ptr_w++#1) //[0, 7]read weights
sum00.uw += vrmpy(weight03.ub, d0fedc.ub) //[0, 7]do dotproduct of acts with matrix
d0fedc_d0ba98 = memd(ptr_x0++#1<<3) //[1, 2]get batch input
}:endloop0
/* ------------------------------------------------------------------------------ */
/* Epilogue pass of the software pipeline: drain the last 16 activations. */
{ weight00.tmp = vmem(ptr_w++#1) //[1, 3]read weights
sum00.uw += vrmpy(weight00.ub, d03210.ub) //[1, 3]do dotproduct of acts with matrix
} {
weight01.tmp = vmem(ptr_w++#1) //[1, 4]read weights
sum00.uw += vrmpy(weight01.ub, d07654.ub) //[1, 4]do dotproduct of acts with matrix
} {
weight02.tmp = vmem(ptr_w++#1) //[1, 5]read weights
sum00.uw += vrmpy(weight02.ub, d0ba98.ub) //[1, 5]do dotproduct of acts with matrix
} {
weight03.tmp = vmem(ptr_w++#1) //[1, 6]read weights
sum00.uw += vrmpy(weight03.ub, d0fedc.ub) //[1, 6]do dotproduct of acts with matrix
} {
sum00.w = vasl(sum00.w, vshamt_vec.w) //scale accumulators up before quantize
align0 = extractu(out_ptr0, #2, #5) //bits 6:5 of out ptr -> 32-byte alignment
} {
b0.w = vmpye(sum00.w, recipvec.uh) //[E, ]quantize
align0 = lsl(#1, align0) //convert to power of 2
} {
b0.w+= vmpyo(sum00.w, recipvec.h):RSS //[E, ]quantize
align0 = vsplatb(align0) //create table lookup cntrls
} {
max0.w = vmax(max0.w, b0.w) //[E, ]update even max
min0.w = vmin(min0.w, b0.w) //[E, ]update even min
q0 = vand(vpred, align0) //access even alignment cntrl
} {
b0.h = vpack(b0.w, b0.w):sat //[E, ]#>>16
} {
vmem(ptr_max+#0) = max0 //[E, 0]32max
} {
vmem(ptr_max+#1) = min0 //[E, 0]32min
} {
b0.ub = vpack(b0.h, b0.h):sat //[E, ]16 to 8 sat
} {
if(q0) vmem(out_ptr0) = b0 //[E, ]store and increment batch
} {
dealloc_return //restore frame and return
}
.L_end:
/* ------------------------------------------------------------------------------ */
.size fullconnlayerbatch1_asm, .L_end-fullconnlayerbatch1_asm
/* ------------------------------------------------------------------------------ */
|
XiaoMi/nnlib | 10,126 | hexagon/asm_src/gemsuma_h.S |
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
*/
/*======================================================================*/
/* FUNCTIONS : gemsuma_asm */
/* */
/* DESCRIPTION */
/* Each row of the X matrix is accumulated horizontally and */
/* multiplied by y_offset; rows are processed in parallel. */
/* The inner loop is not fully optimized. */
/* */
/* ARCHITECTURE : QDSP6V6 + HVX */
/*======================================================================*/
/* REVISION HISTORY: */
/* ================= */
/* */
/* Author Date Comments */
/* ------------------------------------------------------------- */
/* DJH 03/07/16 created */
/* DJH 05/10/16 de-optimized inner loop */
/*======================================================================*/
/* IDEAL CYCLE-COUNT: */
/* */
/* -> 3*K*N/16+5*N/4+8 */
/* */
/* MEMORY */
/* STACK = 64 bytes */
/* CODESIZE = 288bytes */
/* ASSUMPTIONS */
/* x is 8byte aligned xsum is 8byte aligned */
/* N%4=0 K%8=0 */
/* C MODEL */
/* N = Nlen */
/* K = Klen | Kstride */
/*======================================================================*/
#if 0
void gemsuma_cn(uchar *x, int N, int K, int * xsum, int y_offset, int z_offset)
{
int i, k;
int ksize = 0xffff&(K >> 16);
int kstride = 0xffff& K;
int x_val;
int sum;
for (i=0; i < N; i++) {
sum = z_offset;
for (k=0; k < ksize; k++) {
x_val = x[i*kstride+k];
sum += x_val;
}
xsum[i] = sum * y_offset + z_offset;
}
}
#endif
/*=============================================================================*/
.text
.file "gemsuma_h.S"
/*=============================================================================*/
/* gemsuma_asm — see the C reference model in the header above.                */
/*   For each row i of x (ksize bytes, row pitch kstride):                     */
/*     xsum[i] = (horizontal byte-sum of row i) * y_offset + z_offset          */
/*   Two rows are summed per outer iteration (n is halved on entry); the       */
/*   inner loop consumes 32 bytes of each row pair per pass via vraddub.       */
/*   NOTE(review): the C model seeds sum with z_offset before the loop, but    */
/*   this asm seeds the accumulators with 0 — confirm which is intended.       */
/*=============================================================================*/
.global gemsuma_asm
.balign 32
.type gemsuma_asm, @function
gemsuma_asm:
/*=============================================================================*/
#define ptr_x r0 //
#define n r1 //n is number of rows to be summed
#define k r2 //k | kstride
#define ptr_xsum r3 //
#define y_offset r4
#define z_offset r5 //correction factor K*xo*yo e.g.
#define kjump r6 //kstride
#define ki r7 //
#define kstride r8 //alias to k1
#define mkk M1 //
#define kk_1 M0 //skip back
#define c32_kstride r10 //11111111
#define c8_kstride r11 //11111111
#define l1xptri r12 //
#define l1xptr r13 //11111111
#define kstride2 r9 //
#define x07x04_x03x00 r21:20 //111111--
#define x07x04 r21 //111111--
#define x03x00 r20 //11------
#define x17x14_x13x10 r15:14 //-1111111
#define x17x14 r15 //-1111111
#define x13x10 r14 //-111----
#define x27x24_x23x20 r21:20 //-------1
#define x27x24 r21 //-------1
#define x23x20 r20 //-------1
#define x37x34_x33x30 r15:14 //--------
#define x37x34 r15 //--------
#define x33x30 r14 //--------
#define sum01_sum00 r17:16
#define sum11_sum10 r19:18
#define sum01 r17
#define sum00 r16
#define sum11 r19
#define sum10 r18
/*=============================================================================*/
/* Prologue: unpack K (size | stride), save callee-saved regs, and set up the */
/* modifier registers used by the inner loop's post-incrementing loads.       */
{
allocframe(#32) //
kjump = lsr(k, #16) //size of k
n = lsr(n, #1) //divide by 2
kstride = zxth(k) //
} {
memd(sp+#0) = r17:16 //
memd(sp+#8) = r19:18 //
loop1(.L_loopN, n) //[ , P]for(i=0; i < n; i+=2){
ki = lsr(k, #20) //k/16
} {
memd(sp+#16) = r21:20 //
kstride2 = asl(kstride, #1) //2*kstride
l1xptr = addasl(ptr_x, kstride, #1) //l1 fetch 2 klines ahead
c32_kstride = sub(#32, kstride) //zag back to next column of lines
} {
kjump = sub(kstride2, kjump) //+32 - 4*k
c8_kstride = sub(#16, kstride) //zag back to next column of dwords
mkk = kstride //stride k
} {
kk_1 = c8_kstride //
l1xptri = l1xptr //[ , P]make temp copy
l1xptr = addasl(l1xptr, kstride, #1) //[ , P]advance by 2k strip
p2 = cmp.eq(r0, r0) //
}
/*=============================================================================*/
/* Outer loop: one pass per pair of rows. */
.balign 32
.L_loopN:
{
sum01_sum00 = combine(#0, #0) //
sum11_sum10 = combine(#0, #0) //
loop0(.L_loopK, ki) //[ , P]ki is k1/2 - 2
}
/*============================================================================*/
/* Inner loop: vraddub folds 16 bytes of each row into the running sums. */
.balign 32
.L_loopK:
{
x17x14_x13x10 = memd(ptr_x+#8) //[0,0]
x07x04_x03x00 = memd(ptr_x++mkk) //[0,0]
} {
sum01_sum00 +=vraddub(x17x14_x13x10, x07x04_x03x00) //[0,1]
x27x24_x23x20 = memd(ptr_x++kk_1) //[0,3]6
x37x34_x33x30 = memd(ptr_x+#8) //[0,3]
p2 = not(p2) //[0,3]
} {
sum11_sum10+=vraddub(x37x34_x33x30, x27x24_x23x20) //[0,4]
//dcfetch(l1xptri+#0) //[0,1]prefetch next line
if(!p2)l1xptri = add(l1xptri, kstride) //[0,1]next line
if(p2) l1xptri = add(l1xptri,c32_kstride) //[0,3]
}:endloop0
/* Reduce partial sums, scale by y_offset, add z_offset, store both rows. */
{
sum00 = add(sum01, sum00) //
sum01 = add(sum11, sum10) //
ptr_x = add(ptr_x, kjump) //skip back to next row
} {
sum00 = mpyi(sum00, y_offset) //
sum01 = mpyi(sum01, y_offset) //
} {
sum00 = add(sum00, z_offset) //
sum01 = add(sum01, z_offset) //
l1xptri = l1xptr //[ , P]make temp copy
} {
memd(ptr_xsum++#1<<3) = sum01_sum00 //
l1xptr = addasl(l1xptr, kstride, #1) //[ , P]advance by 2k strip
p2 = cmp.eq(r0, r0) //
}:endloop1
/*=============================================================================*/
{
r17:16 = memd(sp+#0) //restore stack and return
r19:18 = memd(sp+#8) //Q
} {
r21:20 = memd(sp+#16) //Q
} {
dealloc_return //Q
}
.L_end:
/*=============================================================================*/
.size gemsuma_asm, .L_end-gemsuma_asm
|
XiaoMi/nnlib | 28,432 | hexagon/asm_src/gvconvsum2dbbb_h.S | /*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*======================================================================*/
/* FUNCTIONS : gvmmpybbw_asm */
/* */
/* DESCRIPTION */
/* Perform gvm vector matrix multiply, result left at */
/* 32bits */
/* */
/* ARCHITECTURE : QDSP6V6 + HVX */
/*======================================================================*/
/* REVISION HISTORY: */
/* ================= */
/* */
/* Author Date Comments */
/* ------------------------------------------------------------- */
/* DJH 03/07/16 created */
/* DJH 05/10/16 added post add for x and y offset*/
/* DJH 07/10/16 rewrote pre-transpose */
/* DJH 09/16/16 fix over prefetch by 16 now 8 */
/*======================================================================*/
/* CYCLE-COUNT: */
/* -> K*N/256+11*N/4+24 */
/* */
/* MEMORY */
/* CODESIZE = 960 bytes */
/* STACK = 48 bytes */
/* ASSUMPTIONS */
/* y and z are 128 byte aligned */
/* x is 8byte aligned */
/* N%4=0 K%16=0 M%32=0 */
/* C MODEL */
/*======================================================================*/
#if 0
void gvmmpybbw_cn(uint8 * a, uint8 * b, int * c, int N, int M, int K) {
int i, j, k;
int32 sum;
uint8 a_val, b_val;
for (j=0; j < M; j++) {
for (i=0; i < N; i++) {
sum = 0;
for (k=0; k < K; k++) {
a_val = a[i*K+k];
b_val = b[k*M+j];
sum += a_val * b_val ;
}
c[i*M+j] = sum;
}
}
return;
}
#endif
/*=============================================================================*/
.text
.file "gvconvsum2dbbb_h.S"
/*=============================================================================*/
/* gvconvsum2dbbb_asm                                                          */
/*   2-D convolution with byte activations and byte weights, 32-bit           */
/*   accumulation (vrmpy), fused with per-output-column activation sums       */
/*   (vraddub, written through ptr_datasum) and requantization of the         */
/*   outputs to 8 bits.  A running output max vector is maintained at         */
/*   ptr_max and reduced by vshuff/vmax at the end.  Outputs are stored       */
/*   through vector predicates that handle 32-byte output alignment.          */
/*   NOTE(review): the file header above describes gvmmpybbw_asm; treat the   */
/*   register table below as the authoritative argument list.                 */
/*=============================================================================*/
.global gvconvsum2dbbb_asm
.balign 32
.type gvconvsum2dbbb_asm, @function
gvconvsum2dbbb_asm:
/*=============================================================================*/
#define ptr_x r0 //data
#define ptr_yi r1 //weights
#define ptr_z r2 //results
#define in_width r3 //(pad_x+in_width) * depth
#define out_width r4 //out_width
#define m r5 //is stride of the output matrix always mult of 32
#define stride_depth r8 //0 stride|depth between computations
#define filt_width r6 //1 filt_width mpy by depth
#define filt_height r6 //2 filt_hieght lines per filter
#define out_height r9 //3 number of vertical lines to perform
#define ptr_datasum r10 //4
#define ptr_weightsum r11 //5
#define ptr_max r16 //6
#define in_offset r14 //7
#define zsum r6 //8
#define ptr_biasbuf r14 //9
#define recip_level r15 //10
#define max_shr r0 //11
#define PREFETCH r21 //640 //256
/*=============================================================================*/
#define sel r8
#define len r9
#define filt_skip r13 //the skip back after the fot_width is done for next filt_y
#define stride3_1 r12 //used in prefetch
#define ptr_x0 r11
#define stride4 r13 //
#define stride r25
#define next_outputs r23 //jump to input ptr for next set of outputs
#define ptr_y r9 //
#define col_count r22
#define c4 r6
#define mstride r15
#define fetch_count r7
#define pre_x r28
#define round_amt r6
#define sel0 r16
#define sel1 r17
#define sel2 r18
#define sel3 r19
#define one r20
#define tmp_ptr_z r7
#define sum1_sum0 r1:0
#define sum1 r1
#define sum0 r0
#define sum3_sum2 r5:4
#define sum3 r5
#define sum2 r4
#define sum5_sum4 r25:24
#define sum5 r25
#define sum4 r24
#define sum7_sum6 r27:26
#define sum7 r27
#define sum6 r26
#define MSTRIDE M0 //stride*depth
#define M4STRIDE_1 M1 //3*stride*depth-16 0-1-2-3
//01234567
#define x07x04x03x00 r21:20 //11-----1
#define x07x04 r21 //11-----1
#define x03x00 r20 //1------1
#define x0fx0cx0bx08 r15:14 //1111---1
#define x0fx0c r15 //1111---1
#define x0bx08 r14 //111----1
#define x17x14x13x10 r19:18 //11------
#define x17x14 r19 //11------
#define x13x10 r18 //1-------
#define x1fx1cx1bx18 r17:16 //1111----
#define x1fx1c r17 //1111----
#define x1bx18 r16 //111-----
#define x27x24x23x20 r21:20 //---111--
#define x27x24 r21 //---111--
#define x23x20 r20 //---11---
#define x2fx2cx2bx28 r19:18 //---1111-
#define x2fx2c r19 //---11111
#define x2bx28 r18 //---1111-
#define x37x34x33x30 r15:14 //----11--
#define x37x34 r15 //----11--
#define x33x30 r14 //----1---
#define x3fx3cx3bx38 r17:16 //----1111
#define x3fx3c r17 //----1111
#define x3bx38 r16 //----111-
/*=============================================================================*/
#define z0 v0 //
#define z1 v1 //
#define z1z0 v1:0 //
#define z2 v2 //
#define z3 v3 //
#define z3z2 v3:2 //
#define x0 v4 //
#define x1 v5 //
#define x2 v6 //
#define x3 v7 //
#define y0 v8 //
#define y1 v9 //
#define y2 v10 //
#define y3 v11 //
#define vwsum v15 //
#define maxomaxe v13:12 //
#define maxe v12 //
#define maxo v13 //
#define vc8000 v14 //
#define biasvec v18 //
#define recipvec v16 //
#define rndvec v17 //
#define vpreds v19 //precomputed vector predicates for stores
/*=============================================================================*/
/* Prologue: build the alignment-predicate table vpreds, unpack stacked args, */
/* save callee-saved registers, and precompute strides/prefetch distances.    */
{
sel = ##0x01010101 // entry 0
len = #32 //
dcfetch(ptr_x) //
} {
q0 = vsetq(len); // 1000
len = #64 //
round_amt = ##0x00008000 //1<<15 rounding
} {
vpreds = vand(q0, sel) //
q2 = vsetq(len); // 1100
len = #96 //
rndvec = vsplat(round_amt) //
} {
q1 = and(q2, !q0) // 1100 & 0111 = 0100
q3 = vsetq(len) // 1110
sel = add(sel, sel) //02020202
} {
vpreds|= vand(q1, sel) //
q2 = and(q3, !q2) // 0010
q3 = not(q3) // 0001
sel = add(sel, sel) //04040404
} {
vpreds|= vand(q2, sel) //
sel = add(sel, sel) //08080808
dcfetch(ptr_x+#32) //
} {
vpreds|= vand(q3, sel) // entry 3 10101010 selects all zero
stride_depth = memw(sp+#0<<2) //extract stride*depth
filt_width = memw(sp+#1<<2) //
} {
p0 = cmp.eq(filt_width, #1) //
dcfetch(ptr_x+#32) //
filt_width = mpy(stride_depth.L, filt_width.L)
} {
memw(sp+#1<<2) = filt_width //
} {
filt_height = memw(sp+#2<<2) //extract filt_height
ptr_biasbuf = memw(sp+#9<<2) //
} {
biasvec = vmem(ptr_biasbuf+#0) //
recip_level = memw(sp+#10<<2) //
} {
recipvec = vsplat(recip_level) //
ptr_weightsum = memw(sp+#5<<2) //ptr pre computed weight sum
allocframe(#72) // 20<<2
} {
memd(sp+#32) = r25:24 //
memd(sp+#0) = r17:16 //
stride = lsr(stride_depth, #16) //
} {
memd(sp+#16) = r21:20 //
memd(sp+#24) = r23:22 //
stride_depth = mpy(stride_depth.H, stride_depth.L)
} {
M0 = stride_depth //
memd(sp+#8) = r19:18 //
memd(sp+#40) = r27:26 //
PREFETCH = #96
} {
vwsum = vmem(ptr_weightsum+#0) //
stride3_1 = addasl(stride_depth, stride_depth,#1) //3*stride
r16 = ##0x80000001 //max negative
} {
stride3_1 = sub(#16, stride3_1) //
next_outputs = mpyi(filt_height, in_width)
vc8000 = vsplat(r16) //
memw(sp+#56) = out_width //
} {
filt_width = memw(sp+#21<<2) //extract filt_width*depth
M1 = stride3_1 // add to
if(p0) PREFETCH = add(PREFETCH, #-32) //64 IF xx1 FILTER
} {
memw(sp+#68) = PREFETCH
p3 = cmp.gt(filt_width, #192) //is !(D <= 192) heuristic to fix fetch-ahead for small nx1 cases
PREFETCH = asl(stride_depth, #2) //mpyi(filt_width, #3) //used for line prefetch
} {
if(!p3) memw(sp+#68) = PREFETCH
out_height = memw(sp+#23<<2) //
} {
stride3_1 = add(stride3_1, #16) //used for line prefetch
stride4= asl(stride_depth, #1) //4-2*stride
memw(sp+#48) = ptr_x //
memw(sp+#52) = ptr_yi //
} {
memw(sp+#60) = m //
next_outputs = sub(next_outputs, stride4)
filt_skip = sub(filt_width, in_width)
filt_width = lsr(filt_width, #4) //filt_width / 16
} {
memw(sp+#64) = r28
ptr_max = memw(sp+#26<<2) //ptr pre computed max value in output
filt_width = add(filt_width, #-1)
p3 = cmp.gt(stride_depth, #96)
} {
stride3_1 = sub(stride3_1, stride_depth) //used for line prefetch
maxe= vmem(ptr_max+#0)
in_width = mpyi(in_width, stride) //
memw(sp+#21<<2) = filt_width //extract filt_width*depth /16 - 1
} {
PREFETCH = memw(sp+#68)
dcfetch(ptr_x+#64) //
} {
p1 = cmp.eq(out_height, #0) //
if(p1.new) jump:t .L_height_end //
}
/*============================================================================*/
/* Outer loop over output rows. */
.balign 32
.L_height:
{
ptr_x0 = memw(sp+#48) //
memw(sp+#23<<2) -= #1 //out_height = add(out_height, #-1) //
} {
col_count = memw(sp+#56) //out_width
memw(sp+#48) += in_width //ptr_x += in_width
pre_x = add(ptr_x0, PREFETCH)
}
/* Loop over output columns: 4 output vectors (z0..z3) per pass. */
.balign 32
.L_width:
{
ptr_y = memw(sp+#52) //ptr_yi //[P, 0] initialize filter pointer
filt_height = memw(sp+#22<<2) //extract filt_height
fetch_count = #0 //#0
} {
y0 = vmem(ptr_y++#2) //[0, 0]32x4
z1z0 = vcombine(vwsum, vwsum) //[P, 0]
dcfetch(pre_x)
} {
loop1(.L_filt_height, filt_height) //[P, 0]for(filt_y=0; filt_y < n; filt_y+=1){
y1 = vmem(ptr_y+#-1) //[0, 1]32x4
z3z2 = vcombine(vwsum, vwsum) //[P, 0]
pre_x = add(pre_x, stride_depth)
} {
x0fx0cx0bx08 = memd(ptr_x0+#8) //[0, 2]
x07x04x03x00 = memd(ptr_x0++MSTRIDE) //[0, 2]
sum1_sum0 = combine(#0, #0) //[P, 0]
sum3_sum2 = combine(#0, #0) //[P, 0]
} {
sum5_sum4 = combine(#0, #0) //[P, 0]
sum7_sum6 = combine(#0, #0) //[P, 0]
x1fx1cx1bx18 = memd(ptr_x0+#8) //[0, 3]
x17x14x13x10 = memd(ptr_x0++MSTRIDE) //[0, 3]
}
/* Loop over filter rows; the width loop below is software-pipelined. */
.balign 32
.L_filt_height:
{
filt_width = memw(sp+#21<<2) //extract filt_width*depth /16 - 1
sum1_sum0 += vraddub(x0fx0cx0bx08, x07x04x03x00) //[0, 4]
dcfetch(pre_x)
pre_x = add(pre_x, stride_depth)
} {
loop0(.L_filt_width, filt_width) //[P, 0]ki is k1/16 - 1
z0.uw += vrmpy(y0.ub, x03x00.ub) //[0, 4]
p3 = cmp.eq(fetch_count, #1)
} {
sum3_sum2 += vraddub(x1fx1cx1bx18, x17x14x13x10) //[0, 5]
z1.uw += vrmpy(y0.ub, x13x10.ub) //[0, 5]
y2 = vmem(ptr_y++#2) //[0, 5]32x4
if(p3) pre_x = add(pre_x, stride3_1)
} {
z0.uw += vrmpy(y1.ub, x07x04.ub) //[0, 6]
z1.uw += vrmpy(y1.ub, x17x14.ub) //[0, 6]
y3 = vmem(ptr_y+#-1) //[0, 6]32x4
fetch_count = sub(#1, fetch_count)
} {
z0.uw += vrmpy(y2.ub, x0bx08.ub) //[0, 7]
z1.uw += vrmpy(y2.ub, x1bx18.ub) //[0, 7]
x2fx2cx2bx28 = memd(ptr_x0+#8) //[0, 7]
x27x24x23x20 = memd(ptr_x0++MSTRIDE) //[0, 7]
} {
z0.uw += vrmpy(y3.ub, x0fx0c.ub) //[0, 8]
z1.uw += vrmpy(y3.ub, x1fx1c.ub) //[0, 8]
x3fx3cx3bx38 = memd(ptr_x0+#8) //[0, 8]
x37x34x33x30 = memd(ptr_x0++M4STRIDE_1)//[0, 8]
} {
sum5_sum4 += vraddub(x2fx2cx2bx28, x27x24x23x20) //[0, 9]
z2.uw += vrmpy(y0.ub, x23x20.ub) //[0, 9]
} {
p0 = cmp.eq(filt_width,#0)
if (p0.new) jump:nt .L_skip
}
.balign 32
.L_filt_width:
{
sum7_sum6 += vraddub(x3fx3cx3bx38, x37x34x33x30) //[0,10]
z3.uw += vrmpy(y0.ub, x33x30.ub) //[0,10]
y0 = vmem(ptr_y++#2) //[1, 0]32x4
dcfetch(pre_x)
} {
z2.uw += vrmpy(y1.ub, x27x24.ub) //[0,11]
z3.uw += vrmpy(y1.ub, x37x34.ub) //[0,11]
y1 = vmem(ptr_y+#-1) //[1, 1]32x4
pre_x = add(pre_x, stride_depth)
} {
z2.uw += vrmpy(y2.ub, x2bx28.ub) //[0,12]
z3.uw += vrmpy(y2.ub, x3bx38.ub) //[0,12]
x0fx0cx0bx08 = memd(ptr_x0+#8) //[1, 2]
x07x04x03x00 = memd(ptr_x0++MSTRIDE) //[1, 2]
} {
z2.uw += vrmpy(y3.ub, x2fx2c.ub) //[0,13]
z3.uw += vrmpy(y3.ub, x3fx3c.ub) //[0,13]
x1fx1cx1bx18 = memd(ptr_x0+#8) //[1, 3]
x17x14x13x10 = memd(ptr_x0++MSTRIDE) //[1, 3]
} {
sum1_sum0 += vraddub(x0fx0cx0bx08, x07x04x03x00) //[1, 4]
z0.uw += vrmpy(y0.ub, x03x00.ub) //[1, 4]
dcfetch(pre_x)
pre_x = add(pre_x, stride_depth)
} {
sum3_sum2 += vraddub(x1fx1cx1bx18, x17x14x13x10) //[1, 5]
z1.uw += vrmpy(y0.ub, x13x10.ub) //[1, 5]
y2 = vmem(ptr_y++#2) //[1, 5]32x4
p3 = cmp.eq(fetch_count, #1)
} {
z0.uw += vrmpy(y1.ub, x07x04.ub) //[1, 6]
z1.uw += vrmpy(y1.ub, x17x14.ub) //[1, 6]
y3 = vmem(ptr_y+#-1) //[1, 6]32x4
dcfetch(pre_x)
} {
z0.uw += vrmpy(y2.ub, x0bx08.ub) //[1, 7]
z1.uw += vrmpy(y2.ub, x1bx18.ub) //[1, 7]
x2fx2cx2bx28 = memd(ptr_x0+#8) //[1, 7]
x27x24x23x20 = memd(ptr_x0++MSTRIDE) //[1, 7]
} {
z0.uw += vrmpy(y3.ub, x0fx0c.ub) //[1, 8]
z1.uw += vrmpy(y3.ub, x1fx1c.ub) //[1, 8]
x3fx3cx3bx38 = memd(ptr_x0+#8) //[1, 8]
x37x34x33x30 = memd(ptr_x0++M4STRIDE_1)//[1, 8]
} {
fetch_count = sub(#1, fetch_count)
sum5_sum4 += vraddub(x2fx2cx2bx28, x27x24x23x20) //[1, 9]
z2.uw += vrmpy(y0.ub, x23x20.ub) //[1, 9]
if(p3) pre_x = add(pre_x, stride3_1) //[1, 9]
}:endloop0
.L_skip:
{
sum7_sum6 += vraddub(x3fx3cx3bx38, x37x34x33x30) //[1,10]
z3.uw += vrmpy(y0.ub, x33x30.ub) //[1,10]
y0 = vmem(ptr_y++#2) //[0, 0]32x4
ptr_x0 = sub(ptr_x0, filt_skip) //[E, 0]move to next line ptr_y keeps going
} {
z2.uw += vrmpy(y1.ub, x27x24.ub) //[1,11]
z3.uw += vrmpy(y1.ub, x37x34.ub) //[1,11]
y1 = vmem(ptr_y+#-1) //[0, 1]32x4
PREFETCH = memw(sp+#68) //
} {
pre_x = add(ptr_x0, PREFETCH)
dcfetch(ptr_x0+#64)
z2.uw += vrmpy(y2.ub, x2bx28.ub) //[1,12]
fetch_count = #0
} {
z3.uw += vrmpy(y2.ub, x3bx38.ub) //[1,12]
x0fx0cx0bx08 = memd(ptr_x0+#8) //[0, 2]
x07x04x03x00 = memd(ptr_x0++MSTRIDE) //[0, 2]
pre_x = add(pre_x, stride_depth)
} {
z2.uw += vrmpy(y3.ub, x2fx2c.ub) //[1,13]
z3.uw += vrmpy(y3.ub, x3fx3c.ub) //[1,13]
x1fx1cx1bx18 = memd(ptr_x0+#8) //[0, 3]
x17x14x13x10 = memd(ptr_x0++MSTRIDE) //[0, 3]
}:endloop1
/* Epilogue of the column pass: finish data sums, add biases, requantize the */
/* four accumulators to 8 bits, and store through alignment predicates.      */
{
ptr_x0 = sub(ptr_x0, next_outputs) //
in_offset = memw(sp+#27<<2) //+18+7
zsum = memw(sp+#28<<2) //+18+8res as zsum
p0 = cmp.gt(col_count, #1) //#1
} {
sum0 = zsum //
sum1 = add(sum0, sum1) //
ptr_datasum = memw(sp+#24<<2) //data sum ptr
one = #1 //
} {
sum0 += mpyi(in_offset, sum1) //
mstride = memw(sp+#60) //result matrix stride copied to separate reg
sel0 = extractu(ptr_z, #2, #5) //extract alignment 32bytes
PREFETCH = memw(sp+#68) //
} {
memw(ptr_datasum++#1<<2) = sum0 //
x0 = vsplat(sum0) //
y0 = rndvec //y0 = 0x8000
sel0 = asl(one, sel0) //
} {
sel0 = vsplatb(sel0) //01 to 01010101
z0.w = vadd(z0.w, x0.w) //
sum2 = zsum //#1
sum3 = add(sum2, sum3) //#1
} {
maxe.w = vmax(maxe.w, z0.w) //
z0.w = vadd(z0.w, biasvec.w) //
q0 = vand(vpreds, sel0) //
sum2 += mpyi(in_offset, sum3) //#1
} /*{
max_shr = memw(sp+#31<<2) // 20<<2+11<<2
} */{
one = mux(p0, #1, #16) //#1
x1 = vsplat(sum2) //#1
y1 = rndvec //#1y1 = 0x8000
p1 = cmp.gt(col_count, #2) //#2
} /*{
z0.w = vasr(z0.w, max_shr)
} */{
pre_x = add(ptr_x0, PREFETCH) //pre_x, next_outputs) //
y0.w += vmpyie(z0.w, recipvec.uh) //
if(p0)memw(ptr_datasum++#1<<2) = sum2 //#1
z1.w = vadd(z1.w, x1.w) //#1
} {
x1.w = vadd(z1.w, biasvec.w) //#1
if(!p0) z1 = vc8000 //#1
sum4 = zsum //#2
sum5 = add(sum4, sum5) //#2
} {
y0.h = vpacko(y0.w, y0.w) //>>16
maxe.w = vmax(maxe.w, z1.w) //#1
sum4 += mpyi(in_offset, sum5) //#2
y2 = rndvec //#2y2 = 0x8000
} /*{
x1.w = vasr(x1.w, max_shr)
}*/ {
y1.w += vmpyie(x1.w, recipvec.uh) //#1
if(p1)memw(ptr_datasum++#1<<2) = sum4 //#2
p2 = cmp.gt(col_count, #3) //#3
tmp_ptr_z = add(ptr_z, mstride) //
} {
y0.ub = vpack(y0.h, y0.h):sat //sat8 <0, >255
x2 = vsplat(sum4) //#2
dcfetch(ptr_x0)
sel1 = extractu(tmp_ptr_z, #2, #5) //#1
} {
if(q0) vmem(ptr_z+#0):nt = y0 //[E, ]store first 32bytes
ptr_z = add(ptr_z, mstride) //
y1.h = vpacko(y1.w, y1.w) //#1>>16
z2.w = vadd(z2.w, x2.w) //#2
} {
x2.w = vadd(z2.w, biasvec.w) //#2
if(!p1) z2 = vc8000 //#2
dcfetch(ptr_x0+#32)
sel1 = asl(one, sel1) //#1
} {
y1.ub = vpack(y1.h, y1.h):sat //#1sat8 <0, >255
sum6 = zsum //#3
sum7 = add(sum6, sum7) //#3
sel1 = vsplatb(sel1) //#102 -> 02020202
} /*{
x2.w = vasr(x2.w, max_shr)
}*/ {
maxe.w = vmax(maxe.w, z2.w) //#2
y2.w += vmpyie(x2.w, recipvec.uh) //#2
sum6 += mpyi(in_offset, sum7) //#3
y3 = rndvec //#3y3 = 0x8000
} {
x3 = vsplat(sum6) //#3
q1 = vand(vpreds, sel1) //#1
dcfetch(ptr_x0+#64)
} {
one = mux(p1, #1, #16) //#2
if(p0)tmp_ptr_z = add(tmp_ptr_z,mstride) //#1
z3.w = vadd(z3.w, x3.w) //#3
if(q1)vmem(ptr_z+#0):nt = y1 //#1[E, ]
} {
if(p0)ptr_z = add(ptr_z, mstride) //#1
if(p2)memw(ptr_datasum++#1<<2) = sum6 //#3
x3.w = vadd(z3.w, biasvec.w) //#3
y2.h = vpacko(y2.w, y2.w) //#2>>16
} /* {
x3.w = vasr(x3.w, max_shr)
} */ {
sel2 = extractu(tmp_ptr_z, #2, #5) //#2
y3.w += vmpyie(x3.w, recipvec.uh) //#3
if(p1)tmp_ptr_z = add(tmp_ptr_z, mstride) //#2
memw(sp+#24<<2) = ptr_datasum //data sum ptr
} {
sel2 = asl(one, sel2) //#2
sel3 = extractu(tmp_ptr_z, #2, #5) //#3
one = mux(p2, #1, #16) //#3
y2.ub = vpack(y2.h, y2.h):sat //#2sat8 <0, >255
} {
sel2 = vsplatb(sel2) //#202 -> 02020202
y3.h = vpacko(y3.w, y3.w) //#3>>16
if(!p2) z3 = vc8000 //#3
sel3 = asl(one, sel3) //#3
} {
sel3 = vsplatb(sel3) //#302 -> 02020202
col_count = add(col_count, #-4) //
maxe.w = vmax(maxe.w, z3.w) //#3
q2 = vand(vpreds, sel2) //#2
} {
if(q2)vmem(ptr_z+#0):nt = y2 //#2[E, ]
if(p1)ptr_z = add(ptr_z, mstride) //#2
y3.ub = vpack(y3.h, y3.h):sat //#3sat8 <0, >255
q3 = vand(vpreds, sel3) //#3
} {
if(q3)vmem(ptr_z+#0):nt = y3 //#3[E, ]
if(p2)ptr_z = add(ptr_z, mstride) //#3
p3 = cmp.gt(col_count, #0) //
if(p3.new) jump:t .L_width //
}//end cols per line
{
out_height = memw(sp+#23<<2) //
PREFETCH = memw(sp+#68) //
} {
p1 = cmp.eq(out_height, #0) //
if(!p1.new) jump:t .L_height //
}//end lines per block
/* Final max reduction: 5 vshuff/vmax passes fold the max vector down. */
.L_height_end:
{
loop0(.L_peak, #5) //[P, 0]
c4 = #4 //
ptr_max = memw(sp+#26<<2) //ptr pre computed max value in output
}
.L_peak:
{
maxomaxe=vshuff(maxe,maxe,c4) //[0, 0]
} {
maxe.w = vmax(maxo.w, maxe.w) //[0, 1]
c4 = add(c4, c4) //[0, 1]
}:endloop0
{ vmem(ptr_max+#0) = maxe //[E, 0]
}
/*=============================================================================*/
{ r17:16 = memd(sp+#0) //restore stack
r19:18 = memd(sp+#8) //Q
} {
r21:20 = memd(sp+#16) //Q
r23:22 = memd(sp+#24) //Q
} {
r25:24 = memd(sp+#32) //Q
r27:26 = memd(sp+#40) //Q
} {
r28 = memw(sp+#64)
dealloc_return //Q
}
.L_end:
/*=============================================================================*/
.size gvconvsum2dbbb_asm, .L_end-gvconvsum2dbbb_asm
|
XiaoMi/nnlib | 9,528 | hexagon/asm_src/im2col33322_h.S | /*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
*/
/*
Function
--------
im2col for filter 3x3, depth 3, padding = 2, stride = 2, non-same (no padding applied)
pack rows 0,1,2 into sequence, advance by 1 element, repack
void im2col33322_hvx(uchar * in, uchar * im2col_data,
int x_offset, uchar * delta_tables,
startoutrow, numoutrows);
each output row contains 149 packed elements, 32bytes each
===============================================================================*/
#define ptrIn r0
#define ptrOut r1
#define xoffset r2
#define ptrDelta r3
#define startorow r4
#define numorows r5
//scaler regs
#define sel0 r7
#define sel1 r8
#define sel2 r9
#define sel3 r10
#define sel4 r11
#define sel5 r12
#define sel6 r13
#define sel7 r27
#define sel_line0 r2
#define sel_line2 r6
#define offset r15
#define ptrRow0 r15
#define ptrRow1 r16
#define ptrRow2 r17
#define rot0 r18
#define optr r19
#define cm18 r20
#define cm0 r25
#define cm9 r21
#define cm32 r22
#define iwidth r23
#define owidth r24
#define row_cnt r26
#define line_count r3
//vector regs
#define vpred0 v0
#define vpred1 v1
#define vpred2 v2
#define vpred3 v3
#define in0 v4
#define in1 v5
#define in2 v6
#define out012 v7
#define vxoffset v8
#define vpredo v9
#define vq0 q2
#define vq1 q3
#define vq2 q3
#define vq3 q3
#define vq31 q1 //select prolog,kernle,epilog padding
#define vq5 q3
#define vq6 q3
/* ============================================================================ */
.global im2col33322_hvx
.type im2col33322_hvx, @function
.balign 32
im2col33322_hvx:
/* ============================================================================ */
{ allocframe(#64)
} {
vpredo = vmem(ptrDelta+#0)
xoffset = vsplatb(xoffset)
} {
memd(sp+#0) = r17:16
memd(sp+#8) = r19:18
sel0 = #32
vxoffset = vsplat(xoffset)
} {
memd(sp+#16)= r21:20
memd(sp+#24)= r23:22
} {
M1 = sel0
memd(sp+#32)= r25:24
memd(sp+#40)= r27:26
sel0 = #120
} {
M0 = sel0
iwidth = #897
sel0 = ##0x01010101
} {
sel1 = add(sel0, sel0)
sel2 = asl(sel0, #2)
sel3 = asl(sel0, #3)
owidth = #4768 //149*32
} {
sel4 = add(sel3, sel3)
sel5 = asl(sel3, #2)
sel6 = asl(sel3, #3)
} {
sel7 = add(sel6, sel6)
line_count = startorow
numorows = add(numorows, startorow)
}
/* ============================================================================ */
.L_lines:
{
optr = ptrOut
cm0 = and(ptrOut,#96) //is it 0, 32,64 or 96byte alignment
ptrOut = add(ptrOut,owidth)
offset = add(line_count, line_count)
} {
cm0 = sub(#0, cm0)
offset = mpyi(offset, iwidth)
} {
vpred0 = vror(vpredo, cm0) //adjust guidance for 64b alignment
ptrRow0 = add(ptrIn, offset) //retard pointer
cm32 = #-32
cm9 = add(cm0, #-9)
} {
ptrRow1 = add(ptrRow0, iwidth) //0*299*3-3
vpred1 = vror(vpred0, cm32)
} {
ptrRow2 = add(ptrRow1, iwidth) //1*299*3-3
vpred2 = vror(vpred1, cm32)
cm18= add(cm0, #-18)
} {
vpred3 = vror(vpred2, cm32)
row_cnt = #0
loop1(.L_outer, #8)
}
/* ============================================================================ */
.balign 32
.L_outer:
{ in0 = vmemu(ptrRow0++M0)
} {
in1 = vmemu(ptrRow1++M0)
} {
in2 = vmemu(ptrRow2++M0)
rot0 =#-26
} {
in0 = vror(in0, cm0)
loop0(.L_loop0, #5) //create 16 x 42 values
} {
in1 = vror(in1, cm9)
vq31 = vand(vpred1, sel3) //[0,5]
} {
in2 = vror(in2, cm18)
vq1 = vand(vpred0, sel1) //[0,0]
} {
out012 = vmux(vq1, in1, in0) //[0,1]
vq2 = vand(vpred0, sel2) //[0,1]
in0 = vror(in0, rot0) //[0,1]
}
/* ============================================================================ */
.balign 32
.L_loop0:
{ out012 = vmux(vq2, in2, out012) //[0,2]
vq3 = vand(vpred0, sel3) //[0,2]
in1 = vror(in1, rot0) //[0,2]
row_cnt = add(row_cnt, #32) //[0,2]
} {
out012 =vmux(vq3,vxoffset,out012)//[0,3]
vq0 = vand(vpred0, sel0) //[0,3]
in2 = vror(in2, rot0) //[0,3]
vq1 = vand(vpred1, sel1) //[0,3]
} {
if(vq0) vmem(optr++M1) = out012 //[0,4]advance 0
out012 = vmux(vq1, in1, in0) //[0,4]
vq2 = vand(vpred1, sel2) //[0,4]
in0 = vror(in0, rot0) //[0,4]
} {
out012 = vmux(vq2, in2, out012) //[0,5]
in1 = vror(in1, rot0) //[0,5]
p2 = cmp.eq(row_cnt, owidth) //[0,5]
if(p2.new) jump:nt .L_break //[0,8]
} {
out012 =vmux(vq31,vxoffset,out012)//[0,6]
vq0 = vand(vpred1, sel0) //[0,6]
in2 = vror(in2, rot0) //[0,6]
vq1 = vand(vpred2, sel1) //[0,6]
} {
if(vq0) vmem(optr++M1) = out012 //[0,7]advance 1
out012 = vmux(vq1, in1, in0) //[0,7]
vq2 = vand(vpred2, sel2) //[0,7]
in0 = vror(in0, rot0) //[0,7]
} {
out012 = vmux(vq2, in2, out012) //[0,8]
vq3 = vand(vpred2, sel3) //[0,8]
in1 = vror(in1, rot0) //[0,8]
} {
out012 =vmux(vq3,vxoffset,out012)//[0,9]
vq0 = vand(vpred2, sel0) //[0,9]
in2 = vror(in2, rot0) //[0,9]
vq1 = vand(vpred3, sel1) //[0,9]
} {
if(vq0) vmem(optr++M1) = out012 //[0,10]advance 2
out012 = vmux(vq1, in1, in0) //[0,10]
vq2 = vand(vpred3, sel2) //[0,10]
in0 = vror(in0, rot0) //[0,10]
} {
out012 = vmux(vq2, in2, out012) //[0,11]
vq3 = vand(vpred3, sel3) //[0,11]
in1 = vror(in1, rot0) //[0,11]
row_cnt = add(row_cnt, #96) //[0,8]
} {
out012 =vmux(vq3,vxoffset,out012)//[0,12]
vq0 = vand(vpred3, sel0) //[0,12]
in2 = vror(in2, rot0) //[0,12]
vq1 = vand(vpred0, sel1) //[1,0]
} {
if(vq0) vmem(optr++M1) = out012 //[0,13]advance 3
out012 = vmux(vq1, in1, in0) //[1,1]
vq2 = vand(vpred0, sel2) //[1,1]
in0 = vror(in0, rot0) //[1,1]
}:endloop0:endloop1
/* ============================================================================ */
.balign 32
.L_break:
{
line_count = add(line_count, #1)
} {
p0 = cmp.eq(line_count,numorows)
if(!p0.new) jump:t .L_lines
}
/* ============================================================================ */
{
r17:16 = memd(sp+#0)
r19:18 = memd(sp+#8)
} {
r21:20 = memd(sp+#16)
r23:22 = memd(sp+#24)
} {
r25:24 = memd(sp+#32)
r27:26 = memd(sp+#40)
}
dealloc_return
/* ============================================================================ */
.L_end:
.size im2col33322_hvx, .L_end-im2col33322_hvx
|
XiaoMi/nnlib | 21,108 | hexagon/asm_src/dwconv2dbbb_s2_3x3_h.S | /*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
 * FUNCTIONS     : dwconv2dbbb_s2_3x3_asm
*
* DESCRIPTION
 * Perform 2d depthwise convolution using elements along depth, doing only simple
 * convolution. This is optimized for the case of a 3x3 filter with a horizontal stride of 2.
* Sums are scaled and saturated to 8bits. Max and Min accumulations are kept.
*
* ARCHITECTURE : QDSP6V6x + HVX
*
* REVISION HISTORY:
* =================
*
*
* Author Date Comments
* -------------------------------------------------------------
* DJH 06/26/19 created
*
* MEMORY
* CODESIZE = 768 bytes
* STACK = 64 bytes
* ASSUMPTIONS
* width multiple of 4 depth multiple of 32 aligned to 32bytes
* BIT ACCURATE C MODEL
*/
#if 0
void dwconv2dbbb_s2_3x3_cn(
uint8_t *in_buf,
uint8_t *filt,
uint8_t *out_buf,
int next_in_width,
int next_out_width,
int next_in_width_32,
int next_out_width_32,
int depth,
int out_width,
int out_height,
int filt_width,
int filt_height,
int filt_zero,
int32_t *bias_sum,
int32_t *max,
int recip_level,
int recip_shift,
int stride_height)
{
int out_y, d, out_x, ur, in_val, filt_val;
int out_z, filt_y, filt_x, buf_offset;
int out_width_pad = (out_width+3)&(~3);
int32_t sum, zum;
int64_t lsum ;
int o_filt_width = (filt_width+3)&(~3);
int stride_width = 2;
for (out_y = 0; out_y < out_height; out_y++) {
for (out_x = 0; out_x < out_width_pad; out_x+=4) {
for(d=0; d < depth/32; d++) {
for (out_z = 0; out_z < 32; out_z++) {
for(ur=0; ur < 4; ur++) {
sum = (int32_t)bias_sum[32*d+out_z];
zum = 0;
for (filt_y = 0; filt_y < filt_height; filt_y++) {
for (filt_x = 0; filt_x < o_filt_width; filt_x++) {
buf_offset = (out_y * stride_height + filt_y) * next_in_width
+ d * next_in_width_32
+ (out_x*stride_width + ur*stride_width + filt_x) * 32
+ out_z;
in_val = in_buf[buf_offset];
filt_val = filt[32*d*filt_height*o_filt_width
+ (o_filt_width*filt_y)*32
+ out_z*4 + 128*(filt_x/4)
+ (filt_x % 4)] ;
sum += (uint32_t)in_val*(int32_t)filt_val;
if(filt_x < filt_width)
zum += (uint32_t)in_val*(int32_t)filt_zero;
}
}
sum = sum - zum;
sum <<= recip_shift;
lsum = (int64_t)sum * ((int64_t)recip_level) + 0x40000000LL;
lsum = lsum >> 31;
sum = (int)lsum;
max[out_z] = (sum > max[out_z]) ? sum : max[out_z];
max[out_z+32] = (sum < max[out_z+32]) ? sum : max[out_z+32];
if(lsum < 0) lsum = 0; if(lsum > 0xffll) lsum = 0xffll;
out_buf[out_y * next_out_width
+ 32 * (out_x+ur)
+ d * next_out_width_32
+ out_z] = (uint8_t) lsum;
}//ur
}//out_z
}//d
}//out_x
}//out_y
return;
}
#endif
/* ----------------------------------------------------------------------------- */
/* ----------------------------------------------------------------------------- */
.text
.file "dwconv2dbbb_s2_3x3_h.S"
.global dwconv2dbbb_s2_3x3_asm
.balign 32
.type dwconv2dbbb_s2_3x3_asm, @function
dwconv2dbbb_s2_3x3_asm:
/* ----------------------------------------------------------------------------- */
//io registers
#define in_buf r0
#define filt r1
#define out_buf r2
#define next_in_width_depth r3
#define next_out_width_depth r4 //currently unused
#define next_in_width_32 r5
#define next_out_width_32 r6
#define depth r7
#define out_width r8
#define out_height r9
#define filt_height r6 //nc
#define filt_zero r10
#define bias_sum r11
#define ptr_max r12
#define recip_level r13
#define recip_shift r14
#define stride_v r28
#define in_left_skip r22
//state scaler registers
#define ptr_w0 r15
#define bias_ptr r16
#define ptr_x0 r17
#define ptr_x1 r18
#define ptr_x2 r19
#define ptr_xin r20
#define ptr_y r21
#define next_in_width_depth_stride r28
#define zzzz r10 //111111111111
#define _zzz r10 //111111111111
//vector registers
#define vrecip v0
#define max v1
#define min v2
#define bias_val v3
#define x0 v4 //___11_______
#define x1 v5 //_111________
#define x2 v13 //_111________
#define w0_210 v6 //_______111__
#define w1_210 v7 //_______111__
#define w2_210 v8 //_______111__
#define x0_ba98 v9 //__________11
#define x0_3210 v10 //111111111111
#define x1_ba98 v11 //__________11
#define x1_3210 v12 //111111111111
#define x2_ba98 v13 //__________11
#define x2_3210 v14 //111111111111
#define x0_7654 v30 //111111111111
#define x0_3232 v27 //111111111111
#define x0_5432 v27 //111111111111
#define x0_7676 v28 //111111111111
#define x0_9876 v29 //111111111111
#define x1_7654 v30 //111111111111
#define x1_3232 v26 //111111111111
#define x1_5432 v27 //111111111111
#define x1_7676 v29 //111111111111
#define x1_9876 v29 //111111111111
#define x2_7654 v30 //111111111111
#define x2_3232 v26 //111111111111
#define x2_5432 v27 //111111111111
#define x2_7676 v28 //111111111111
#define x2_9876 v29 //111111111111
#define s0 v15 //111111111111
#define s1 v16 //111111111111
#define s2 v17 //111111111111
#define s3 v18 //111111111111
#define z0 v19 //111111111111
#define z1 v20 //111111111111
#define z2 v21 //111111111111
#define z3 v22 //111111111111
#define d0 v23 //
#define d1 v24 //
#define d1d0 v23 //
#define d2 v25 //
#define d3 v26 //
#define d3d2 v25 //
#define d3210 v25 //
#define SSR <<1:rnd:sat:shift //simplfy mpy instruction
#define PV(VSRC) .word (0x1DFFE020+VSRC)
#define PS(SSRC) .word (0x1DFFE100+SSRC) //debug sca reg
/* =========================================================================== */
{ allocframe(#56) //0th entry on sbuf (56+8)/4=20
} {
memd(sp+#0) = r17:16
memd(sp+#8) = r19:18
} {
memd(sp+#16) = r21:20
memd(sp+#24) = r23:22
} {
next_out_width_32 = memw(sp+#16<<2) //
depth = memw(sp+#17<<2) //
p2 = !cmp.eq(r0,r0) //
} {
out_width = memw(sp+#18<<2) //
out_height = memw(sp+#19<<2) //
depth = lsr(depth, #5) //depth/32
ptr_y = out_buf //
} {
filt_zero = memw(sp+#21<<2) //
bias_sum = memw(sp+#22<<2) //
loop1(.L_depth, depth)
out_width = add(out_width, #3)
} {
ptr_max = memw(sp+#23<<2) //
recip_level = memw(sp+#24<<2) //
zzzz = vsplatb(filt_zero)
out_width = lsr(out_width, #2)
} {
recip_shift = memw(sp+#25<<2) //
max = vmem(ptr_max+#0)
ptr_xin = in_buf
} {
stride_v = memw(sp+#26<<2) //
in_left_skip= memw(sp+#28<<2) //
_zzz = lsr(zzzz, #8)
bias_ptr = bias_sum //
} {
next_in_width_depth_stride = \
mpyi(next_in_width_depth, stride_v)//
min = vmem(ptr_max+#1)
_zzz = asl(_zzz, in_left_skip)
ptr_w0 = filt
} {
vrecip = vmem(recip_level)
}
/* ----------------------------------------------------------------------------- */
.balign 32
.L_height:
.L_depth:
{ x0.tmp = vmem (ptr_xin+#0):nt //
ptr_x0 = add(ptr_xin, #128) //
x0.b = vshuff(x0.b)
ptr_x1 = add(ptr_xin, next_in_width_depth) //[0, 4]
} {
x1.tmp = vmem (ptr_x1++#1)
x1.b = vshuff(x1.b)
ptr_x2 = add(ptr_x1, next_in_width_depth) //[0, 4]
} {
x2.tmp = vmem (ptr_x2++#1)
x2.b = vshuff(x2.b)
} {
w0_210.tmp = vmem(ptr_w0++#1)
w0_210.w = vasl(w0_210.w, in_left_skip)
x0_3210.b = vshuff(x0.b)
} {
x0.tmp = vmem (ptr_x0++#1):nt //[0, 3]
x0.b = vshuff(x0.b) //[0, 3]
} {
w1_210.tmp = vmem(ptr_w0++#1)
w1_210.w = vasl(w1_210.w, in_left_skip)
x1_3210.b = vshuff(x1.b)
x0_3232.h = vshuffo(x0_3210.h, x0_3210.h) //[0, 5]
} {
x0_7654.b = vshuff(x0.b) //[0, 5]
p3 = sp1loop0(.L_width, out_width)
bias_val = vmem(bias_ptr++#1) //
ptr_xin = add(ptr_xin, next_in_width_32) //[DEPTH]
} {
x1.tmp = vmem (ptr_x0++#1) //[0, 4]
x1.b = vshuff(x1.b) //[0, 4]
x0_5432.h = vshuffe(x0_7654.h,x0_3232.h) //[0, 7]
} {
w2_210.tmp = vmem(ptr_w0++#1)
w2_210.w = vasl(w2_210.w, in_left_skip)
x2_3210.b = vshuff(x2.b)
s0.uw = vrmpy(x0_3210.ub, w0_210.ub) //[0, 6]filter even output
}
/* --------------------------------------------------------------------------- */
.balign 32
.L_width:
{ s2.uw = vrmpy(x0_7654.ub, w0_210.ub) //[0, 8]filter even output
x0_ba98.b = vshuff(x1.b) //[0, 6]
x0_7676.h = vshuffo(x0_7654.h, x0_7654.h) //[0, 7]
} {
z0.uw = vrmpy(x0_3210.ub, _zzz.ub) //[0, 7]filter even output
z1.uw = vrmpy(x0_5432.ub, _zzz.ub) //[0,10]filter even output
x0_9876.h = vshuffe(x0_ba98.h,x0_7676.h) //[0, 8]
d3d2.h = vpack(d3.w, d2.w):sat //[WIDTH, E]
} {
s1.uw = vrmpy(x0_5432.ub, w0_210.ub) //[0, 9]filter even output
x0.tmp = vmem (ptr_x1++#1) //[0, 8]
x0.b = vshuff(x0.b) //[0, 8]
} {
z2.uw = vrmpy(x0_7654.ub, _zzz.ub) //[0, 7]filter even output
z3.uw = vrmpy(x0_9876.ub, _zzz.ub) //[0,10]filter even output
x1.tmp = vmem(ptr_x1++#1) //[0, 9]
x1.b = vshuff(x1.b) //[0, 9]
} {
s3.uw = vrmpy(x0_9876.ub, w0_210.ub) //[0,11]filter even output
x1_7654.b = vshuff(x0.b) //[0,10]
x1_3232.h = vshuffo(x1_3210.h, x1_3210.h) //[0,10]
} {
s0.uw += vrmpy(x1_3210.ub, w1_210.ub) //[0,12]filter even output
x1_5432.h = vshuffe(x1_7654.h,x1_3232.h) //[0,12]
d3210.ub = vpack(d3d2.h, d1d0.h):sat //[WIDTH, E]
if(p2) vmem (ptr_y++#1) = d3210.new //[WIDTH, E]
} {
z0.uw += vrmpy(x1_3210.ub, _zzz.ub) //[0,13]filter even output
z2.uw += vrmpy(x1_7654.ub, _zzz.ub) //[0,13]filter even output
x1_ba98.b = vshuff(x1.b) //[0,11]
x1_7676.h = vshuffo(x1_7654.h, x1_7654.h) //[0,11]
} {
s2.uw += vrmpy(x1_7654.ub, w1_210.ub) //[0,14]filter even output
x1_9876.h = vshuffe(x1_ba98.h,x1_7676.h) //[0,14]
x0_3210 = x0_ba98 //[0, 9]
if(!p3)ptr_y = out_buf //
} {
s1.uw += vrmpy(x1_5432.ub, w1_210.ub) //[0,15]filter even output
x1_3210 = x1_ba98 //[0,15]
x0.tmp = vmem (ptr_x2++#1) //[0,14]
x0.b = vshuff(x0.b) //[0,14]
} {
s3.uw += vrmpy(x1_9876.ub, w1_210.ub) //[0,17]filter even output
x1.tmp = vmem (ptr_x2++#1) //[0,15]
x1.b = vshuff(x1.b) //[0,15]
x2_3210 = x2_ba98 //[0,21]
} {
z1.uw += vrmpy(x1_5432.ub, _zzz.ub) //[0,16]filter even output
z3.uw += vrmpy(x1_9876.ub, _zzz.ub) //[0,16]filter even output
x2_7654.b = vshuff(x0.b) //[0,16]
x2_3232.h = vshuffo(x2_3210.h, x2_3210.h) //[0,16]
} {
s0.uw += vrmpy(x2_3210.ub, w2_210.ub) //[0,18]filter even output
x2_5432.h = vshuffe(x2_7654.h,x2_3232.h) //[0,18]
x2_7676.h = vshuffo(x2_7654.h, x2_7654.h) //[0,17]
if(!p3)out_buf=add(out_buf,next_out_width_32) //[DEPTH]
} {
z0.uw += vrmpy(x2_3210.ub, _zzz.ub) //[0,19]filter even output
z2.uw += vrmpy(x2_7654.ub, _zzz.ub) //[0,19]filter even output
s0.w = vadd(s0.w, bias_val.w) //[WIDTH, P]
x2_ba98.b = vshuff(x1.b) //[0,17]
} {
s2.uw += vrmpy(x2_7654.ub, w2_210.ub) //[0,20]filter even output
x2_9876.h = vshuffe(x2_ba98.h,x2_7676.h) //[0,20]
s0.w = vsub(s0.w, z0.w) //
p2 = cmp.eq(r0,r0) //
} {
s1.uw += vrmpy(x2_5432.ub, w2_210.ub) //[0,21]filter even output
s2.w = vadd(s2.w, bias_val.w) //[WIDTH, P]
x0.tmp = vmem (ptr_x0++#1):nt //[1, 3]
x0.b = vshuff(x0.b) //[1, 3]
} {
z1.uw += vrmpy(x2_5432.ub, _zzz.ub) //[0,22]filter even output
z3.uw += vrmpy(x2_9876.ub, _zzz.ub) //[0,22]filter even output
s0.w = vasl(s0.w, recip_shift) //
s2.w = vsub(s2.w, z2.w) //
} {
s3.uw += vrmpy(x2_9876.ub, w2_210.ub) //[0,23]filter even output
s1.w = vadd(s1.w, bias_val.w) //[WIDTH, P]
x1.tmp = vmem (ptr_x0++#1):nt //[1, 4]
x1.b = vshuff(x1.b) //[1, 4]
} {
d0.w = vmpye(s0.w, vrecip.uh) //[0,15]multiply by 1/max
s1.w = vsub(s1.w, z1.w) //
s2.w = vasl(s2.w, recip_shift) //
} {
d0.w += vmpyo(s0.w, vrecip.h):SSR //[0,17]3
s3.w = vadd(s3.w, bias_val.w) //[WIDTH, P]
x0_3232.h = vshuffo(x0_3210.h, x0_3210.h) //[1, 5]
} {
d2.w = vmpye(s2.w, vrecip.uh) //[0,15]multiply by 1/max
s3.w = vsub(s3.w, z3.w) //
s1.w = vasl(s1.w, recip_shift) //
} {
d2.w += vmpyo(s2.w, vrecip.h):SSR //[0,17]3
min.w = vmin(min.w, d0.w) //[0,22]8 //0+2+1
max.w = vmax(max.w, d0.w) //[0,18]5 //0+2+1
} {
d1.w = vmpye(s1.w, vrecip.uh) //[0,22]multiply by 1/max
min.w = vmin(min.w, d2.w) //[0,22]8 //0+2+1
s3.w = vasl(s3.w, recip_shift) //
} {
d1.w += vmpyo(s1.w, vrecip.h):SSR //[0,23]9
max.w = vmax(max.w, d2.w) //[0,18]5 //0+2+1
x0_7654.b = vshuff(x0.b) //[1, 5]
} {
min.w = vmin(min.w, d1.w) //[0,27]13 //0+2+1
max.w = vmax(max.w, d1.w) //[0,26]12 //0+2+1
d3.w = vmpye(s3.w, vrecip.uh) //[0,22]multiply by 1/max
} {
d1d0.h = vpack(d1.w, d0.w):sat //[0,27]
d3.w += vmpyo(s3.w, vrecip.h):SSR //[0,23]9
x0_5432.h = vshuffe(x0_7654.h,x0_3232.h) //[1, 7]
} {
max.w = vmax(max.w, d3.w) //[0,26]12 //0+2+1
min.w = vmin(min.w, d3.w) //[0,27]13 //0+2+1
s0.uw = vrmpy(x0_3210.ub, w0_210.ub) //[1, 6]filter even output
}:endloop0:endloop1 //end width, depth
/* ----------------------------------------------------------------------------- */
{ d3d2.h = vpack(d3.w, d2.w):sat //[WIDTH, E]
out_height = add(out_height, #-1) //
p2 = !cmp.eq(r0,r0) //
bias_ptr = bias_sum //
} {
p0 = cmp.eq(out_height, #0) //
in_buf=add(in_buf,next_in_width_depth_stride) //stride
lc1 = depth //loop1(.L_depth, depth)
ptr_w0 = filt //
} {
ptr_xin = in_buf //
d3210.ub = vpack(d3d2.h, d1d0.h):sat //[WIDTH, E]
vmem (ptr_y+#0) = d3210.new //[WIDTH, E]
if(!p0) jump .L_height //
}//end height
/* ----------------------------------------------------------------------------- */
{ r17:16 = memd(sp+#0) //restore
vmem(ptr_max+#0) = max //
} {
r19:18 = memd(sp+#8) //restore
vmem(ptr_max+#1) = min //
} {
r21:20 = memd(sp+#16) //restore
r23:22 = memd(sp+#24) //restore
} {
dealloc_return //return
}
/* ----------------------------------------------------------------------------- */
.L_end:
.size dwconv2dbbb_s2_3x3_asm, .L_end-dwconv2dbbb_s2_3x3_asm
/* ----------------------------------------------------------------------------- */ |
XiaoMi/nnlib | 20,271 | hexagon/asm_src/gvconv2dbbbs1x4_d32_h_v66.S | /*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*======================================================================*/
/* FUNCTIONS     : gvconv2dbbbs1x4_v66_asm                              */
/*                                                                      */
/* DESCRIPTION                                                          */
/*        Perform 2d convolution with input depth to output;            */
/*        max and min are computed and the output is scaled to 8 bits   */
/* */
/* ARCHITECTURE : QDSP6V6 + HVX */
/*======================================================================*/
/* REVISION HISTORY: */
/* ================= */
/* */
/* Author Date Comments */
/* ------------------------------------------------------------- */
/* DJH 04/21/17 created */
/* DJH 05/12/17 update api precomputed filt_offset */
/* DJH 05/16/17 Hoisted loop0 around to prolog and */
/* epilog of loop1 */
/*======================================================================*/
#if 0
#endif
/*=============================================================================*/
.text
.file "gvconv2dbbbs1x4_d32_v66_h.S"
.global gvconv2dbbbs1x4_v66_asm
.balign 64
.type gvconv2dbbbs1x4_v66_asm, @function
gvconv2dbbbs1x4_v66_asm:
/*=============================================================================*/
/*=============================================================================*/
#define ptr_xi r0 //data aligned 128
#define ptr_wi r1 //weights aligned 128
#define ptr_zi r2 //results aligned 128
#define in_width r3 //(pad_l+in_width+pad_r) => 4 %4
#define out_next_row r4 //value in bytes to get to next full out row
#define out_width r5 //out_width_pad
#define stride_h_w r26 //0 stride_height|stride_width
#define in_depth r27 //1 %32
#define in_depth_stride_h_w r27:26
#define filt_width r8 //2 >= 1
#define filt_height r9 //3 >= 1filt_height lines per filter
#define filt_height_width r9:8 //
#define out_height r10 //4 >= 1 number of vertical lines to perform
#define ptr_filtsum r11 //5 aligned 128
#define ptr_max r12 //6 aligned 128
#define recip_level r13 //7 recip is 31bit unsigned 0x7f800000000LL / max
#define recip_level_ptr_max r13:12
#define out_next_d32 r19 //10
#define nslice r21 //11
#define recip_shamt r14 //12
/*=============================================================================*/
//#define stride_h r26 //0 stride_height|stride_width
#define ptr_x3 r7 //jump to input ptr for next set of outputs
#define filt_cnt0 r6 //
#define in_next_rows r15 //in_width * stride_h * in_depth for next output
#define ptr_x0 r16 //
#define ptr_x1 r17 //
#define ptr_w_next r18 //
#define filt_cnt r19 //
#define ptr_w r20 //
#define in_width_32 r22 //
#define ptr_x2 r23 //
#define ptr_z r24 //
#define col_count r25 //
#define next_ptr r27 //
#define pred_true cmp.eq(r0,r0) //
#define pred_false !cmp.eq(r0,r0) //
/*=============================================================================*/
#define PV32(VSRC) .word (0x1DFFE020+VSRC)
#define s0 v0 //
#define s1 v1 //
#define s1s0 v1:0 //
#define s2 v2 //
#define s3 v3 //
#define s3s2 v3:2 //
#define s3s2s1s0 v3:0 //
#define w0 v22 //
#define x0 v4 //
#define x1 v5 //
#define x2 v6 //
#define x3 v7 //
#define x3210 v6 //
#define x3_prev v16 //previous value
#define y0 v8 //
#define y1 v9 //
#define y2 v10 //
#define y3 v11 //
#define maxe v12 //
#define mine v13 //
#define wsum v14 //initialzed to in_offsey*wsum + biasoffset
#define recipvec v15 //
#define recip_sh_vec v17 //
#define RSS <<1:rnd:sat:shift //unverbose the insturction
/*=============================================================================*/
{ filt_height_width = memd(sp+#8) //
recip_level_ptr_max = memd(sp+#24) //
ptr_xi = and(ptr_xi, #-2) //make sure lsb is 0
sp = add(sp,#-64) //
} {
memd(sp+#40) = r27:26 //
in_depth_stride_h_w = memd(sp+#(64+0)) //
p2 = pred_false //
nop //
} {
memd(sp+#32) = r25:24 //
memd(sp+#24) = r23:22 //
col_count = out_width //out_width
in_next_rows= mpy(stride_h_w.H, in_depth.L) //
} {
memd(sp+#16) = r21:20 //
in_next_rows =mpyi(in_width,in_next_rows) //total vertical stride bytes
filt_height = mpy(filt_height.L,in_depth.L)//filt_height*in_depth
maxe = vmem(ptr_max+#0) //
} {
memd(sp+#8) = r19:18 //
filt_height = lsr(filt_height, #5) //filt_height * in_depth / 32
in_width_32 = asl(in_width, #5) //32 * in_width d32 line
mine = vmem(ptr_max+#1) //
} {
memd(sp+#0) = r17:16 //
ptr_filtsum = memw(sp+#21<<2) //ptr pre computed weight sum
filt_width = asl(filt_width, #1) //x4 to account for loop of 16 bytes
loop1(.L_filt_height, filt_height) //[P,0]for(filt_y=0;filt_y<height*in_depth/32;filt_y++){
} {
nslice = memw(sp+#(64+44)) //
recip_shamt = memw(sp+#(64+48)) //
filt_cnt0 = mpyi(filt_width, filt_height) //
filt_width = add(filt_width, #-2) //account for epilog
} {
memw(sp+#48) = ptr_xi // store ptr_xi
loop0(.L_filt_width, filt_width) //[P, 0]ki is k1/32 - 0
recip_sh_vec = vsplat(recip_shamt) //
nop //
}
.balign 64
/*=============================================================================*/
.L_depth:
{ ptr_w = ptr_wi //ptr_y=ptr_yi init filter pointer
out_height = memw(sp+#20<<2) //number of output lines
nslice = add(nslice,#-1) //
recipvec = vmem(recip_level++#1) //
} {
memw(sp+#56) = ptr_zi //
ptr_z = ptr_zi //
ptr_x2 = and(ptr_xi, #-128) //aligned
wsum = vmem(ptr_filtsum++#1) //
} {
s1s0 = vcombine(wsum, wsum) //[P, 0]initialize accumulators
s3s2 = vcombine(wsum, wsum) //[P, 0]initialize accumulators
z = vmem(ptr_x2+#0) //load 0-127 bytes into z buffer
ptr_x0 = ptr_xi //ptr_xi
}
/*=============================================================================*/
.L_height:
{ p3 = pred_true //
filt_cnt = sub(filt_cnt0, filt_height)
ptr_x3 = add(ptr_xi, #100) //reset ptr for next row of filter taps
z = vmem(ptr_x2+#1) //load 128-255
}
/*=============================================================================*/
.balign 64
.L_width:
.L_filt_height:
.L_filt_width:
{ w0.tmp = vmem(ptr_w++#1) //[1, 0]load weights
s3s2s1s0.w += vrmpyz(w0.b, ptr_x3.ub++) //[1, 0]perform mac across 4 streams with saem weights
if(p3)col_count=add(col_count,#-4) //count -=4 ptr_z += 128
} {
w0.tmp = vmem(ptr_w++#1) //[1, 1]load weights
s3s2s1s0.w += vrmpyz(w0.b, ptr_x3.ub++) //[1, 1]perform mac across 4 streams with saem weights
if(p3) ptr_x1 = add(ptr_x0, in_width_32) //[E, 0]move to next line ptr_y keeps going
filt_cnt = add(filt_cnt, #-1) //
} {
w0.tmp = vmem(ptr_w++#1) //[1, 2]load weights
s3s2s1s0.w += vrmpyz(w0.b, ptr_x3.ub++) //[1, 2]perform mac across 4 streams with saem weights
if(p3) ptr_x0 = add(ptr_x0, #128) //
} {
p3 = pred_false //
w0.tmp = vmem(ptr_w++#1) //[1, 3]load weights
s3s2s1s0.w += vrmpyz(w0.b, ptr_x3.ub++) //[1, 3]perform mac across 4 streams with saem weights
z = vmem(ptr_x3+#0) //load next stride=1 128 or stride=2 64 bytes
}:endloop0
/*=============================================================================*/
{ w0.tmp = vmem(ptr_w++#1) //[1, 0]load weights
s3s2s1s0.w += vrmpyz(w0.b, ptr_x3.ub++) //[1, 0]perform mac across 4 streams with saem weights
loop0(.L_filt_width, filt_width) //[P, 0]ki is k1/32 - 0
} {
w0.tmp = vmem(ptr_w++#1) //[1, 1]load weights
s3s2s1s0.w += vrmpyz(w0.b, ptr_x3.ub++) //[1, 1]perform mac across 4 streams with saem weights
filt_cnt = add(filt_cnt, #-1) //
} {
w0.tmp = vmem(ptr_w++#1) //[1, 2]load weights
s3s2s1s0.w += vrmpyz(w0.b, ptr_x3.ub++) //[1, 2]perform mac across 4 streams with saem weights
} {
w0.tmp = vmem(ptr_w++#1) //[1, 3]load weights
s3s2s1s0.w += vrmpyz(w0.b, ptr_x3.ub++) //[1, 3]perform mac across 4 streams with saem weights
z = vmem(ptr_x3+#0) //load next stride=1 128 or stride=2 64 bytes
} {
w0.tmp = vmem(ptr_w++#1) //[1, 0]load weights
s3s2s1s0.w += vrmpyz(w0.b, ptr_x3.ub++) //[1, 0]perform mac across 4 streams with saem weights
p0 = cmp.eq(filt_cnt, #0) //
} {
w0.tmp = vmem(ptr_w++#1) //[1, 1]load weights
s3s2s1s0.w += vrmpyz(w0.b, ptr_x3.ub++) //[1, 1]perform mac across 4 streams with saem weights
p1 = cmp.eq(col_count, #0) //[E0, 4]compare for branch
next_ptr = mux(p0, ptr_x0, ptr_x1) //
} {
w0.tmp = vmem(ptr_w++#1) //[1, 2]load weights
s3s2s1s0.w += vrmpyz(w0.b, ptr_x3.ub++) //[1, 2]perform mac across 4 streams with saem weights
//ptr_x2 = and(ptr_x1, #-128) //Taligned
ptr_x2 = and(next_ptr, #-128) //Taligned
} {
w0.tmp = vmem(ptr_w++#1) //[1, 3]load weights
s3s2s1s0.w += vrmpyz(w0.b, ptr_x3.ub ) //[1, 3]perform mac across 4 streams with saem weights
z = vmem(ptr_x2+#0) //Tload 0-127 bytes into z buffer
} {
ptr_x1 = add(ptr_x1, in_width_32) //[E, 0]move to next line ptr_y keeps going
//ptr_x3 = add(ptr_x1, #100) //Treset ptr for next row of filter taps
ptr_x3 = add(next_ptr, #100) //Treset ptr for next row of filter taps
z = vmem(ptr_x2+#1) //Tload 128-255
}:endloop1
/*=============================================================================*/
{ s0.w = vasl(s0.w, recip_sh_vec.w) //[E0, ]
ptr_w_next = ptr_w //[E0, ]
} {
s1.w = vasl(s1.w, recip_sh_vec.w) //[E0, ]
loop0(.L_filt_width, filt_width) //[P, 0]ki is k1/32 - 0
} {
y0.w = vmpye(s0.w, recipvec.uh) //[E0, 1](s2 * recip + rnd)>>31
loop1(.L_filt_height, filt_height) //[P,0]for(filt_y=0;filt_y<height*in_depth/32;filt_y++)
} {
y0.w+= vmpyo(s0.w, recipvec.h):RSS //[E0, ]<<1:rnd:sat:shift //[E0, 2]
s2.w = vasl(s2.w, recip_sh_vec.w) //[E0, ]
s0 = wsum //[P, 0]initialize accumulators
} {
maxe.w = vmax(maxe.w, y0.w) //[E0, 0]see if s0 is max
mine.w = vmin(mine.w, y0.w) //[E0, 0]see if s0 is min
y1.w = vmpye(s1.w, recipvec.uh) //[E0, 3](s2 * recip + rnd)>>31
} {
s3.w = vasl(s3.w, recip_sh_vec.w) //[E0, ]
y1.w+= vmpyo(s1.w, recipvec.h):RSS //[E0, ]<<1:rnd:sat:shift (s2 * recip + rnd)>>31
x3.h = vpack(y3.w, y2.w):sat //[E1, 1]#sat8 <0, >255
} {
maxe.w = vmax(maxe.w, y1.w) //[E0, 2]
y2.w = vmpye(s2.w, recipvec.uh) //[E0, 5](s2 * recip + rnd)>>31
x3210.ub = vpack(x3.h, x1.h):sat //[E1, .]#sat8 <0, >255
if(p2) vmem(ptr_z++#1):nt = x3210.new //[E1, .]store 2nd 32bytes
} {
mine.w = vmin(mine.w, y1.w) //[E0, 2]see if z0 is max
ptr_w = ptr_wi //[E0, 5]ptr_y=ptr_yi init filter pointer
y2.w+= vmpyo(s2.w, recipvec.h):RSS //[E0. ]<<1:rnd:sat:shift s2 * recip + rnd)>>31
x1.h = vpack(y1.w, y0.w):sat //[E1, 0]#>>16
} {
maxe.w = vmax(maxe.w, y2.w) //[E0, 3]
y3.w = vmpye(s3.w, recipvec.uh) //[E0, 7]#(s2 * recip + rnd)>>31
p2 = pred_true //[P, ]p2 = 1
s1 = wsum //[P, 0]initialize accumulators
} {
mine.w = vmin(mine.w, y2.w) //[E0, 4]see if z0 is max
y3.w+= vmpyo(s3.w, recipvec.h):RSS //[E0, 8](s2 * recip + rnd)>>31
p3 = pred_true //[P, ]
filt_cnt = sub(filt_cnt0, filt_height) //[P, ]
} {
s3s2 = vcombine(wsum,wsum) //[P, 0]initialize accumulators
mine.w = vmin(mine.w, y3.w) //[E0, 5]see if z0 is max
maxe.w = vmax(maxe.w, y3.w) //[E0, 4]
if(!p1) jump .L_width //[E1, 8]
}//cols per line kernel loop width
/*=============================================================================*/
{ out_height = add(out_height, #-1) //Prolog width
ptr_zi = add(ptr_zi, out_next_row) //EEnext out line for this depth segment
ptr_xi= add(ptr_xi,in_next_rows) //ptr_x+=in_width*stride_h*in_depth)
p1 = !cmp.eq(out_height, #1) //EE
} {
x3.h = vpack(y3.w, y2.w):sat //[E1, 1]#sat8 <0, >255
ptr_x2 = and(ptr_xi, #-128) //aligned
ptr_x0 = ptr_xi //ptr_xi
} {
col_count = out_width //out_width
if (p1) z = vmem(ptr_x2+#0) //load 0-127 bytes into z buffer
p2 = pred_false
} {
x3210.ub = vpack(x3.h, x1.h):sat //[E1, 3]#sat8 <0, >255
vmem(ptr_z+#0):nt = x3210.new //[E1, 6]store 2nd 32bytes
if (p1) jump:t .L_height //EE
ptr_z = ptr_zi //
}//end lines per block//last cols per line
/*=============================================================================*/
{
ptr_wi = ptr_w_next // set ptr of weight
ptr_zi = memw(sp+#56) //
out_next_d32 = memw(sp+#(64+40)) //
p1 = cmp.gt(nslice,#0)
} {
ptr_xi = memw(sp+#48) // restore ptr_xi
ptr_zi = add(ptr_zi,out_next_d32) //
if p1 jump .L_depth //
}
/*=============================================================================*/
{ vmem(ptr_max+#0) = maxe //[E, 0]32max
r17:16 = memd(sp+#0) //restore r16, r17from stack
} {
vmem(ptr_max+#1) = mine //[E, 0]32min
r19:18 = memd(sp+#8) //restore r18,r19
} {
r21:20 = memd(sp+#16) //restore r20,r11
r23:22 = memd(sp+#24) //restore r22,r13
} {
r25:24 = memd(sp+#32) //restore r24,r15
r27:26 = memd(sp+#40) //restore r26,r17
sp = add(sp,#64) //pop stack
jumpr r31 // return
}
.L_end:
/*=============================================================================*/
.size gvconv2dbbbs1x4_v66_asm, .L_end-gvconv2dbbbs1x4_v66_asm
/*=============================================================================*/
|
XiaoMi/nnlib | 19,689 | hexagon/asm_src/dwconv2dbbb_s1_3x3_h.S | /*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
 * FUNCTIONS    : dwconv2dbbb_s1_3x3_asm
 *
 * DESCRIPTION
 *   Perform 2d depthwise convolution using elements along depth, do only simple
 *   convolution. This is optimized for the case of a 3x3 filter with horizontal stride of 1.
* Sums are scaled and saturated to 8bits. Max and Min accumulations are kept.
*
* ARCHITECTURE : QDSP6V6x + HVX
*
* REVISION HISTORY:
* =================
*
*
* Author Date Comments
* -------------------------------------------------------------
* DJH 06/26/19 created
*
* MEMORY
* CODESIZE = 768 bytes
* STACK = 64 bytes
* ASSUMPTIONS
* width multiple of 4 depth multiple of 32 aligned to 128bytes
* BIT ACCURATE C MODEL
*/
#if 0
void dwconv2dbbb_s1_3x3_cn(
uint8_t *in_buf,
uint8_t *filt,
uint8_t *out_buf,
int next_in_width,
int next_out_width,
int next_in_width_32,
int next_out_width_32,
int depth,
int out_width,
int out_height,
int filt_width,
int filt_height,
int filt_zero,
int32_t *bias_sum,
int32_t *max,
int recip_level,
int recip_shift,
int stride_height)
{
int out_y, d, out_x, ur, in_val, filt_val;
int out_z, filt_y, filt_x, buf_offset;
int out_width_pad = (out_width+3)&(~3);
int32_t sum, zum;
int64_t lsum ;
int o_filt_width = (filt_width+3)&(~3);
int stride_width = 1;
for (out_y = 0; out_y < out_height; out_y++) {
for (out_x = 0; out_x < out_width_pad; out_x+=4) {
for(d=0; d < depth/32; d++) {
for (out_z = 0; out_z < 32; out_z++) {
for(ur=0; ur < 4; ur++) {
sum = (int32_t)bias_sum[32*d+out_z];
zum = 0;
for (filt_y = 0; filt_y < filt_height; filt_y++) {
for (filt_x = 0; filt_x < o_filt_width; filt_x++) {
buf_offset = (out_y * stride_height + filt_y) * next_in_width
+ d * next_in_width_32
+ (out_x*stride_width + ur*stride_width + filt_x) * 32
+ out_z;
in_val = in_buf[buf_offset];
filt_val = filt[32*d*filt_height*o_filt_width
+ (o_filt_width*filt_y)*32
+ out_z*4 + 128*(filt_x/4)
+ (filt_x % 4)] ;
sum += (uint32_t)in_val*(int32_t)filt_val;
if(filt_x < filt_width)
zum += (uint32_t)in_val*(int32_t)filt_zero;
}
}
sum = sum - zum;
sum <<= recip_shift;
lsum = (int64_t)sum * ((int64_t)recip_level) + 0x40000000LL;
lsum = lsum >> 31;
sum = (int)lsum;
max[out_z] = (sum > max[out_z]) ? sum : max[out_z];
max[out_z+32] = (sum < max[out_z+32]) ? sum : max[out_z+32];
if(lsum < 0) lsum = 0; if(lsum > 0xffll) lsum = 0xffll;
out_buf[out_y * next_out_width
+ 32 * (out_x+ur)
+ d * next_out_width_32
+ out_z] = (uint8_t) lsum;
}//ur
}//out_z
}//d
}//out_x
}//out_y
return;
}
#endif
/* ----------------------------------------------------------------------------- */
/* ============================================================================= */
.text
.file "dwconv2dbbb_s1_3x3_h.S"
.global dwconv2dbbb_s1_3x3_asm
.balign 32
.type dwconv2dbbb_s1_3x3_asm, @function
dwconv2dbbb_s1_3x3_asm:
/* ----------------------------------------------------------------------------- */
// Depthwise 2-d convolution, 3x3 filter, horizontal stride 1, d32 data layout.
// See the C reference model above (dwconv2dbbb_s1_3x3_cn) for exact semantics.
// In:    r0-r5 carry the first six arguments (Hexagon ABI); the remaining
//        arguments are read from the caller's stack via memw(sp+#n<<2) below.
// Out:   saturated-u8 results written through out_buf; running per-lane 32-bit
//        max/min kept in vmem(ptr_max+#0) / vmem(ptr_max+#1).
// Stack: allocframe(#56); callee-saved r16-r27 spilled in the prologue and
//        restored before dealloc_return.
// Loops: loop1 = depth/32 groups, sp1loop0 = pipelined width loop (4 outputs
//        per iteration), outer .L_height iterated by a conditional jump.
//i/o registers
#define in_buf                      r0
#define filt                        r1
#define out_buf                     r2
#define next_in_width_depth         r3
#define next_out_width_depth        r4   //currently unused
#define next_in_width_32            r5
#define next_out_width_32           r6
#define depth                       r7
#define out_width                   r8
#define out_height                  r9
#define filt_height                 r6   //nc
#define filt_zero                   r10
#define bias_sum                    r11
#define ptr_max                     r12
#define recip_level                 r13
#define recip_shift                 r14
#define stride_v                    r28
//state scaler registers
#define ptr_w0                      r15
#define bias_ptr                    r16
#define ptr_x0                      r17
#define ptr_x1                      r18
#define ptr_x2                      r19
#define ptr_xin                     r20
#define ptr_y                       r21
#define next_in_width_depth_stride  r28
#define zzzz                        r10  //111111111111
#define _zzz                        r10  //111111111111
#define zzz_                        r22  //111111111111
#define s8                          r23
//vector registers
#define vrecip                      v0
#define max                         v1
#define min                         v2
#define bias_val                    v3
#define x0                          v4   //___11_______
#define x1                          v5   //_111________
#define x2                          v14  //_111________
#define w0_210                      v6   //_______111__
#define w1_210                      v7   //_______111__
#define w2_210                      v8   //_______111__
#define w0210_                      v9   //_______111__
#define w1210_                      v11  //_______111__
#define w2210_                      v13  //_______111__
#define x0_3210                     v10  //111111111111
#define x1_3210                     v12  //111111111111
#define x2_3210                     v14  //111111111111
#define x0_7654                     v30  //111111111111
#define x1_7654                     v28  //111111111111
#define x2_7654                     v29  //111111111111
#define x0_3232                     v31  //111111111111
#define x1_3232                     v31  //111111111111
#define x2_3232                     v31  //111111111111
#define x0_5432                     v27  //111111111111
#define x1_5432                     v27  //111111111111
#define x2_5432                     v27  //111111111111
#define s0                          v15  //111111111111
#define s1                          v16  //111111111111
#define s2                          v17  //111111111111
#define s3                          v18  //111111111111
#define z0                          v19  //111111111111
#define z1                          v20  //111111111111
#define z2                          v21  //111111111111
#define z3                          v22  //111111111111
#define d0                          v23  //
#define d1                          v24  //
#define d1d0                        v23  //
#define d2                          v25  //
#define d3                          v26  //
#define d3d2                        v25  //
#define d3210                       v25  //
#define SSR <<1:rnd:sat:shift            //simplify mpy instruction
#define PV(VSRC) .word (0x1DFFE020+VSRC)
#define PS(SSRC) .word (0x1DFFE100+SSRC) //debug sca reg
/* =========================================================================== */
// Prologue: spill callee-saved pairs, fetch stack arguments, derive the loop
// trip counts (depth/32, ceil(out_width/4)) and splat the quantization scalars.
 { allocframe(#56)                             //0th entry on sbuf (56+8)/4=20
 } {
   memd(sp+#0)  = r17:16                       //
   memd(sp+#8)  = r19:18                       //
 } {
   memd(sp+#16) = r21:20                       //
   memd(sp+#24) = r23:22                       //
   ptr_xin = in_buf                            //
   ptr_y = out_buf                             //
 } {
   memd(sp+#32) = r25:24                       //
   memd(sp+#40) = r27:26                       //
   s8 = #8                                     //shift count used to split taps
   ptr_w0 = filt                               //
 } {
   bias_sum = memw(sp+#22<<2)                  //
   depth    = memw(sp+#17<<2)                  //
   p2 = !cmp.eq(r0,r0)                         //p2 = false: suppress 1st store
 } {
   bias_ptr = bias_sum                         //
   next_out_width_32 = memw(sp+#16<<2)         //
   depth = lsr(depth, #5)                      //depth/32
   out_width = memw(sp+#18<<2)                 //
 } {
   loop1(.L_depth, depth)                      //
   out_width = add(out_width, #3)              //
   filt_zero  = memw(sp+#21<<2)                //
   out_height = memw(sp+#19<<2)                //
 } {
   zzzz = vsplatb(filt_zero)                   //
   out_width = lsr(out_width, #2)              //ceil(out_width/4)
   ptr_max     = memw(sp+#23<<2)               //
   recip_level = memw(sp+#24<<2)               //
 } {
   _zzz = lsr(zzzz, #8)                        //
   stride_v = memw(sp+#26<<2)                  //
   max = vmem(ptr_max+#0)                      //
 } {
   zzz_ = asl(_zzz, #8)                        //
   min = vmem(ptr_max+#1)                      //
   recip_shift = memw(sp+#25<<2)               //
   next_in_width_depth_stride = mpyi(next_in_width_depth, stride_v)
 }
 {
   vrecip = vmem(recip_level)
 }
/* ----------------------------------------------------------------------------- */
       .balign 32
.L_height:
.L_depth:
// Pipeline prologue: load + byte-shuffle the first three input rows, load the
// three filter rows (each split into _210 and 210_ halves via vasl by 8),
// and issue the first accumulate before entering the pipelined width loop.
 { x0.tmp = vmem (ptr_xin+#0)
   x0.b = vshuff(x0.b)
   ptr_x1 = add(ptr_xin, next_in_width_depth)  //[0, 4]
   ptr_x0 = add(ptr_xin, #128)                 //
 } {
   x1.tmp = vmem (ptr_x1++#1)
   x1.b = vshuff(x1.b)
   ptr_x2 = add(ptr_x1, next_in_width_depth)   //[0, 4]
 } {
   p3 = sp1loop0(.L_width, out_width)
   x2.tmp = vmem (ptr_x2++#1)
   x2.b = vshuff(x2.b)
 } {
   x0_3210.b = vshuff(x0.b)
   w0_210.cur = vmem(ptr_w0++#1)
   w0210_.w = vasl(w0_210.w, s8)
 } {
   x1_3210.b = vshuff(x1.b)
   w1_210.cur = vmem(ptr_w0++#1)
   w1210_.w = vasl(w1_210.w, s8)
   ptr_xin = add(ptr_xin, next_in_width_32)    //[DEPTH]
 } {
   s0.uw = vrmpy(x0_3210.ub, w0_210.ub)        //[0, 6]filter even output
   x2_3210.b = vshuff(x2.b)
   w2_210.cur = vmem(ptr_w0++#1)
   w2210_.w = vasl(w2_210.w, s8)
 } {
#if __HEXAGON_ARCH__ < 62
   bias_val = vmem(bias_ptr++#1)               //
#endif
 }
/* --------------------------------------------------------------------------- */
// Pipelined width loop: each iteration produces 4 outputs (s0..s3), subtracts
// the filter-zero correction (z0..z3), scales/saturates via vrecip, updates
// the running max/min, and stores the previous iteration's packed result.
       .balign 32
.L_width:
 { x0.tmp = vmem (ptr_x0++#1)                  //[0, 0]
   x0.b = vshuff(x0.b)                         //[0, 0]
   s1.uw = vrmpy(x0_3210.ub, w0210_.ub)        //[0, 9]filter even output
 } {
   s0.uw += vrmpy(x2_3210.ub, w2_210.ub)       //[0,18]filter even output
   d3d2.h = vpack(d3.w, d2.w):sat              //[WIDTH, E]
#if __HEXAGON_ARCH__ >= 62
   if(!p3) bias_val = vmem(bias_ptr++#1)       //
#endif
 } {
   x0_7654.b = vshuff(x0.b)                    //[0, 4]
   x0_3232.h = vshuffo(x0_3210.h, x0_3210.h)   //[0, 4]
   s0.uw += vrmpy(x1_3210.ub, w1_210.ub)       //[0,12]filter even output
 } {
   s1.uw += vrmpy(x1_3210.ub, w1210_.ub)       //[0,15]filter even output
   x0_5432.h = vshuffe(x0_7654.h,x0_3232.h)    //[0, 6]
   x0.tmp = vmem (ptr_x1++#1)                  //[0, 6]
   x0.b = vshuff(x0.b)                         //[0, 8]
 } {
   z0.uw = vrmpy(x0_3210.ub, _zzz.ub)          //[0, 7]filter even output
   z1.uw = vrmpy(x0_3210.ub, zzz_.ub)          //[0,10]filter even output
   d3210.ub = vpack(d3d2.h, d1d0.h):sat        //[WIDTH, E]
   if(p2) vmem (ptr_y++#1) = d3210.new         //[WIDTH, E]
 } {
   s2.uw = vrmpy(x0_5432.ub, w0_210.ub)        //[0, 8]filter even output
   x1_7654.b = vshuff(x0.b)                    //[0,10]
   x1_3232.h = vshuffo(x1_3210.h, x1_3210.h)   //[0,10]
   if(!p3)ptr_y = out_buf                      //
 } {
   z2.uw = vrmpy(x0_5432.ub, _zzz.ub)          //[0, 7]filter even output
   z3.uw = vrmpy(x0_5432.ub, zzz_.ub)          //[0,10]filter even output
   x0_3210 = x0_7654                           //[0, 9]
   p2 = cmp.eq(r0,r0)                          //p2 = true: stores enabled
 } {
   s3.uw = vrmpy(x0_5432.ub, w0210_.ub)        //[0,11]filter even output
   x1_5432.h = vshuffe(x1_7654.h,x1_3232.h)    //[0,12]
   x0.tmp = vmem (ptr_x2++#1)                  //[0,12]
   x0.b = vshuff(x0.b)                         //[0,14]
 } {
   z0.uw += vrmpy(x1_3210.ub, _zzz.ub)         //[0,13]filter even output
   z1.uw += vrmpy(x1_3210.ub, zzz_.ub)         //[0,16]filter even output
   s0.w = vadd(s0.w, bias_val.w)               //[WIDTH, P]
   s1.w = vadd(s1.w, bias_val.w)               //[WIDTH, P]
 } {
   s2.uw += vrmpy(x1_5432.ub, w1_210.ub)       //[0,14]filter even output
   x2_7654.b = vshuff(x0.b)                    //[0,16]
   if(!p3) out_buf = add(out_buf, next_out_width_32) //[DEPTH]
 } {
   x2_3232.h = vshuffo(x2_3210.h, x2_3210.h)   //[0,16]
   z2.uw += vrmpy(x1_5432.ub, _zzz.ub)         //[0,13]filter even output
   z3.uw += vrmpy(x1_5432.ub, zzz_.ub)         //[0,16]filter even output
   x1_3210 = x1_7654                           //[0,15]
 } {
   s3.uw += vrmpy(x1_5432.ub, w1210_.ub)       //[0,17]filter even output
   x2_5432.h = vshuffe(x2_7654.h,x2_3232.h)    //[0,18]
 } {
   z0.uw += vrmpy(x2_3210.ub, _zzz.ub)         //[0,19]filter even output
   z1.uw += vrmpy(x2_3210.ub, zzz_.ub)         //[0,22]filter even output
   s2.w = vadd(s2.w, bias_val.w)               //[WIDTH, P]
 } {
   s1.uw += vrmpy(x2_3210.ub, w2210_.ub)       //[0,21]filter even output
   s0.w = vsub(s0.w, z0.w)                     //remove filter-zero bias
 } {
   z2.uw += vrmpy(x2_5432.ub, _zzz.ub)         //[0,19]filter even output
   z3.uw += vrmpy(x2_5432.ub, zzz_.ub)         //[0,22]filter even output
   s1.w = vsub(s1.w, z1.w)                     //remove filter-zero bias
 } {
   s2.uw += vrmpy(x2_5432.ub, w2_210.ub)       //[0,20]filter even output
   s0.w = vasl(s0.w, recip_shift)              //pre-scale before recip mpy
   s3.w = vadd(s3.w, bias_val.w)               //[WIDTH, P]
 } {
   s3.uw += vrmpy(x2_5432.ub, w2210_.ub)       //[0,23]filter even output
   s2.w = vsub(s2.w, z2.w)                     //remove filter-zero bias
   s1.w = vasl(s1.w, recip_shift)              //
 } {
   d0.w = vmpye(s0.w, vrecip.uh)               //[0,15]multiply by 1/max
   s3.w = vsub(s3.w, z3.w)                     //remove filter-zero bias
   x2_3210 = x2_7654                           //[0,21]
 } {
   s2.w = vasl(s2.w, recip_shift)              //
   d0.w += vmpyo(s0.w, vrecip.h):SSR           //[0,17]3
 } {
   s3.w = vasl(s3.w, recip_shift)              //
   d1.w = vmpye(s1.w, vrecip.uh)               //[0,22]multiply by 1/max
   min.w = vmin(min.w, d0.w)                   //[0,22]8 //0+2+1
 } {
   d1.w += vmpyo(s1.w, vrecip.h):SSR           //[0,23]9
   max.w = vmax(max.w, d0.w)                   //[0,18]5 //0+2+1
 } {
   d2.w = vmpye(s2.w, vrecip.uh)               //[0,15]multiply by 1/max
   max.w = vmax(max.w, d1.w)                   //[0,26]12 //0+2+1
   min.w = vmin(min.w, d1.w)                   //[0,27]13 //0+2+1
 } {
   d2.w += vmpyo(s2.w, vrecip.h):SSR           //[0,17]3
   d1d0.h = vpack(d1.w, d0.w):sat              //[0,27]
 } {
   min.w = vmin(min.w, d2.w)                   //[0,22]8 //0+2+1
   d3.w = vmpye(s3.w, vrecip.uh)               //[0,22]multiply by 1/max
 } {
   max.w = vmax(max.w, d2.w)                   //[0,18]5 //0+2+1
   d3.w += vmpyo(s3.w, vrecip.h):SSR           //[0,23]9
 } {
   max.w = vmax(max.w, d3.w)                   //[0,26]12 //0+2+1
   min.w = vmin(min.w, d3.w)                   //[0,27]13 //0+2+1
   s0.uw = vrmpy(x0_3210.ub, w0_210.ub)        //[1, 6]filter even output
 }:endloop0:endloop1                           //end width depth
/* ----------------------------------------------------------------------------- */
// Row epilogue: drain the last packed result, step the input pointer by one
// vertical stride, reset filter/bias pointers, and reload lc1 for the next row.
 { d3d2.h = vpack(d3.w, d2.w):sat              //[WIDTH, E]
   out_height = add(out_height, #-1)           //
   p2 = !cmp.eq(r0,r0)                         //p2 = false for next row prolog
   lc1 = depth                                 //loop1(.L_depth, depth)
 } {
   p0 = cmp.eq(out_height, #0)                 //
   in_buf=add(in_buf,next_in_width_depth_stride)//stride
   ptr_w0 = filt
   bias_ptr = bias_sum                         //
 } {
   ptr_xin = in_buf
   d3210.ub = vpack(d3d2.h, d1d0.h):sat        //[WIDTH, E]
   vmem (ptr_y+#0) = d3210.new                 //[WIDTH, E]
   if(!p0) jump .L_height                      //
 }//end height
/* ----------------------------------------------------------------------------- */
// Function epilogue: publish running max/min, restore callee-saved registers.
 { r17:16 = memd(sp+#0)                        //restore
   vmem(ptr_max+#0) = max                      //
 } {
   r19:18 = memd(sp+#8)                        //restore
   vmem(ptr_max+#1) = min                      //
 } {
   r21:20 = memd(sp+#16)                       //restore
   r23:22 = memd(sp+#24)                       //restore
 } {
   r25:24 = memd(sp+#32)                       //restore
   r27:26 = memd(sp+#40)                       //restore
 } {
   dealloc_return                              //return
 }
/* ----------------------------------------------------------------------------- */
.L_end:
.size dwconv2dbbb_s1_3x3_asm, .L_end-dwconv2dbbb_s1_3x3_asm
/* ----------------------------------------------------------------------------- */ |
XiaoMi/nnlib | 9,905 | hexagon/asm_src/quant_add_spec_h.S | /*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
Behavioral Model
----------------
speculative add two streams with guess of max and min
*/
#if 0
void quant_add_spec_casm(uint8_t *aq, float amax, float amin,
uint8_t *bq, float bmax, float bmin,
float gmax, float gmin, //guess
uint8_t * cq, float *cmax, float *cmin, int length)
{
float stepa = amax-amin;
float stepb = bmax-bmin;
float step, lmin, lmax;
float alpha = stepa/stepb;
short ialpha = 128.0*alpha;
float kappa = 128.0*alpha +(255.0*amin + 255.0*bmin)/stepb ;
short ikappa = (int) (kappa+.0); //+ialpha is because input is 08 ^
//compute local max,min by updating local
lmin = (gmin * 255.0)/stepb;
lmax = (gmax * 255.0)/stepb;
step = lmax - lmin;
float frecip = (2.0 * 255.0 * 32768.0) / step;
float foffset = (2.0 * 255.0 * lmin) / step;
short recip = (int) (frecip +0.0);
short offset = (int) (foffset-1.0) ;
quant_add_spec_asm(aq, bq, ialpha, ikappa, offset, recip, cq, ptr_max, length);
lmax = (float)ptr_max[0];
lmin = (float)ptr_max[64];
//turn back to global max
*cmin = (lmin*stepb)/255.0;
*cmax = (lmax*stepb)/255.0;
return;
}
#endif
/*
Methods+Notes
-------------
for(i=0; i < length; i++) {
a = aq[i];
c = ((ialpha * a<<8 + 0x4000)>>7) + bq[i] + ikappa;
if(c > omax) omax = c;
if(c < omin) omin = c;
int oval = ((tmp[i]-imin) * recip + 0x4000)>>15;
if(oval > 255) cq[i] = 255; else if (oval < 0) cq[i] = 0; else cq[i] = (uint8_t) oval;
}
*/
/* -------------------------------------------------------------*/
.text
.file "quant_add_spec_h.S"
.global quant_add_spec_asm
.balign 32
.type quant_add_spec_asm, @function
/* -------------------------------------------------------------*/
quant_add_spec_asm:
/* -------------------------------------------------------------*/
// Speculative quantized add of two u8 streams (see the C model above):
// per element, a is sign-flipped (xor 0x80), scaled by alpha, added to
// b + kappa to form a 16-bit sum z; z is requantized via recip/offset and
// saturated to u8. The 16-bit running max/min of z are reduced across
// lanes at .L_minmax and stored so the caller can refine its guess.
// In:   r0=ptr_a r1=ptr_b r2=alpha r3=kappa r4=offset r5=recip, plus stack
//       args sp+#0=ptr_c, sp+#4=ptr_minmax, sp+#8=length (leaf, no frame).
// Out:  quantized bytes at ptr_c; max vector at vmem(ptr_minmax+#0),
//       min vector at vmem(ptr_minmax+#1).
// Note: length is processed in 128-byte vectors; loop count = (length-1)>>7.
#define ptr_a      r0   //pointer to input data
#define ptr_b      r1   //
#define alpha      r2   //
#define kappa      r3   //pointer to input zero
#define offset     r4
#define recip      r5
#define ptr_c      r9
#define ptr_minmax r8   //pointer to output unquantized output
#define length     r6   //
#define c2         r7
/* -------------------------------------------------------------*/
/* -------------------------------------------------------------*/
#define xh1xh0     v9:8
#define yh1yh0     v7:6
#define xh1        v9
#define xh0        v8
#define yh1        v7
#define yh0        v6
#define wh1wh0     v5:4
#define wh1        v5
#define wh0        v4
#define zh1zh0     v15:14
#define zh1        v15
#define zh0        v14
#define uh1uh0     v21:20
#define uh1        v21
#define uh0        v20
#define xb1xb0     v16
#define yb1yb0     v17
#define zb1zb0     v18
#define mino_mine  v11:10
#define maxo_maxe  v13:12
#define mino       v11
#define mine       v10
#define maxo       v13
#define maxe       v12
#define maxe0      v22
#define vzero      v3
#define vkappa     v0
#define vc80       v1
#define valpha     v2
#define voffset    v19
#define VEXP <<1:rnd:sat
/* -------------------------------------------------------------*/
// Prologue: splat scalars into vectors, prime the pipeline with the first
// vector of each stream, and initialize max/min to the extreme h values.
   {
     r10 = ##0x7fff7fff
     r11 = ##0x80808080
   } {
     vzero = #0
     mine = vsplat(r10)                        //min starts at 0x7fff
     ptr_c = memw(sp+#0<<2)
     vc80 = vsplat(r11)
   } {
     maxe = vnot(mine)                         //set to 0x8000
     xb1xb0.tmp = vmem(ptr_a++#1)              //[0, 0]
     xb1xb0 = vxor(xb1xb0, vc80)               //[0, 0]
     length= memw(sp+#2<<2)
   } {
     alpha = combine(alpha.L, alpha.L)
     kappa = combine(kappa.L, kappa.L)
     xh1xh0.b = vshuffoe(xb1xb0.b, vzero.b)    //[0, 1]
     length = add(length, #-1)                 // round down
   } {
     vkappa = vsplat(kappa)
     yb1yb0.tmp = vmem(ptr_b++#1)              //[0, 2]
     yh1yh0.uh = vzxt(yb1yb0.ub)               //[0, 2]
     length = lsr(length, #7)                  // truncate off bits
   } {
     maxe0 = maxe
     xh0.h = vmpy(xh0.h, alpha.h):VEXP         //[0, 3]
     wh0.h = vadd(yh0.h, vkappa.h)             //[0, 3]
     loop0(.L_quant, length)                   //[P, 5]
   } {
     offset = combine(offset.L, offset.L)
     recip = combine(recip.L, recip.L)
     xh1.h = vmpy(xh1.h, alpha.h):VEXP         //[0, 4]
     zh0.h = vadd(xh0.h, wh0.h)                //[0, 4]
   } {
     voffset = vsplat(offset)                  //[P, 5]
     wh1.h = vadd(yh1.h, vkappa.h)             //[0, 3]
     maxe0.h = vmax(maxe0.h, zh0.h)            //[0, 5]
   } {
     zh1.h = vadd(xh1.h, wh1.h)                //[0, 5]
     p0 = cmp.eq(length,#0) ; if (p0.new) jump:nt .Loopbot  //skip loop if <2 vectors
   }
/* -------------------------------------------------------------*/
// Pipelined main loop: requantize iteration i while loading/scaling i+1.
       .balign 32
.L_quant:
   { uh0.h = vmpy(zh0.h, recip.h):VEXP         //[0, 6]
     mine.h = vmin(mine.h, zh0.h)              //[0, 6]
     xb1xb0.tmp = vmem(ptr_a++#1)              //[1, 0]
     xb1xb0 = vxor(xb1xb0, vc80)               //[1, 0]
   } {
     uh1.h = vmpy(zh1.h, recip.h):VEXP         //[0, 7]
     xh1xh0.b = vshuffoe(xb1xb0.b, vzero.b)    //[1, 1]
   } {
     uh0.h = vsub(uh0.h, voffset.h)            //[0, 8]
     uh1.h = vsub(uh1.h, voffset.h)            //[0, 8]
     yb1yb0.tmp = vmem(ptr_b++#1)              //[1, 2]
     yh1yh0.uh = vzxt(yb1yb0.ub)               //[1, 2]
   } {
     xh0.h = vmpy(xh0.h, alpha.h):VEXP         //[1, 3]
     wh0.h = vadd(yh0.h, vkappa.h)             //[1, 3]
     wh1.h = vadd(yh1.h, vkappa.h)             //[1, 3]
   } {
     zb1zb0.ub = vsat(uh1.h, uh0.h)            //[0, 9]
     vmem(ptr_c++#1) = zb1zb0.new              //[0, 9]
     xh1.h = vmpy(xh1.h, alpha.h):VEXP         //[1, 4]
     zh0.h = vadd(xh0.h, wh0.h)                //[1, 4]
   } {
     maxe.h = vmax(maxe.h, zh1.h)              //[0,10]
     mine.h = vmin(mine.h, zh1.h)              //[0,10]
     maxe0.h = vmax(maxe0.h, zh0.h)            //[1, 5]
     zh1.h = vadd(xh1.h, wh1.h)                //[1, 5]
   }:endloop0
.Loopbot:
/* -------------------------------------------------------------*/
// Epilogue: requantize and store the final vector, fold maxe0 into maxe.
   { uh0.h = vmpy(zh0.h, recip.h):VEXP         //[1, 6]
     mine.h = vmin(mine.h, zh0.h)              //[1, 6]
   } {
     uh1.h = vmpy(zh1.h, recip.h):VEXP         //[1, 7]
     maxe.h = vmax(maxe.h, zh1.h)              //[1,10]
     uh0.h = vsub(uh0.h, voffset.h)            //[1, 8]
   } {
     uh1.h = vsub(uh1.h, voffset.h)            //[1, 8]
     mine.h = vmin(mine.h, zh1.h)              //[1,10]
   } {
     ptr_minmax = memw(sp+#1<<2)               //
     loop0(.L_minmax, #5)                      //5 vdeal steps: lane reduction
     c2 = #-2                                  //vdeal control: swap h pairs
   } {
     zb1zb0.ub = vsat(uh1.h, uh0.h)            //[1, 9]
     vmem(ptr_c++#1) = zb1zb0.new              //[1, 9]
     maxe.h = vmax(maxe0.h, maxe.h)
   }
/* -------------------------------------------------------------*/
// Cross-lane tree reduction of max/min via repeated vdeal + vmax/vmin,
// then store the reduced vectors and return.
.L_minmax:
   {
     maxo_maxe = vdeal(maxe, maxe, c2)         //[0, 0]
   } {
     mino_mine = vdeal(mine, mine, c2)         //[0, 1]
     maxe.h = vmax(maxe.h, maxo.h)             //[0, 1]
   } {
     mine.h = vmin(mine.h, mino.h)             //[0, 2]
   }:endloop0
   { maxo_maxe = vdeal(maxe, maxe, c2)         //[1, 0]
   } {
     mino_mine = vdeal(mine, mine, c2)         //[1, 1]
     maxe.h = vmax(maxe.h, maxo.h)             //[1, 1]
     vmem(ptr_minmax+#0)= maxe.new
   } {
     mine.h = vmin(mine.h, mino.h)             //[1, 2]
     vmem(ptr_minmax+#1)= mine.new
   } {
     jumpr r31
   }
.L_end:
/* -------------------------------------------------------------*/
.size quant_add_spec_asm, .L_end-quant_add_spec_asm
/*=============================================================================*/
|
XiaoMi/nnlib | 7,940 | hexagon/asm_src/gvsuma_16b_h.S | /*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
DESCRIPTION
Perform the sum of a square of activations using integral as input. The correction
factor const - sum( a(,)*filt_offset.
*/
/* --------------------------------------------------------------------------------------- */
.text
.global gvsuma_16b
.balign 32
.type gvsuma_16b, @function
gvsuma_16b:
/* --------------------------------------------------------------------------------------- */
// Compute per-output-row activation sums from an integral image (see header
// comment above): each output word is a box sum formed by differencing
// integral rows (bottom - top) and columns (left - aligned right), then
// scaled by the splatted offset (K*in_depth*filt_offset*in_offset).
// In:   r0=ptr_xi r1=ptr_zi r2=integral_width r3=next_int_width r4=stride_h
//       r5=filt_width, plus stack args sp+#0=filt_height, sp+#4=out_height,
//       sp+#8=offset.  Leaf function: no frame, no callee-saved spills.
// Out:  one vector row of sums per output row written through ptr_zi.
// Loops: loop1 over out_height, pipelined sp1loop0 over integral_width/32 - 1.
#define ptr_xi          r0   //integral input ints
#define ptr_zi          r1   //filter output
#define integral_width  r2   //pad_l+in_width+pad_r
#define next_int_width  r3   //distance to next output > (in_width + 31)&~31
#define stride_h        r4   //vertical stride
#define filt_width      r5   //filter width
#define filt_height     r6   //rows of filter
#define out_height      r7   //number of required output rows
#define offset          r8   //K*in_depth*filt_offset*in_offset
#define in_ptrT0        r0   //top row base of filter (1 above actual filter)
#define in_ptrT         r9   //top row of filter (1 above actual filter)
#define in_ptrB0        r10  //bottom row base of filter on actual filter)
#define in_ptrB         r11  //bottom row of filter on actual filter)
#define out_ptr         r13  //temp ptr for output
#define out_width       r12  //number of elements to compute on this row
/* --------------------------------------------------------------------------------------- */
#define topLeft         v0   //
#define botLeft         v1   //
#define Left            v2   //
#define topRight        v4   //
#define botRight        v5   //
#define Right           v3   //
#define align_pw        v6   //
#define align_m1        v7   //
#define filt_out        v8   //
#define filt_out_d      v9   //
#define voffset         v10  //
#define vzero           v11  //
/* --------------------------------------------------------------------------------------- */
// Prologue: fetch stack args, convert row counts to byte strides, derive the
// pipelined loop count (integral_width/32 - 1), and prime the first column
// difference Left = bottom - top.
 { filt_height = memw(sp+#0<<2)                //
   out_width = lsr(integral_width, #5)         //1 / 32
   out_height = memw(sp+#1<<2)                 //
 } {
   offset = memw(sp+#2<<2)                     //
   stride_h = mpyi(stride_h, next_int_width)   //row stride -> byte stride
   filt_height = mpyi(filt_height,next_int_width)//filter height -> byte offset
   out_width = add(out_width, #-1)             //
 } {
   topLeft = vmem(in_ptrT0+#0)                 //[P, 0] t31__t00
   in_ptrT = add(in_ptrT0, #128)               //[P, 0]
   in_ptrB0 = add(ptr_xi, filt_height)         //
   m0 = stride_h                               //modifier reg for vmem post-inc
 } {
   botLeft.tmp = vmem(in_ptrB0+#0)             //[P, 1] b31__b00
   Left.w = vsub(botLeft.w, topLeft.w)         //[P, 1] t - b 01234567
   loop1(.L_height, out_height)                //set up inner loop of horz sum
 } {
   voffset = vsplat(offset)                    //[P, 2]K*in_offset*filt_offset
   in_ptrT0 = add(in_ptrT0, stride_h)          //[P, 2]
   in_ptrB = add(in_ptrB0, #128)               //[P, 1]
 } {
   in_ptrB0 = add(in_ptrB0, stride_h)          //[P, 2]
   filt_width = asl(filt_width, #2)            //align to filt_w words
 }
/* --------------------------------------------------------------------------------------- */
       .balign 32
.L_height:
 { topRight = vmem(in_ptrT++#1)                //[0, 0]b63__b32
   p3 = sp1loop0(.L_width, out_width)          //set up inner loop of horz sum
   out_ptr = ptr_zi                            //
   ptr_zi = add(ptr_zi, next_int_width)        //update output pointer
 } {
   botRight.tmp = vmem(in_ptrB++#1)            //[0, 1]
   Right.w = vsub(botRight.w, topRight.w)      //[0, 1]t63__t32
   p2 = cmp.eq(out_width, #0)                  //deal with xcase width <= 32
   if(p2.new) jump:nt .L_skip                  //deal with xcase width <= 32
 }
/* --------------------------------------------------------------------------------------- */
// Pipelined horizontal loop: valign shifts the column-difference window by
// filt_width bytes so filt_out = box sum over the filter width; the scaled
// previous result is stored one iteration behind (p3 gates the first store).
       .balign 32
.L_width:
 { align_pw = valign(Right, Left, filt_width)  //[0, 3]
   topRight = vmem(in_ptrT++#1)                //[1, 0]b63__b32
   filt_out_d.w = vmpye(filt_out.w, voffset.uh)//[1, 5]
 } {
   filt_out.w = vsub(Left.w, align_pw.w)       //[0, 4]
   Left = Right                                //[0, 4]
   botRight.tmp = vmem(in_ptrB++#1)            //[1, 1]
   Right.w = vsub(botRight.w, topRight.w)      //[1, 1]t63__t32
 } {
   if p3 vmem(out_ptr++#1) = filt_out_d        //[1, 6]
 }:endloop0
/* --------------------------------------------------------------------------------------- */
// Row epilogue: drain the pipeline, store the final vector, and re-prime
// Left for the next output row (pointers advanced by m0 = stride_h).
.L_skip:
 {
   filt_out_d.w = vmpye(filt_out.w, voffset.uh)//[1, 5]
   if p3 vmem(out_ptr++#1) = filt_out_d.new    //[1, 5]
 } {
   align_pw = valign(Right, Left, filt_width)  //[1, 3]
   topLeft = vmem(in_ptrT0++m0)                //[P, 0] t31__t00
   in_ptrT = add(in_ptrT0, #128)               //[P, 0]
 } {
   filt_out.w = vsub(Left.w, align_pw.w)       //[1, 4]
   botLeft.tmp = vmem(in_ptrB0++m0)            //[P, 1] b31__b00
   Left.w = vsub(botLeft.w, topLeft.w)         //[P, 1] t - b 01234567
   in_ptrB = add(in_ptrB0, #128)               //[P, 2]
 } {
   vmem(out_ptr+#0) = filt_out.new             //[E, 6]
   filt_out.w = vmpye(filt_out.w, voffset.uh)  //[1, 5]
 }:endloop1
/* --------------------------------------------------------------------------------------- */
 {
   jumpr r31                                   //
 }
/* --------------------------------------------------------------------------------------- */
/* --------------------------------------------------------------------------------------- */
.L_end:
.size gvsuma_16b, .L_end-gvsuma_16b
|
XiaoMi/nnlib | 11,530 | hexagon/asm_src/scalemem_d32.S | /*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
*/
/*======================================================================*/
/* FUNCTIONS : scalemem_d32 */
/* */
/* DESCRIPTION */
/* copy rows, scaling u8 via given scale/offset */
/* */
/* ARCHITECTURE : QDSP6V6 + HVX */
/*======================================================================*/
/* REVISION HISTORY: */
/* ================= */
/* */
/* Author Date Comments */
/* ------------------------------------------------------------- */
/* GLS 13/09/17 created */
/*======================================================================*/
/* IDEAL-CYCLE-COUNT: */
/* */
/* MEMORY */
/* CODESIZE = 348 bytes */
/* STACK = 0 bytes */
/* ASSUMPTIONS */
/* arrays rows are vector aligned */
/* C MODEL */
/*======================================================================*/
#if 0
//
// input & output are both height x width x (32 bytes)
// ptr_out, stride_out are aligned
// ptr_in, stride_in are aligned
// 'width', 'height' each >= 0.
// scl_off has a scale (<= 32767) in 16 lsbs and a signed offset in the 16 msbs.
// (it will also work for -ve scale).
//
void scalemem_d32_hvx(
uint8_t * ptr_out, int32_t stride_out,
uint8_t const * ptr_in, int32_t stride_in,
int32_t height,
int32_t width,
int32_t scl_off)
{
int i,j;
int fullwid = width *32;
 int32_t offs = (scl_off >> 16) << 8; // zero bias
 int32_t scl = (int16_t)scl_off;
 for( i = 0; i < height; i++ ){
	 for( int j = 0; j < fullwid; j ++ ){
int32_t inval = ptr_in[ stride_in * i + j ];
int32_t prod = inval * scl + offs;
int32_t result = (prod + 16384) >> 15;
ptr_out[ stride_out * i + j] = saturate_u8( result );
}
}
// NOTE: if scl_off is in range 32704 .. 32767, the 'scaling' is a no-op and
// it becomes a copy. i.e. if (scl_off>>6) == 511.
// if scl_off is in the range 0x7f808000 .. 0x7f80803f, it becomes a 1's complement
//operation.
}
// The scaling is actually done as follows:
// (1) multiply input by 8 lsbs of scale (u8*u8)
// (2) mul input by 8 msbs of scale (u8*s8); and add this to (1) result >>8 (lsr)
// this is in (in*scale)>>8
// (3) add the 'offset' using saturated h add
// (4) >>7 , round, sat to u8.
//
#endif
/*=============================================================================*/
.text
.file "scalemem_d32.S"
.global scalemem_d32_hvx
.balign 32
.type scalemem_d32_hvx, @function
scalemem_d32_hvx:
/*=============================================================================*/
#define ptr_out r0
#define stride_out r1
#define ptr_in r2
#define stride_in r3
#define height r4
#define width r5
#define scl_off r6
#define const7 r7
#define scale_lo r8
#define scale_hi r9
#define offs r10
#define rowloops r11
/*=============================================================================*/
#define vzero v0
#define vinp v1
#define vout v2
#define voffs v3
#define vprodA0 v10
#define vprodA1 v11
#define vvprodA v11:10
#define vprodB0 v12
#define vprodB1 v13
#define vvprodB v13:12
#define vvoffs v5:4
#define vprodC0 v10
#define vprodC1 v11
#define vvprodC v11:10
/*=============================================================================*/
{
scl_off = memw(sp+#0) // get scl_off
r14 = add(width, #-1) // will find the loop count with this.
}{
offs = combine( scl_off.h, scl_off.h) // upper 16 bits of offs
scale_lo = vsplatb(scl_off) // lo 8 bits of scale
r15 = asr(scl_off, #8)
}{
scl_off = asr(scl_off,#6)
scale_hi = vsplatb(r15) // hi 8 bits of scale
}{
rowloops = lsr(r14,#2) // # of row loops (one less than vecs needed)
voffs = vsplat(offs)
}{
// if scl_off == 511 now, it means we can use a copy instead.
// if scl_off = 0x1fe0200, we use 2's complement.
//
// the row loops advance ptr_in by (rowloops+1)*128,
// and ptr_out by rowloops*128; so deduct those amounts from stride_in and stride_out
r14 = asl(rowloops,#7) // # find the row advance in h loop
stride_in = add(stride_in, #-128)
r15 = asl(width,#5) // for last-mask
vvoffs = vcombine(voffs,voffs) //
}{
stride_out = sub(stride_out,r14)
stride_in = sub(stride_in, r14)
const7 = #7
vzero = #0
}{
#if __HEXAGON_ARCH__ >= 62
q3 = vsetq2(r15)
#else
q3 = vsetq(r15)
#endif
p0 = cmp.eq(rowloops,#0) // true if no loops
p1 = cmp.eq(scl_off,#511) // is just a copy if so.
}
#if __HEXAGON_ARCH__ < 62
// need to change q3 to all 1's if line width a multiple of 4.
{
p2 = bitsclr(width,#3)
if( !p2.new ) jump:t .L_skip
}{
q3 = not(q3)
}
.L_skip:
#endif
///////////////////////////////////////////
// the copy that needs scaling
///////////////////////////////////////////
{
if( p1 ) jump:nt .L_case_noS;
p2 = cmp.eq(scl_off,#0x1fe0200) //2's complement if so
}{
if (p2) jump:nt .L_case_noS;
loop1( .L_hloop_S, height );
}
.balign 32
.L_hloop_S:
{ vinp.cur = vmem(ptr_in++#1) //[1]load
vvprodA.uh = vmpy( vinp.ub, scale_lo.ub) //[1]mul scale_lo by input bytes to get lo prod
p3 = sp1loop0(.L_wloop_S, rowloops) //
nop
}{
vprodB0.b = vshuffo(vzero.b, vprodA0.b) //[1]>> 8 using vshuffo
vprodB1.b = vshuffo(vzero.b, vprodA1.b) //[1]
}{
vvprodB.h += vmpy( vinp.ub, scale_hi.b) //[1]add the scale_hi product
if( p0 ) jump:nt .L_wloop0_S // skip if loop count = 0
}
.balign 32
.L_wloop_S:
{ vinp.cur = vmem(ptr_in++#1) //[1]load
vvprodA.uh = vmpy( vinp.ub, scale_lo.ub) //[1]mul scale_lo by input bytes to get lo prod
vout.ub = vasr(vprodC1.h, vprodC0.h, const7):rnd:sat //[3]
#if __HEXAGON_ARCH__ > 60
if p3 vmem(ptr_out ++#1) = vout.new //[3]
#endif
}{
vprodB0.b = vshuffo(vzero.b, vprodA0.b) //[1]>> 8 using vshuffo
vprodB1.b = vshuffo(vzero.b, vprodA1.b) //[1]
vvprodC.h = vadd(vvprodB.h, vvoffs.h):sat //[2]add the offset
}{
vvprodB.h += vmpy( vinp.ub, scale_hi.b) //[1]add the scale_hi product
#if __HEXAGON_ARCH__ == 60
if p3 vmem(ptr_out ++#1) = vout //[3]
#endif
}:endloop0
.L_wloop0_S:
{
vout.ub = vasr(vprodC1.h, vprodC0.h, const7):rnd:sat //[3]
if p3 vmem(ptr_out ++ #1) = vout.new //[3]
vvprodC.h = vadd(vvprodB.h, vvoffs.h):sat //[2]add the offset
}{
vout.ub = vasr(vprodC1.h, vprodC0.h, const7):rnd:sat //[3]
ptr_in = add(ptr_in, stride_in)
}{
if( q3 ) vmem(ptr_out+ #0) = vout // now store the last one
ptr_out = add(ptr_out, stride_out)
}: endloop1
{
jumpr r31
}
.balign 32
///////////////////////////////////////////
/// for when no scaling is needed - just a copy
/// This is also used for 1's complement:
/// p2 voffs
// 0 0 copy
// 1 -1 invert
///////////////////////////////////////////
.L_case_noS:
{
voffs = vnot(vzero) // voffs = -1 (only if p2)
loop1( .L_hloop_noS, height );
}{
if (!p2) voffs = vzero // restore voffs = 0 if !p2
}
.balign 32
.L_hloop_noS:
{
vinp.cur =vmem( ptr_in ++#1 ) // get first
vout = vxor(vinp,voffs)
loop0( .L_wloop_noS, rowloops )
if( p0 ) jump:nt .L_wloop0_noS // skip if loop count = 0
}
.balign 32
.L_wloop_noS:
{
vinp.cur = vmem(ptr_in++#1) // load next
vout = vxor(vinp,voffs)
vmem( ptr_out ++ #1) = vout // store current
}: endloop0
.L_wloop0_noS:
// now store the last one
{
ptr_in = add(ptr_in, stride_in)
if( q3 ) vmem(ptr_out+ #0) = vout
ptr_out = add(ptr_out, stride_out)
}: endloop1
{
jumpr r31
}
.L_end:
/*=============================================================================*/
.size scalemem_d32_hvx, .L_end-scalemem_d32_hvx
/*=============================================================================*/
|
XiaoMi/nnlib | 19,468 | hexagon/asm_src/fullconnlayerbatch_h.S | /*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
* FUNCTIONS : fullconnlayerbatched
*
* DESCRIPTION
* perform matrix multiply on activation and output relu data and max/min
*
* ARCHITECTURE : QDSP6V60+ + HVX
*
* REVISION HISTORY:
* =================
*
* Author Date Comments
* -------------------------------------------------------------
* DJH 05/22/18 created
* DJH 10/21/18 stopped overreading past read arrays
* ------------------------------------------------------------- */
.text
.file "fullconnlayerbatch_h.S"
.global fullconnlayerbatch_asm
.balign 32
.type fullconnlay5rbatch_asm, @function
fullconnlayerbatch_asm:
#define pptr_xi r0 //pointer to array of pointers to activation batches
#define ptr_wi r1 //pointer to weight chunkoutput depth 32
#define pptr_zi r2 //pointer to array of pointers to output
#define in_depth_pad r3 //the number of elements in the input vector % 32 out depth by def 32
#define num_batches r4 //mult of 2 if odd ptr to alias
#define ptr_max r5 //the on going max and mins for the output
#define recip_level r6 //32bit coefficients 255 / (est_max - est_min)
#define bias_adjust r7 //typically sum of bias and sum of weights
#define actns_adjust r8 //used toadjust the product
#define woffset r26 //the byte offset from weight position
#define recip_shift r6 //shift for accumulator if outputs larger
#define batch_cnt r9 //number of batches to go
#define ptr_w r7 //current ptr to weights matrix
#define n r3 //num iterations
#define cntrl r9 //control value for populating vpredicate table
#define sel r27 //choose entry to predicate tab;e
#define xsum0 r6 //sum of activations batch 0
#define xsum1 r4 //sum of activations batch 1
#define ptr_x0 r10 //ptr to activations 0
#define ptr_x1 r11 //ptr to activations 1
#define d07654_d03210 r13:12 //actviastion values
#define d07654 r13 //actviastion values
#define d03210 r12 //actviastion values
#define d17654_d13210 r15:14 //actviastion values
#define d17654 r15 //actviastion values
#define d13210 r14 //actviastion values
#define d0fedc_d0ba98 r17:16 //actviastion values
#define d0fedc r17 //actviastion values
#define d0ba98 r16 //actviastion values
#define d1fedc_d1ba98 r19:18 //actviastion values
#define d1fedc r19 //actviastion values
#define d1ba98 r18 //actviastion values
#define fetch0 r20 //fetch ptr to even batch
#define fetch1 r21 //fetch ptr to odd batch
#define out_ptr0 r22 //even output batch ptr
#define out_ptr1 r23 //align output batch ptr
#define align0 r24 //output alignment
#define align1 r25 //output alignment
#define FETCH_INC #32 //
#define NULL #0 //NULL 000 ptr
#define RSS <<1:rnd:sat:shift //unverbose the insturction
#define PV(VSRC) .word (0x1DFFE020+VSRC)
#define PS(SSRC) .word (0x1DFFE100+SSRC) //debug reg
#define bias0 v12 //even bias values
#define bias1 v13 //odd bias values
#define sum00 v1 //even accumulator
#define sum10 v2 //odd accumulator
#define weight00 v3 //weights from fc layer
#define weight01 v3 //weights from fc layer
#define weight02 v3 //weights from fc layer
#define weight03 v3 //weights from fc layer
#define min0 v4 //min of accumulator
#define max0 v5 //max of accumulator
#define max0_min0 v5:4
#define min1 v6 //min of accumulator
#define max1 v7 //max of accumulator
#define max1_min1 v7:6
#define b0 v10 //even batch quantized outputs
#define b1 v11 //odd batch quantized outputs
#define vpred v0 //sum of vector predicates for cntrling output
#define wsum v8 //bias values
#define recipvec v9 //255/max quantized value
#define vshamt_vec v14 //splat quantized shift values
/* ----------------------------------------------------------------------- */
{ allocframe(#56) //reserve stack
num_batches = lsr(num_batches, #1) //itertn 2 batches per rounf
} {
memd(sp+#0) = r17:16 //
memd(sp+#8) = r19:18 //
loop1(.L_batches, num_batches) //
} {
memd(sp+#16) = r21:20 //
memd(sp+#24) = r23:22 //
n = lsr(in_depth_pad, #4) //
} {
memd(sp+#32) = r25:24 //
memd(sp+#40) = r27:26 //
n = add(n, #-1) //correct for pipeline
} {
max0 = vmem(ptr_max+#0 ) //
loop0(.L_matmul32, n) //[P, ]]
cntrl = #32 //
ptr_x0 = memw(pptr_xi++#1<<2) //[P, ]
} {
recip_shift = memw(sp+#20<<2) //
} {
vshamt_vec = vsplat(recip_shift) //
} {
ptr_x1 = memw(pptr_xi++#1<<2) //[P, ]
sel = ##0x01010101 //
q0 = vsetq(cntrl) //
} {
recip_level = memw(sp+#16<<2) //get quantize coeff
cntrl = #64 //
vpred = vand(q0, sel) //1___ v(32)
sel = add(sel, sel) //
} {
recipvec = vsplat(recip_level) //replicate to vector
bias_adjust = memw(sp+#17<<2) //
q1 = vsetq(cntrl) //
min0 = vmem(ptr_max+#1 ) //
} {
wsum = vmem(bias_adjust+#0) //[P, ]
actns_adjust = memw(sp+#18<<2) //
q0 = and(q1, !q0) //
cntrl = #96 //
} {
vpred |= vand(q0, sel) //_1__ v(64) & !v(32)
sel = add(sel, sel) //
q0 = vsetq(cntrl) //
batch_cnt = add(num_batches, #-1) //
} {
woffset = memw(sp+#19<<2) //
max1_min1 = vcombine(max0,min0) //
q1 = and(q0, !q1) //
dcfetch(ptr_x0+#0<<6) //[P, ]
} {
vpred |= vand(q1, sel) //__1_ v(96) & !v(64)
sel = add(sel, sel) //
q1 = not(q0) //
dcfetch(ptr_x1+#0<<6) //[P, ]
} {
vpred |= vand(q1, sel) //___1 !v(96)
sel = add(sel, sel) //
q1 = and(q0, !q0) //
fetch0 = add(ptr_x0, FETCH_INC) //[
} {
vpred |= vand(q1, sel) //sel = 0x10101010
fetch1 = ptr_x1 //[P, ]
xsum0 = memw(actns_adjust++#2<<2) //[P, ]batch 0 -sum
xsum1 = memw(actns_adjust+#1<<2) //[P, ]batch 1 - sum
}
/* ------------------------------------------------------------------------------ */
.balign 32
.L_batches:
{ d07654_d03210 = memd(ptr_x0++#1<<3) //[0, 0]read batch 0
d17654_d13210 = memd(ptr_x1++#1<<3) //[0, 1]read batch 1
sum00 = vsplat(xsum0) //[P, ]splat the sum of accs
sum10 = vsplat(xsum1) //[P, ]splat the sum of accs
} {
ptr_w = ptr_wi //[P, ]
d0fedc_d0ba98 = memd(ptr_x0++#1<<3) //[0, 2]read batch 0
sum00.w = vadd(sum00.w, wsum.w) //[P, ]set up accumulator 0
sum10.w = vadd(sum10.w, wsum.w) //[P, ]set up accumulator 1
}
/* ------------------------------------------------------------------------------ */
.balign 32
.L_matmul32: //to be urrolled but later
{ dcfetch(fetch0+#0<<6) //[0, 3]prefetch batch 0
fetch0 = add(fetch0, FETCH_INC) //[0, 3]increment fetch
fetch1 = add(fetch1, FETCH_INC) //[0, 3]increment fetch
d1fedc_d1ba98 = memd(ptr_x1++#1<<3) //[0, 3]read batch 1
} {
weight00.tmp = vmem(ptr_w++#1) //[0, 4]read weights
sum00.uw += vrmpy(weight00.ub, d03210.ub) //[0, 4]do dotproduct of acts with matrix
sum10.uw += vrmpy(weight00.ub, d13210.ub) //[0, 4]do dotproduct of acts with matrix
dcfetch(fetch1+#0<<6) //[0, 4]prefetch batch 1
} {
weight01.tmp = vmem(ptr_w++#1) //[0, 5]read weights
sum00.uw += vrmpy(weight01.ub, d07654.ub) //[0, 5]do dotproduct of acts with matrix
sum10.uw += vrmpy(weight01.ub, d17654.ub) //[0, 5]do dotproduct of acts with matrix
d07654_d03210 = memd(ptr_x0++#1<<3) //[1, 0]get batch input
} {
weight02.tmp = vmem(ptr_w++#1) //[0, 6]read weights
sum00.uw += vrmpy(weight02.ub, d0ba98.ub) //[0, 6]do dotproduct of acts with matrix
sum10.uw += vrmpy(weight02.ub, d1ba98.ub) //[0, 6]do dotproduct of acts with matrix
d17654_d13210 = memd(ptr_x1++#1<<3) //[1, 1]get batch input
} {
weight03.tmp = vmem(ptr_w++#1) //[0, 7]read weights
sum00.uw += vrmpy(weight03.ub, d0fedc.ub) //[0, 7]do dotproduct of acts with matrix
sum10.uw += vrmpy(weight03.ub, d1fedc.ub) //[0, 7]do dotproduct of acts with matrix
d0fedc_d0ba98 = memd(ptr_x0++#1<<3) //[1, 2]get batch input
}:endloop0
/* ------------------------------------------------------------------------------ */
{ weight00.tmp = vmem(ptr_w++#1) //[1, 3]read weights
sum00.uw += vrmpy(weight00.ub, d03210.ub) //[1, 3]do dotproduct of acts with matrix
sum10.uw += vrmpy(weight00.ub, d13210.ub) //[1, 3]do dotproduct of acts with matrix
d1fedc_d1ba98 = memd(ptr_x1++#1<<3) //[1, 3]get batch input
} {
weight01.tmp = vmem(ptr_w++#1) //[1, 4]read weights
sum00.uw += vrmpy(weight01.ub, d07654.ub) //[1, 4]do dotproduct of acts with matrix
sum10.uw += vrmpy(weight01.ub, d17654.ub) //[1, 4]do dotproduct of acts with matrix
out_ptr1 = memw(pptr_zi+#1<<2) //get next output batch ptr
} {
weight02.tmp = vmem(ptr_w++#1) //[1, 5]read weights
sum00.uw += vrmpy(weight02.ub, d0ba98.ub) //[1, 5]do dotproduct of acts with matrix
sum10.uw += vrmpy(weight02.ub, d1ba98.ub) //[1, 5]do dotproduct of acts with matrix
p1 = cmp.eq(out_ptr1, NULL) //NULL ptr?
} {
weight03.tmp = vmem(ptr_w++#1) //[1, 6]read weights
sum00.uw += vrmpy(weight03.ub, d0fedc.ub) //[1, 6]do dotproduct of acts with matrix
sum10.uw += vrmpy(weight03.ub, d1fedc.ub) //[1, 6]do dotproduct of acts with matrix
out_ptr0 = memw(pptr_zi++#2<<2) //get next output batch ptr
} {
out_ptr0 = add(out_ptr0, woffset) //add the output weights offset
if(!p1) out_ptr1 = add(out_ptr1, woffset) //add the output weights offset if not NULL
p0 = cmp.eq(batch_cnt, #0) //are all batches computed?
sum00.w = vasl(sum00.w, vshamt_vec.w)
} {
sum10.w = vasl(sum10.w, vshamt_vec.w)
b0.w = vmpye(sum00.w, recipvec.uh) //[E, ]quantize
align0 = extractu(out_ptr0, #2, #5) //xx00000
if(!p0) ptr_x0 = memw(pptr_xi++#1<<2) //[P, ]get next evwen batch
} {
b0.w+= vmpyo(sum00.w, recipvec.h):RSS //[E, ]quantize
if(!p0) ptr_x1 = memw(pptr_xi++#1<<2) //[P, ]ptr to next patch
} {
loop0(.L_matmul32, n) //[P, ]next batches
dcfetch(ptr_x0+#0<<6) //[P, ]fetch next batch
align0 = lsl(#1, align0) //convert to power of 2
batch_cnt = add(batch_cnt, #-1) //decrement batch count
} {
min0.w = vmin(min0.w, b0.w) //[E, ]update even min
b1.w = vmpye(sum10.w, recipvec.uh) //[E, ]quantize
dcfetch(ptr_x1+#0<<6) //[P, ]1st fetch
align1 = extractu(out_ptr1, #2, #5) //xx00000
} {
b1.w+= vmpyo(sum10.w, recipvec.h):RSS //[E, ]quantize
align0 = vsplatb(align0) //create table lookup cntrls
fetch0 = add(ptr_x0, FETCH_INC) //initialize fetcvh
fetch1 = ptr_x1 //[P, ]initialize fetcvh
} {
min1.w = vmin(min1.w, b1.w) //[E, ]update odd min
max0.w = vmax(max0.w, b0.w) //[E, ]update even max
align1 = lsl(#1, align1) //convert to power of 2
b0.h = vpack(b0.w, b0.w):sat //[E, ]#>>16
} {
max1.w = vmax(max1.w, b1.w) //[E, ]update odd max
b0.ub = vpack(b0.h, b0.h):sat //[E, ]16 to 8 sat
q0 = vand(vpred, align0) //access even alignment cntrl
align1 = vsplatb(align1) //create table lookup cntrls
} {
if(q0) vmem(out_ptr0) = b0 //[E, ]store and increment next batch
b1.h = vpack(b1.w, b1.w):sat //[E, ]pack to hwords
if(p1) align1 = #0 //sel //dont store
if(p1) max1_min1 = vcombine(max0,min0) //if null ptr filter out max/min
} {
q1 = vand(vpred, align1) //get odd alignment vpred
if(p1) out_ptr1 = out_ptr0 // use valid address for empty vstore
b1.ub = vpack(b1.h, b1.h):sat //[E, ]16 to 8 sat
if(!p0) xsum0 = memw(actns_adjust++#1<<2) //[P, ]batch 0 -sum
} {
min0.w = vmin(min0.w, min1.w) //[E, ]merge mins
max0.w = vmax(max0.w, max1.w) //[E, ]merge maxs
if(q1) vmem(out_ptr1) = b1 //[E, ]store and increment next batch
if(!p0) xsum1 = memw(actns_adjust++#1<<2) //[P, ]batch 1 - sum
}:endloop1
/* ------------------------------------------------------------------------------ */
{ r17:16 = memd(sp+#0) //restore r16, r17from stack
r19:18 = memd(sp+#8) //restore r18,r19
} {
r21:20 = memd(sp+#16) //restore r20,r11
vmem(ptr_max+#0) = max0 //[E, 0]32max
} {
r23:22 = memd(sp+#24) //restore r22,r13
vmem(ptr_max+#1) = min0 //[E, 0]32min
} {
r25:24 = memd(sp+#32) //restore r24,r15
r27:26 = memd(sp+#40) //restore r26,r17
} {
dealloc_return //restore fram and return
}
.L_end:
/* ------------------------------------------------------------------------------ */
.size fullconnlayerbatch_asm, .L_end-fullconnlayerbatch_asm
/* ------------------------------------------------------------------------------ */
|
XiaoMi/nnlib | 9,047 | hexagon/asm_src/quantize_h.S | /*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
*/
/*======================================================================*/
/* */
/*======================================================================*/
/* FUNCTIONS : quantize_asm */
/* */
/* DESCRIPTION */
/* Compute quantized range from in range to 8bit unsigned*/
/* */
/* ARCHITECTURE : QDSP6V6 + HVX */
/*======================================================================*/
/* REVISION HISTORY: */
/* ================= */
/* */
/* Author Date Comments */
/* ------------------------------------------------------------- */
/* DJH 09/12/16 created */
/*======================================================================*/
/* IDEAL-LIST SCHEDULED CYCLE-COUNT: */
/* -> N/32+7 */
/* */
/* MEMORY */
/* CODESIZE = 272 bytes */
/* ASSUMPTIONS */
/* y and x are 128 byte aligned */
/* C MODEL */
/*======================================================================*/
#if 0
void quantize_cn(unsigned char * out, int * input, int in_max_val,
int in_min_val, float in_level_size, int n)
{
float out_min;
float out_max;
int i;
/* Make sure min val <= 0.0 in floaty land */
out_min = in_level_size * (float)in_min_val;
out_max = in_level_size * (float)in_max_val;
if (out_min > 0.0f) out_min = 0.0f;
/* Requantize with new range */
for (i = 0; i < n; i++) {
/* We want 0.0 -- 255.0 to resize to 0..255 */
float resize_amt = 255.f/(out_max-out_min);
float value_f = (in_level_size*input[i] - out_min) * resize_amt;
int value_i = roundf(value_f);
out[i] = (value_i < 0) ? 0 : ((value_i > 255) ? 255 : value_i);
}
return;
}
#endif
/*======================================================================*/
.global quantize_asm
.balign 32
.type quantize_asm, @function
quantize_asm:
/* ==================================================================== */
#define ptr_x r0 //pointer to input
#define offset r1 //qpoint of input data
#define gain r2 //output data
#define ptr_y r3 //
#define n r4 //number of points > 128
#define lsb7 r5
#define u0 v0
#define u1 v1
#define u2 v2
#define u3 v3
#define w0 v4
#define w1 v5
#define w2 v6
#define w3 v7
#define voffset v8
#define vgain v9
#define h3h1 v10
#define h2h0 v11
#define b3b2b1b0 v12
/* ==================================================================== */
{ voffset = vsplat(offset) //[P, ]
q0 = vsetq(n) //
lsb7 = #127 //
} {
u0.tmp = vmem(ptr_x++#1) //[0, 0]
u0.w = vsub(u0.w, voffset.w):sat //[0, 0]
p0 = bitsclr(n, lsb7) //
} {
vgain = vsplat(gain) //[P, ]
n = add(n, #127) //
} {
w0.w =vmpye(u0.w,vgain.uh) //[0, 2]
u1.tmp = vmem(ptr_x++#1) //[0, 2]
u1.w = vsub(u1.w, voffset.w):sat //[0, 2]
n = lsr(n, #7) //[P, 7]
} {
w0.w+=vmpyo(u0.w,vgain.h):<<1:rnd:sat:shift //[0, 3]
if(!p0) n = add(n, #-1) //
} {
w1.w =vmpye(u1.w,vgain.uh) //[0, 4]
u2.tmp = vmem(ptr_x++#1) //[0, 4]
u2.w = vsub(u2.w, voffset.w):sat //[0, 4]
} {
w1.w+=vmpyo(u1.w,vgain.h):<<1:rnd:sat:shift //[0, 5]
loop0(.L_loopN, n) //[P,10]
}
/* ==================================================================== */
.balign 32
.L_loopN:
{
w2.w =vmpye(u2.w,vgain.uh) //[0, 6]
u3.tmp = vmem(ptr_x++#1) //[0, 6]
u3.w = vsub(u3.w, voffset.w):sat //[0, 6]
} {
h2h0.h = vpack(w1.w, w0.w):sat //[0, 7]
w2.w+=vmpyo(u2.w,vgain.h):<<1:rnd:sat:shift //[0, 7]
} {
w3.w =vmpye(u3.w,vgain.uh) //[0, 8]
u0.tmp = vmem(ptr_x++#1) //[1, 0]
u0.w = vsub(u0.w, voffset.w):sat //[1, 0]
} {
w3.w+=vmpyo(u3.w,vgain.h):<<1:rnd:sat:shift //[0, 9]
} {
w0.w =vmpye(u0.w,vgain.uh) //[1, 2]
u1.tmp = vmem(ptr_x++#1) //[1, 2]
u1.w = vsub(u1.w, voffset.w):sat //[1, 2]
} {
w0.w+=vmpyo(u0.w,vgain.h):<<1:rnd:sat:shift //[1, 3]
h3h1.h = vpack(w3.w, w2.w):sat //[0,11]
} {
w1.w =vmpye(u1.w,vgain.uh) //[1, 4]
u2.tmp = vmem(ptr_x++#1) //[1, 4]
u2.w = vsub(u2.w, voffset.w):sat //[1, 4]
} {
b3b2b1b0.ub = vpack(h3h1.h, h2h0.h):sat //[0,13]
vmem(ptr_y++#1) = b3b2b1b0.new //[0,13]
w1.w+=vmpyo(u1.w,vgain.h):<<1:rnd:sat:shift //[1, 5]
}:endloop0
/*======================================================================*/
{
w2.w =vmpye(u2.w,vgain.uh) //[1, 6]
u3.tmp = vmem(ptr_x++#1) //[1, 6]
u3.w = vsub(u3.w, voffset.w):sat //[1, 6]
}{
if(p0) jumpr r31 //[E, 6]
}{
} {
h2h0.h = vpack(w1.w, w0.w):sat //[1, 7]
w2.w+=vmpyo(u2.w,vgain.h):<<1:rnd:sat:shift //[1, 7]
} {
w3.w =vmpye(u3.w,vgain.uh) //[1, 8]
} {
w3.w+=vmpyo(u3.w,vgain.h):<<1:rnd:sat:shift //[1, 9]
} {
h3h1.h = vpack(w3.w, w2.w):sat //[1,11]
} {
b3b2b1b0.ub = vpack(h3h1.h, h2h0.h):sat //[1,13]
} {
if(q0) vmem(ptr_y+#0) = b3b2b1b0 //[1,13]
}{
jumpr r31 //Q
}
/*======================================================================*/
.L_end:
.size quantize_asm, .L_end-quantize_asm
|
XiaoMi/nnlib | 45,434 | hexagon/asm_src/gvconv2dbbb_circ_d64_v65_h.S | /*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
Memory
CODESIZE 1524 bytes
STACK 112 bytes
Description
     Utilize the v65 vrmpy instructions. Common weights with 2 inputs and 2 outputs. 2 data inputs
are in a pair. Key is to feed each input with a different stream. Solution is to shuffle the
stream with a delayed version of itself. This doubles the size of the activations so a
smaller circular buffer of size filt_height*input depth*width*2.
Example depth = 16 shuffle blocks of 4 bytes together e.g. x00 =[x00.0,x00.1,x00.2,x00.3]
x00 x01 x02 x03|x10 x11 x12 x13|x20 x21 x22 x23|x30 x31 x32 x33
x40 x41 x42 x43|x50 x51 x52 x53|x60 x61 x62 x63|x70 x71 x72 x73
x80 x81 x82 x83|x90 x91 x92 x93|xa0 xa1 xa2 xa3|xb0 xb1 xb2 xb3
xc0 xc1 xc2 xc3|xd0 xd1 xd2 xd3|xe0 xe1 xe2 xe3|xf0 xf1 xf2 xf3
to
x00 x40 x01 x41 x02 x42 x03 x43|x10 x50 x11 x51 x12 x52 x13 x53|
x20 x60 x21 x61 x22 x62 x23 x63|x30 x70 x31 x71 x32 x72 x33 x73|
x40 x80 x41 x81 x42 x82 x43 x83|x50 x90 x51 x91 x52 x92 x53 x93|
x60 xa0 x61 xa1 x62 xa2 x63 xa3|x70 xb0 x71 xb1 x72 xb2 x73 xb3|
x80 xc0 x81 xc1 x82 xc2 x83 xc3|x90 xd0 x91 xd1 x92 xd2 x93 xd3|
xa0 xe0 xa1 xe1 xa2 xe2 xa3 xe3|xb0 xf0 xb1 xf1 xb2 xf2 xb3 xf3|
xc0 xc1 xc2 xc3 |xd0 xd1 xd2 xd3 |
xe0 xe1 xe2 xe3 |xf0 xf1 xf2 xf3 |
So each memd access into the buffer access two streams which are delayed from each other.
While this is occuring the sequence can be aligned so that the extra computation on the
ends can be minimized.
To further minimize memory the circular buffer is updated inside the kernel each
line.
     This version consumes 2 sets of weights (64) each time and produces 2
rows of the output at once.
*/
/*===============================================================================*/
.text
.file "gvconv2dbbb_circ_d64_v65_h.S"
.global gvconv2dbbb_circ_d64_v65_asm
.balign 32
.type gvconv2dbbb_circ_d64_v65_asm, @function
gvconv2dbbb_circ_d64_v65_asm:
/*===============================================================================*/
#define PV(VSRC) .word (0x1DFFE020+VSRC) //debug vec reg
#define PS(SSRC) .word (0x1DFFE100+SSRC) //debug vec reg
/*===============================================================================*/
/* ---------------------------------- CALL REGS -------------------------------- */
#define ptr_xi r0 //12 activation data
#define ptr_wi r1 //13 weights
#define ptr_zi r2 //14 results
#define next_inbuf_width r3 //(pad_l+in_width+pad_r)
/* ---------------- register aliases for the d32 convolution kernel ----------------
   Numeric comments (//15, //30, ...) are the stack-word index (sp+#n<<2) the value
   is loaded from in the prologue below.  Note several scalar regs are deliberately
   multi-purposed (e.g. r6 is stride_h_w early, zshift later); lifetimes do not
   overlap. */
#define out_width_depth r4 //next line amount
#define out_width r5 //15 amount of work to be done
#define stride_h_w r6 //30 stride_height, stride_width
#define in_depth r22 //31 input depth multiples of 32
#define filt_width r23 //32 horizontal filter width
#define filt_height r8 //33 filt_height lines per filter
#define out_height r9 //34 number of vertical lines to perform
#define ptr_filtsum r24 //35 includes the computation filt_sum * in_offset + biasvec
#define ptr_max r31 //36 maximum and minimum buffer
#define recip_level r26 //37 255 / (MAX - MIN) - used to scale to bytes
#define out_width_32 r7 //actual out_width in depth32
#define ptr_cbufi r16 //40 read buffer pointer
#define zshift r6 //41 extra shift on output before quantization
#define in_zero r25 //42
//#define ptr_equalize r17 //43
#define cbuf_eob r27 //18 end of circular buffer
#define cbuf_size r28 //19 size in bytes of circular buffer -1
#define weight_stride r15 //22 distance to next set of weights
/* --------------------------------- SCALER REGS ------------------------------- */
#define cm4 r2 //shuffle/deal ints
#define col_count r2 //horizontal counter
#define in_width_32 r3 //total input width in bytes in buffer
#define x70_x30 r15:14 //4n+0 inputs
#define x60_x20 r17:16 //4n+0 inputs
#define x50_x10 r19:18 //4n+0 inputs
#define x40_x00 r15:14 //4n+0 inputs
#define x71_x31 r17:16 //4n+1 inputs
#define x61_x21 r15:14 //4n+1 inputs
#define x51_x11 r19:18 //4n+1 inputs
#define x41_x01 r17:16 //4n+1 inputs
#define ptr_wi_ptr_xi r1:0 //
#define fetch_ptr_base r1 //base pointer for l1 prefetch
#define fetch_ptr r10 //current pointer for l1 prefetch
#define stride3 r11 //3*stride
#define stride r12 //current to next input
#define ptr_x0 r26 //base input pointer
#define ptr_x1 r13 //current input ptr
#define ptr_w0 r20 //13 even output depth 32 weights
#define ptr_w1 r21 //20 odd output depth 32 weights
#define ptr_z0 r0 //even output depth 32 outputs
#define ptr_z1 r25 //21 write buffer sp position and odd output depth 32 outputs
#define adjust r10 //
#define delta r7 //difference between filt height and stride height
#define out_width_64 r22 //actual out_width in depth32 *2
/* ---------------------------------- VEC REGS -------------------------------- */
#define x3x2x1x0 v10 //input data
#define x7x6x5x4 v11 //next input data
#define y3y2y1y0 v14 //aligned input data
#define y7y6y5y4 v15 //delayed aligned input data
#define y7y6y5y4_y3y2y1y0 v15:14 //aligned data
#define ybyay9y8 v16 //delayed by 2 aligned data
#define z73z62 v13 //shuffled delayed input
#define z51z40 v12 //shuffled delayed input
#define z73z62_z51z40 v13:12 //shuffled output data
/* ---------------------------------------------------------------------------- */
#define tmax v18 //
#define tmin v19 //
#define wscale v11
#define s17_s13 v25:24 //odd output accs 3,7
#define s16_s12 v23:22 //odd output accs 2,6
#define s15_s11 v21:20 //odd output accs 1,5
#define s14_s10 v19:18 //odd output accs 0,4
#define s07_s03 v17:16 //even output accs 3,7
#define s06_s02 v15:14 //even output accs 2,6
#define s05_s01 v13:12 //even output accs 1,5
#define s04_s00 v11:10 //even output accs 0,4
#define s17 v25 //odd acc 7
#define s16 v23 //odd acc 6
#define s15 v21 //odd acc 5
#define s14 v19 //odd acc 4
#define s07 v17 //even acc 7
#define s06 v15 //even acc 6
#define s05 v13 //even acc 5
#define s04 v11 //even acc 4
#define s13 v24 //odd acc 3
#define s12 v22 //odd acc 2
#define s11 v20 //odd acc 1
#define s10 v18 //odd acc 0
#define s03 v16 //even acc 3
#define s02 v14 //even acc 2
#define s01 v12 //even acc 1
#define s00 v10 //even acc 0
// note w00,w01,w10,w11 are only used as .tmp destinations.
#define w00 v0 //weights even 0-31
#define w01 v0 //weights even 32-63
#define w10 v0 //weights odd 0-31
#define w11 v0 //weights odd 32-63
#define vrecip0 v1 //reciprocal 255/MAx replicated
#define vrecip1 v30 //reciprocal 255/MAx replicated
#define s0_sh v8 //shifted value
#define s1_sh v26 //shifted value
#define wsum0 v2 //sum of weights column + bias add 0-31
#define wsum1 v3 //sum of weights column + bias add 32-63
#define d010 v27 //even lines upper 16bit packed accs 0,1
#define d032 v28 //even lines upper 16bit packed accs 2,3
#define d03210 v28 //8bit shifted, packed saturated 0-3
#define d054 v29 //even lines upper 16bit packed accs 4,5
#define d076 v31 //even lines upper 16bit packed accs 6,7
#define d07654 v31 //8bit shifted, packed saturated 4-7
#define d110 v27 //odd lines upper 16bit packed accs 0,1
#define d132 v28 //odd lines upper 16bit packed accs 2,3
#define d13210 v28 //8bit shifted, packed saturated 0-3
#define d154 v29 //odd lines upper 16bit packed accs 4,5
#define d176 v31 //odd lines upper 16bit packed accs 6,7
#define d17654 v31 //8bit shifted, packed saturated 4-7
#define maxo_maxe v5:4 //packed maxes
#define maxo v5 //odd maxes
#define maxe v4 //even maxes
#define mino_mine v7:6 //packed mins
#define mino v7 //odd mins
#define mine v6 //even mins
#define stmp0 v2 //0 // temp for min/max
#define stmp1 v3 //9 // temp for min/max
#define gmax v0 //
#define gmin v9 //
#define SSR <<1:rnd:sat:shift //simplify mpy instruction
/* --------------------------------------------------------------------------- */
/* ---- prologue: save callee-saved r16-r27 into the new frame, load all kernel
   parameters from the caller's stack words, derive the circular-buffer size /
   end marker and the weight strides, and prime the L2 cache with the first
   four 64B input lines.  maxe/mine are seeded to -0x7fffffff/+0x7fffffff. ---- */
{ allocframe(#112) //0th entry on stack is 112+8)/4=30 ints
 stride_h_w = memw(sp+#0<<2) //stride horizontal and vertical
} {
 memd(sp+#4<<2) = r21:20 //save 20,21
 memd(sp+#6<<2) = r23:22 //save 22,23
 r23 = #0x80000001
} {
 memd(sp+#0<<2) = r17:16 //save 16,17
 memd(sp+#2<<2) = r19:18 //save 18,19
 maxe = vsplat(r23) // maxe <- -0x7fffffff
} {
 memd(sp+#8<<2) = r25:24 //save 24,25
 memd(sp+#10<<2) = r27:26 //save 26,27
 mine.w = vabs(maxe.w) // mine <- +0x7fffffff
} {
 filt_height = memw(sp+#33<<2) //filter height
 memw(sp+#15<<2) = out_width //save output width
 stride = zxth(stride_h_w) //horizontal stride
} {
 in_depth = memw(sp+#31<<2) //input depth
 filt_width = memw(sp+#32<<2) //filter width
} {
 ptr_max = memw(sp+#36<<2) //get max/min ptr
 cbuf_size = mpyi(filt_height, in_depth) //circular buffer size
 stride = asl(stride, #5) //32 * stride_w
 dcfetch(ptr_xi+#0<<6) //
} {
 out_height = memw(sp+#34<<2) //height of output
 in_zero = memw(sp+#41<<2) //
 stride3 = addasl(stride, stride, #1) //3Xstride
 cbuf_size = mpyi(cbuf_size, next_inbuf_width) //circular buffer size
} {
 cbuf_size = add(cbuf_size, cbuf_size) //x2
 in_zero = vsplatb(in_zero) //
 memd(sp+#12<<2) = ptr_wi_ptr_xi //save weights:activation
 gmax = vmem(ptr_max+#0) //
} {
 weight_stride=mpy(filt_width.L,filt_height.L) //offset between filter rows
 filt_width = asl(filt_width, #2) //*32/8
 gmin = vmem(ptr_max+#1) //
 memw(sp+#14<<2) = ptr_zi //save output ptr
} {
 weight_stride=mpy(weight_stride.L,in_depth.L) //distance between weight rows
 recip_level = memw(sp+#37<<2) //255/max
 ptr_cbufi = memw(sp+#39<<2) //circular buffer
} {
 out_width_32 = memw(sp+#38<<2) //total width of output
 cbuf_eob = add(ptr_cbufi, cbuf_size) //end of circ buffer marker
 dcfetch(ptr_xi+#1<<6) //
} {
 memw(sp+#21<<2) = ptr_cbufi //cbuf write ptr
 filt_width = add(filt_width, #-1) //account for epilog
 ptr_wi += asl(weight_stride,#5) //weights stride
 filt_height = mpyi(filt_height, in_depth) //total number of depth32 filter rows
} {
 weight_stride = asl(weight_stride, #6) //
 col_count = memw(sp+#15<<2) //initialize width count
 cbuf_eob = add(cbuf_eob, #-4) //make so comparison is >= eob
 dcfetch(ptr_xi+#2<<6) //
} {
 filt_height = lsr(filt_height, #5) //num d32 rows in filter
 out_width_64 = add(out_width_32, out_width_32) //
 ptr_filtsum = memw(sp+#35<<2) //ptr to the sum of filters+offset
 dcfetch(ptr_xi+#3<<6) //
} {
 memw(sp+#20<<2) = ptr_wi //spill weight stride for later
 memw(sp+#24<<2) = weight_stride //
 in_width_32 = asl(next_inbuf_width, #6) //next d32 line x 2
 mino = mine //
} {
 vrecip0 = vmem(recip_level++#2) //used to compress to 8bits 255/max
} {
 filt_height = add(filt_height, #-1) //
 vrecip1 = vmem(recip_level+#-1) //used to compress to 8bits 255/max
 memw(sp+#37<<2) = recip_level //255/max
}
/* -------------------------------------------------------------------------- */
 .balign 32
/* ---- per-output-row setup: reload the circular-buffer read pointer and
   output pointer, seed all 16 accumulator pairs with the weight-sum/bias
   vectors (wsum0 for the even depth-32 stream, wsum1 for the odd one),
   and arm the vertical (loop1) and horizontal (sp1loop0) hardware loops. ---- */
.L_height:
{
 ptr_x0 = memw(sp+#12<<2) //ptr_x0=ptr_cbufi read circ buffer
 out_height = add(out_height, #-1) //decrement height count
 wsum0 = vmem(ptr_filtsum+#0) //set 1st weight offset
 maxo = maxe
} {
 ptr_z0 = memw(sp+#14<<2) //output ptr for even lines
 fetch_ptr_base = add(ptr_x0, in_width_32) //fetch is next row ahead
 wsum1 = vmem(ptr_filtsum+#1) //set 2nd weight offset
 nop
} {
 p1 = cmp.gt(fetch_ptr_base, cbuf_eob) //if prefetch >= circ buffer wrap around
 if(p1.new)fetch_ptr_base=sub(fetch_ptr_base,cbuf_size) //wrap fetch ptr around independently
 s06_s02 = vcombine(wsum0,wsum0) //init sum2 and 6
 s07_s03 = vcombine(wsum0,wsum0) //init sum3 and 7
} {
 loop1(.L_filt_height, filt_height) //setup vertical filter loop
 s04_s00 = vcombine(wsum0,wsum0) //init sum0 and 4
 s05_s01 = vcombine(wsum0,wsum0) //init sum1 and 5
 ptr_z1 = add(ptr_z0, out_width_32) //next output depth32 (as outdepth 64)
} {
 s16_s12 = vcombine(wsum1,wsum1) //init sum 2 and 6
 s17_s13 = vcombine(wsum1,wsum1) //init sum 3 and 7
 ptr_w0 = memw(sp+#13<<2) //access ptr weight
 p3 = sp1loop0(.L_filt_width, filt_width) //setup inner filter loop
} {
 memw(sp+#14<<2) += out_width_64 //update output ptr
 s14_s10 = vcombine(wsum1,wsum1) //init sum 0 and 4
 s15_s11 = vcombine(wsum1,wsum1) //init sum 1 and 5
 ptr_w1 = memw(sp+#20<<2) //access weights stride
}
 .balign 32
/* ---- core software-pipelined loop: for each filter row (loop1) walk the
   filter width (sp1loop0), accumulating 8 output pixels x 2 depth-32 output
   streams per pass via vrmpy (4 input bytes x 32 weights per lane).
   x40_x00..x71_x31 scalar pairs carry the 8 input pixels for even/odd byte
   phases; w00/w01/w10/w11 all alias v0 and are consumed as .tmp in-packet. ---- */
.L_width:
.L_filt_height:
{ x70_x30 = memd(ptr_x0+stride3<<#1) //[0, 0]load pt 3 and 7
 fetch_ptr = add(fetch_ptr_base, #0) //initial fetch ptr
 p0 = cmp.eq(filt_height, #0) //single filter row: skip pipelined body
 if(p0.new) jump:nt .L_last1
} {
 x60_x20 = memd(ptr_x0+stride<<#2) //[0, 2]load pt 2 and 6
 ptr_x1 = ptr_x0 //set up current input ptr
 ptr_x0 = add(ptr_x0, in_width_32) //if >= buf_size -= buf_size
 fetch_ptr_base=add(fetch_ptr_base,in_width_32) //if >= buf_size -= buf_size
}
 .balign 32
.L_filt_width:
{
 dcfetch(fetch_ptr+#0<<6) //[0, 2]fetch 64bytes-2 lots 8 x 4 bytes
 fetch_ptr = add(fetch_ptr, #64) //[0, 2]inc fetch by 32/64 bytes (1 line)
 nop; nop
} {
 w00.tmp = vmem(ptr_w0+#0) //[0, 3]1st 32 weights of out depth
 s06_s02.w+= vrmpy(w00.b, x60_x20.ub) //[0, 3]macc 2,6 out 0
 s07_s03.w+= vrmpy(w00.b, x70_x30.ub) //[0, 3]macc 3,7 even row
 x50_x10 = memd(ptr_x1+stride<<#1) //[0, 3]load pt 1 5
} {
 w10.tmp = vmem(ptr_w1+#0) //[0, 4]1st 32 weights stream 1
 s16_s12.w += vrmpy(w10.b, x60_x20.ub) //[0, 4]acc 2,6 out 1
 s17_s13.w += vrmpy(w10.b, x70_x30.ub) //[0, 4]acc 3,7 out 1
 x40_x00 = memd(ptr_x1++#1<<3) //[0, 4]load pts 0, 4
} {
 w00.tmp = vmem(ptr_w0++#1) //[0, 5]same 1st 32weights stream 0
 s04_s00.w += vrmpy(w00.b, x40_x00.ub) //[0, 5]acc 0,4,1,5 out 0
 s05_s01.w += vrmpy(w00.b, x50_x10.ub) //[0, 5]
 x71_x31 = memd(ptr_x1+stride3<<#1) //[0, 5]
} {
 w10.tmp = vmem(ptr_w1++#1) //[0, 6]same 1st 32weight stream 1
 s14_s10.w += vrmpy(w10.b, x40_x00.ub) //[0, 6]acc 0,4,1,5 stream 1
 s15_s11.w += vrmpy(w10.b, x50_x10.ub) //[0, 6]
 x61_x21 = memd(ptr_x1+stride<<#2) //[0, 6]
} {
 w01.tmp = vmem(ptr_w0+#0) //[0, 7]2nd 32weights stream 0
 s06_s02.w += vrmpy(w01.b, x61_x21.ub) //[0, 7]acc 2,3,6,7
 s07_s03.w += vrmpy(w01.b, x71_x31.ub) //[0, 7]
 x51_x11 = memd(ptr_x1+stride<<#1) //[0, 7]
} {
 w11.tmp = vmem(ptr_w1+#0) //[0, 8]2nd 32weights of stream 1
 s16_s12.w += vrmpy(w11.b, x61_x21.ub) //[0, 8]
 s17_s13.w += vrmpy(w11.b, x71_x31.ub) //[0, 8]
 x41_x01 = memd(ptr_x1++#1<<3) //[0, 8]
} {
 w01.tmp = vmem(ptr_w0++#1) //[0, 9]same 2nd 32weights stream 0
 s04_s00.w += vrmpy(w01.b, x41_x01.ub) //[0, 9]
 s05_s01.w += vrmpy(w01.b, x51_x11.ub) //[0, 9]
 x70_x30 = memd(ptr_x1+stride3<<#1) //[1, 0]
} {
 w11.tmp = vmem(ptr_w1++#1) //[0,10]same 2nd 32weights stream 1
 s14_s10.w += vrmpy(w11.b, x41_x01.ub) //[0,10]
 s15_s11.w += vrmpy(w11.b, x51_x11.ub) //[0,10]
 x60_x20 = memd(ptr_x1+stride<<#2) //[1, 1]
}:endloop0
/* ---- drained iteration of the pipelined width loop (stage [1,*] of the
   software pipeline): same vrmpy pattern as above, plus circular-buffer
   wrap-around of ptr_x0 before :endloop1 steps to the next filter row. ---- */
{ dcfetch(fetch_ptr+#0<<6) //[0, 0]fetch 64bytes-2 lots 8 x 4 bytes
 p3 = sp1loop0(.L_filt_width, filt_width) //set up inner loop for next time
 p1 = cmp.gt(fetch_ptr_base, cbuf_eob) //[E,10]
 if(p1.new)fetch_ptr_base=sub(fetch_ptr_base,cbuf_size)//[E,10]wrap around end fetch ptr
} {
 w00.tmp = vmem(ptr_w0+#0) //[1, 3]1st 32 weights of out depth
 s06_s02.w+= vrmpy(w00.b, x60_x20.ub) //[1, 3]acc 2,3,6,7 out 0
 s07_s03.w+= vrmpy(w00.b, x70_x30.ub) //[1, 3]
 x50_x10 = memd(ptr_x1+stride<<#1) //[1, 3]
} {
 w10.tmp = vmem(ptr_w1+#0) //[1, 4]1st 32 weights stream 1
 s16_s12.w += vrmpy(w10.b, x60_x20.ub) //[1, 4]acc 2,3,6,7 out 1
 s17_s13.w += vrmpy(w10.b, x70_x30.ub) //[1, 4]
 x40_x00 = memd(ptr_x1++#1<<3) //[1, 4]
} {
 w00.tmp = vmem(ptr_w0++#1) //[1, 5]same 1st 32weights stream 0
 s04_s00.w += vrmpy(w00.b, x40_x00.ub) //[1, 5]acc 0,4,1,5 out 0
 s05_s01.w += vrmpy(w00.b, x50_x10.ub) //[1, 5]
 x71_x31 = memd(ptr_x1+stride3<<#1) //[1, 5]
} {
 w10.tmp = vmem(ptr_w1++#1) //[1, 6]same 1st 32weight stream 1
 s14_s10.w += vrmpy(w10.b, x40_x00.ub) //[1, 6]acc 0,4,1,5 stream 1
 s15_s11.w += vrmpy(w10.b, x50_x10.ub) //[1, 6]
 x61_x21 = memd(ptr_x1+stride<<#2) //[1, 6]
} {
 w01.tmp = vmem(ptr_w0+#0) //[1, 7]2nd 32weights stream 0
 s06_s02.w += vrmpy(w01.b, x61_x21.ub) //[1, 7]acc 2,3,6,7
 s07_s03.w += vrmpy(w01.b, x71_x31.ub) //[1, 7]
 x51_x11 = memd(ptr_x1+stride<<#1) //[1, 7]
} {
 w11.tmp = vmem(ptr_w1+#0) //[1, 8]2nd 32weights of stream 1
 s16_s12.w += vrmpy(w11.b, x61_x21.ub) //[1, 8]
 s17_s13.w += vrmpy(w11.b, x71_x31.ub) //[1, 8]
 x41_x01 = memd(ptr_x1++#1<<3) //[1, 8]
} {
 w01.tmp = vmem(ptr_w0++#1) //[1, 9]same 2nd 32weights stream 0
 s04_s00.w += vrmpy(w01.b, x41_x01.ub) //[1, 9]
 s05_s01.w += vrmpy(w01.b, x51_x11.ub) //[1, 9]
 p0 = cmp.gt(ptr_x0, cbuf_eob) //[E,10]
} {
 w11.tmp = vmem(ptr_w1++#1) //[1,10]same 2nd 32weights stream 1
 s14_s10.w += vrmpy(w11.b, x41_x01.ub) //[1,10]
 s15_s11.w += vrmpy(w11.b, x51_x11.ub) //[1,10]
 if(p0)ptr_x0 = sub(ptr_x0, cbuf_size) //[E,10]wrap around end of buffer
}:endloop1
/* ---- last (or only) filter row: re-arm the width loop against the
   .L_filt_width1 body, which has no loop1 bookkeeping. ---- */
.L_last1:
{ p3 = sp1loop0(.L_filt_width1, filt_width) //set up inner loop for next time
 ptr_x1 = ptr_x0 //set up current input ptr
 ptr_x0 = add(ptr_x0, in_width_32) //if >= buf_size -= buf_size
} {
 x70_x30 = memd(ptr_x1+stride3<<#1) //[0, 0]load pt 3 and 7
 p0 = cmp.gt(ptr_x0, cbuf_eob) //[E,10]
 if(p0.new)ptr_x0 = sub(ptr_x0, cbuf_size) //[E,10]wrap around end of buffer
} {
 x60_x20 = memd(ptr_x1+stride<<#2) //[0, 2]load pt 2 and 6
 fetch_ptr = addasl(ptr_x0, stride, #4) //initial fetch ptr
}
 .balign 32
/* ---- width loop for the final filter row: identical vrmpy accumulation
   pattern to .L_filt_width, without the vertical-loop overhead. ---- */
.L_filt_width1:
{ dcfetch(fetch_ptr+#0<<6) //[0, 2]fetch 64bytes-2 lots 8 x 4 bytes
 fetch_ptr = add(fetch_ptr, #64) //[0, 2]inc fetch by 32/64 bytes (1 line)
} {
 w00.tmp = vmem(ptr_w0+#0) //[0, 3]1st 32 weights of out depth
 s06_s02.w+= vrmpy(w00.b, x60_x20.ub) //[0, 3]macc 2,6 out 0
 s07_s03.w+= vrmpy(w00.b, x70_x30.ub) //[0, 3]macc 3,7 even row
 x50_x10 = memd(ptr_x1+stride<<#1) //[0, 3]load pt 1 5
} {
 w10.tmp = vmem(ptr_w1+#0) //[0, 4]1st 32 weights stream 1
 s16_s12.w += vrmpy(w10.b, x60_x20.ub) //[0, 4]acc 2,6 out 1
 s17_s13.w += vrmpy(w10.b, x70_x30.ub) //[0, 4]acc 3,7 out 1
 x40_x00 = memd(ptr_x1++#1<<3) //[0, 4]load pts 0, 4
} {
 w00.tmp = vmem(ptr_w0++#1) //[0, 5]same 1st 32weights stream 0
 s04_s00.w += vrmpy(w00.b, x40_x00.ub) //[0, 5]acc 0,4,1,5 out 0
 s05_s01.w += vrmpy(w00.b, x50_x10.ub) //[0, 5]
 x71_x31 = memd(ptr_x1+stride3<<#1) //[0, 5]
} {
 w10.tmp = vmem(ptr_w1++#1) //[0, 6]same 1st 32weight stream 1
 s14_s10.w += vrmpy(w10.b, x40_x00.ub) //[0, 6]acc 0,4,1,5 stream 1
 s15_s11.w += vrmpy(w10.b, x50_x10.ub) //[0, 6]
 x61_x21 = memd(ptr_x1+stride<<#2) //[0, 6]
} {
 w01.tmp = vmem(ptr_w0+#0) //[0, 7]2nd 32weights stream 0
 s06_s02.w += vrmpy(w01.b, x61_x21.ub) //[0, 7]acc 2,3,6,7
 s07_s03.w += vrmpy(w01.b, x71_x31.ub) //[0, 7]
 x51_x11 = memd(ptr_x1+stride<<#1) //[0, 7]
} {
 w11.tmp = vmem(ptr_w1+#0) //[0, 8]2nd 32weights of stream 1
 s16_s12.w += vrmpy(w11.b, x61_x21.ub) //[0, 8]
 s17_s13.w += vrmpy(w11.b, x71_x31.ub) //[0, 8]
 x41_x01 = memd(ptr_x1++#1<<3) //[0, 8]
} {
 w01.tmp = vmem(ptr_w0++#1) //[0, 9]same 2nd 32weights stream 0
 s04_s00.w += vrmpy(w01.b, x41_x01.ub) //[0, 9]
 s05_s01.w += vrmpy(w01.b, x51_x11.ub) //[0, 9]
 x70_x30 = memd(ptr_x1+stride3<<#1) //[1, 0]
} {
 w11.tmp = vmem(ptr_w1++#1) //[0,10]same 2nd 32weights stream 1
 s14_s10.w += vrmpy(w11.b, x41_x01.ub) //[0,10]
 s15_s11.w += vrmpy(w11.b, x51_x11.ub) //[0,10]
 x60_x20 = memd(ptr_x1+stride<<#2) //[1, 1]
}:endloop0
/* ---- drain of the final-row width loop; p2 at the end starts the
   valid-column bookkeeping for the quantization section below. ---- */
{ w00.tmp = vmem(ptr_w0+#0) //[1, 3]1st 32 weights of out depth
 s06_s02.w+= vrmpy(w00.b, x60_x20.ub) //[1, 3]acc 2,3,6,7 out 0
 s07_s03.w+= vrmpy(w00.b, x70_x30.ub) //[1, 3]
 x50_x10 = memd(ptr_x1+stride<<#1) //[1, 3]
} {
 w10.tmp = vmem(ptr_w1+#0) //[1, 4]1st 32 weights stream 1
 s16_s12.w += vrmpy(w10.b, x60_x20.ub) //[1, 4]acc 2,3,6,7 out 1
 s17_s13.w += vrmpy(w10.b, x70_x30.ub) //[1, 4]
 x40_x00 = memd(ptr_x1++#1<<3) //[1, 4]
} {
 w00.tmp = vmem(ptr_w0++#1) //[1, 5]same 1st 32weights stream 0
 s04_s00.w += vrmpy(w00.b, x40_x00.ub) //[1, 5]acc 0,4,1,5 out 0
 s05_s01.w += vrmpy(w00.b, x50_x10.ub) //[1, 5]
 x71_x31 = memd(ptr_x1+stride3<<#1) //[1, 5]
} {
 w10.tmp = vmem(ptr_w1++#1) //[1, 6]same 1st 32weight stream 1
 s14_s10.w += vrmpy(w10.b, x40_x00.ub) //[1, 6]acc 0,4,1,5 stream 1
 s15_s11.w += vrmpy(w10.b, x50_x10.ub) //[1, 6]
 x61_x21 = memd(ptr_x1+stride<<#2) //[1, 6]
} {
 w01.tmp = vmem(ptr_w0+#0) //[1, 7]2nd 32weights stream 0
 s06_s02.w += vrmpy(w01.b, x61_x21.ub) //[1, 7]acc 2,3,6,7
 s07_s03.w += vrmpy(w01.b, x71_x31.ub) //[1, 7]
 x51_x11 = memd(ptr_x1+stride<<#1) //[1, 7]
} {
 w11.tmp = vmem(ptr_w1+#0) //[1, 8]2nd 32weights of stream 1
 s16_s12.w += vrmpy(w11.b, x61_x21.ub) //[1, 8]
 s17_s13.w += vrmpy(w11.b, x71_x31.ub) //[1, 8]
 x41_x01 = memd(ptr_x1++#1<<3) //[1, 8]
} {
 w01.tmp = vmem(ptr_w0++#1) //[1, 9]same 2nd 32weights stream 0
 s04_s00.w += vrmpy(w01.b, x41_x01.ub) //[1, 9]
 s05_s01.w += vrmpy(w01.b, x51_x11.ub) //[1, 9]
 dcfetch(fetch_ptr+#0<<6) //[0, 0]fetch 64bytes-2 lots 8 x 4 bytes
} {
 w11.tmp = vmem(ptr_w1++#1) //[1,10]same 2nd 32weights stream 1
 s14_s10.w += vrmpy(w11.b, x41_x01.ub) //[1,10]
 s15_s11.w += vrmpy(w11.b, x51_x11.ub) //[1,10]
 p2 = cmp.gt(col_count, #1) // do we need column 1?
}
/* ------------------------------------------------------------------------ */
/* ---- quantize & store section: for each of the 16 accumulators, shift by
   zshift, multiply by the 255/(max-min) reciprocal (vmpye/vmpyo 32x16 pair
   with SSR rounding), vpack down to saturated u8 and store 8 output pixels
   per depth-32 stream.  stmp0/stmp1 substitute only-valid columns into the
   running min/max so partially-used column groups don't pollute the range.
   Predicates p2/p3/p0/p1 gate which of columns 1..7 are real (col_count),
   and the section ends by re-seeding accumulators and looping to .L_width. */
{ zshift = memw(sp+#40<<2) //final shift 7 + 16
 stmp0 = s00; // for replacing unused cols
 mine.w = vmin(mine.w, s00.w) //min accumulation 0.0
 maxe.w = vmax(maxe.w, s00.w) //max accumulation 0.0
} {
 if(p2) stmp0 = s01 // col 0.1 if needed
 stmp1 = s10
 s0_sh.w = vasl(s00.w, zshift) //
 p3 = cmp.gt(col_count, #-6) // do we need column 2?
} {
 if(p2) stmp1 = s11 // col 1.1 if needed
 maxe.w = vmax(maxe.w, stmp0.w) //max accumulation, col 0.1
 mine.w = vmin(mine.w, stmp0.w) //min accumulation, col 0.1
 col_count = add(col_count, #-8) //decrement width count by 8
} {
 s1_sh.w = vasl(s01.w, zshift) //
 maxo.w = vmax(maxo.w, stmp1.w) //max accumulation 1.1
 mino.w = vmin(mino.w, stmp1.w) //min accumulation 1.1
// adjust = memw(sp+#23<<2) //
} {
 if( p3 ) stmp0 = s02; // col 2 if needed
 if( p3 ) stmp1 = s12; // col 2 if needed
 p2 = cmp.gt(col_count, #-5) // do we need column 3?
 p0 = cmp.gt(col_count, #-4) // cols 4..7 needed (also for store)
} {
 s00.w = vmpye(s0_sh.w, vrecip0.uh)
 maxe.w = vmax(maxe.w, stmp0.w) //max accumulation 0.2
 mine.w = vmin(mine.w, stmp0.w) //min accumulation 0.2
} {
 s00.w += vmpyo(s0_sh.w, vrecip0.h):SSR
 mino.w = vmin(mino.w, stmp1.w) //min accumulation col 1.2
 maxo.w = vmax(maxo.w, stmp1.w) //max accumulation col 1.2
// ptr_x0 = sub(ptr_x0, adjust) //-=filt_height if stride_height > filt_height
} {
 s01.w = vmpye(s1_sh.w, vrecip0.uh)
 s0_sh.w = vasl(s02.w, zshift) //
 if( p2 ) stmp0 = s03; // col 3 if needed
 p3 = cmp.gt(col_count, #-3) // do we need column 5?
} {
 if( p2 ) stmp1 = s13; // col 3 if needed
 mine.w = vmin(mine.w, stmp0.w) //min accumulation col 0.3
 maxe.w = vmax(maxe.w, stmp0.w) //max accumulation col 0.3
} {
 s01.w += vmpyo(s1_sh.w, vrecip0.h):SSR
 mino.w = vmin(mino.w, stmp1.w) //min accumulation col 1.3
 maxo.w = vmax(maxo.w, stmp1.w) //max accumulation col 1.3
} {
 s02.w = vmpye(s0_sh.w, vrecip0.uh)
 s1_sh.w = vasl(s03.w, zshift) //
 if( p0 ) stmp0 = s04; // col 4 if needed
} {
 s02.w += vmpyo(s0_sh.w, vrecip0.h):SSR
 mine.w = vmin(mine.w, stmp0.w) //min accumulation col 0.4
 maxe.w = vmax(maxe.w, stmp0.w) //max accumulation col 0.4
} {
 if( p0 ) stmp1 = s14; // col 4 if needed
 if( p3 ) stmp0 = s05; // col 5 if needed
 maxo.w = vmax(maxo.w, s10.w) //max accumulation 1.0
 mino.w = vmin(mino.w, s10.w) //min accumulation 1.0
} {
 d010.h = vpack(s01.w, s00.w):sat //pack high 16bits of accs
 s03.w = vmpye(s1_sh.w, vrecip0.uh)
 s0_sh.w = vasl(s04.w, zshift) //
} {
 s03.w += vmpyo(s1_sh.w, vrecip0.h):SSR
 maxo.w = vmax(maxo.w, stmp1.w) //max accumulation 1.4
 mino.w = vmin(mino.w, stmp1.w) //min accumulation 1.4
 ptr_x0 += mpyi(stride, #16) //stride*2*4 advance buffer by 8 outputs
} {
 s04.w = vmpye(s0_sh.w, vrecip0.uh)
 s1_sh.w = vasl(s05.w, zshift) //
 maxe.w = vmax(maxe.w, stmp0.w) //max accumulation 0.5
 p2 = cmp.gt(col_count, #-2) // do we need column 6?
} {
 d032.h = vpack(s03.w, s02.w):sat //pack high 16bits of accs
 s04.w += vmpyo(s0_sh.w, vrecip0.h):SSR
 if( p3 ) stmp1 = s15; // col 5 if needed
 p3 = cmp.gt(col_count, #-1) // do we need column 7?
} {
 mine.w = vmin(mine.w, stmp0.w) //min accumulation 0.5
 maxo.w = vmax(maxo.w, stmp1.w) //max accumulation 1.5
 if( p2 ) stmp0 = s06; // col 6 if needed
} {
 s05.w = vmpye(s1_sh.w, vrecip0.uh)
 s0_sh.w = vasl(s06.w, zshift) //
 mino.w = vmin(mino.w, stmp1.w) //min accumulation 1.5
} {
 maxe.w = vmax(maxe.w, stmp0.w) //max accumulation 0.6
 s05.w += vmpyo(s1_sh.w, vrecip0.h):SSR
 d03210.ub = vpack(d032.h, d010.h):sat //saturate/pack 16 -> 8 bits
 vmem(ptr_z0++#1) = d03210.new //store 0-3 even row
} {
 mine.w = vmin(mine.w, stmp0.w) //min accumulation 0.6
 if( p2 ) stmp1 = s16; // col 6 if needed
} {
 if( p3 ) stmp0 = s07; // col 7 if needed
 s06.w = vmpye(s0_sh.w, vrecip0.uh)
 s1_sh.w = vasl(s07.w, zshift) //
} {
 s06.w += vmpyo(s0_sh.w, vrecip0.h):SSR
 d054.h = vpack(s05.w, s04.w):sat //pack high 16bits of accs
 maxo.w = vmax(maxo.w, stmp1.w) //max accumulation 1.6
} {
 s07.w = vmpye(s1_sh.w, vrecip0.uh)
 mino.w = vmin(mino.w, stmp1.w) //min accumulation 1.6
 s0_sh.w = vasl(s10.w, zshift) //
} {
 s07.w += vmpyo(s1_sh.w, vrecip0.h):SSR
 maxe.w = vmax(maxe.w, stmp0.w) //max accumulation 0.7
 mine.w = vmin(mine.w, stmp0.w) //min accumulation 0.7
} {
 s10.w = vmpye(s0_sh.w, vrecip1.uh)
 s1_sh.w = vasl(s11.w, zshift) //
 if( p3 ) stmp1 = s17; // col 7 if needed
} {
 d076.h = vpack(s07.w, s06.w):sat //pack high 16bits of accs
 s10.w += vmpyo(s0_sh.w, vrecip1.h):SSR
 maxo.w = vmax(maxo.w, stmp1.w) //max accumulation 1.7
} {
 s11.w = vmpye(s1_sh.w, vrecip1.uh)
 s0_sh.w = vasl(s12.w, zshift) //
 mino.w = vmin(mino.w, stmp1.w) //min accumulation 1.7
} {
 s11.w += vmpyo(s1_sh.w, vrecip1.h):SSR
 d07654.ub = vpack(d076.h, d054.h):sat //saturate/pack 16 -> 8 bits
 if(p0) vmem(ptr_z0++#1):nt = d07654.new //store 4-7 even row
} {
 s12.w = vmpye(s0_sh.w, vrecip1.uh)
 s1_sh.w = vasl(s13.w, zshift) //
} {
 d110.h = vpack(s11.w, s10.w):sat //pack high 16bits of accs
 s12.w += vmpyo(s0_sh.w, vrecip1.h):SSR
} {
 s13.w = vmpye(s1_sh.w, vrecip1.uh)
 s0_sh.w = vasl(s14.w, zshift) //
} {
 loop1(.L_filt_height, filt_height) //setup vertical filter loop
 s13.w += vmpyo(s1_sh.w, vrecip1.h):SSR
 p1 = cmp.gt(col_count, #0)
} {
 s14.w = vmpye(s0_sh.w, vrecip1.uh)
 s1_sh.w = vasl(s15.w, zshift) //
 p3 = sp1loop0(.L_filt_width, filt_width) //setup inner filter loop
} {
 d132.h = vpack(s13.w, s12.w):sat //pack high 16bits of accs
 s14.w += vmpyo(s0_sh.w, vrecip1.h):SSR
} {
 s15.w = vmpye(s1_sh.w, vrecip1.uh)
 s0_sh.w = vasl(s16.w, zshift) //
 wsum0 = vmem(ptr_filtsum+#0) //set 1st weight offset
} {
 s15.w += vmpyo(s1_sh.w, vrecip1.h):SSR
 d13210.ub = vpack(d132.h, d110.h):sat //saturate/pack 16 -> 8 bits
 vmem(ptr_z1++#1):nt = d13210.new //store 0-3 odd row
} {
 s16.w = vmpye(s0_sh.w, vrecip1.uh)
 s1_sh.w = vasl(s17.w, zshift) //
 wsum1 = vmem(ptr_filtsum+#1) //set 2nd weight offset
 if(p1)zshift = #0 // force zshift <-0 (except last loop)
 //zshift = mux(p1,#0,zshift) // force zshift <-0 (except last loop)
} {
 s16.w += vmpyo(s0_sh.w, vrecip1.h):SSR
 d154.h = vpack(s15.w, s14.w):sat //pack high 16bits of accs
 fetch_ptr_base = add(ptr_x0, in_width_32) //fetch is next row ahead
 maxe.w =vasl(maxe.w,zshift) // find maxe <<= zshift (on last)
} {
 s17.w = vmpye(s1_sh.w, vrecip1.uh)
 s04_s00 = vcombine(wsum0,wsum0) //init sum0 and 4
 p2 = cmp.gt(fetch_ptr_base, cbuf_eob) //[E,10]
 if(p2.new)fetch_ptr_base=sub(fetch_ptr_base,cbuf_size)//[E,10]wrap around end fetch ptr
} {
 s17.w += vmpyo(s1_sh.w, vrecip1.h):SSR
 s05_s01 = vcombine(wsum0,wsum0) //init sum1 and 5
} {
 s06_s02 = vcombine(wsum0,wsum0) //init sum2 and 6
 s07_s03 = vcombine(wsum0,wsum0) //init sum3 and 7
 ptr_w0 = memw(sp+#13<<2) //access ptr weight
} {
 d176.h = vpack(s17.w, s16.w):sat //pack high 16bits of accs
 s14_s10 = vcombine(wsum1,wsum1) //init sum 0 and 4
 ptr_w1 = memw(sp+#20<<2) //access weights stride
} {
 s15_s11 = vcombine(wsum1,wsum1) //init sum 1 and 5
 s16_s12 = vcombine(wsum1,wsum1) //init sum 2 and 6
 weight_stride = memw(sp+#24<<2) //
} {
 s17_s13 = vcombine(wsum1,wsum1) //init sum 3 and 7
 d17654.ub = vpack(d176.h, d154.h):sat //saturate/pack 16 -> 8 bits
 if( p0) vmem(ptr_z1++#1):nt = d17654.new //store 4-7 odd row
 if( p1) jump .L_width //next 2 rows 8 points per row
}//endloop width
/* --------------------------------------------------------------------------- */
// need to apply << zshift to mine, maxo, mino;
// maxe has been done already
// // The below then scales these by the per-depth gains, applies to gmin/gmax,
// and resets mine:maxe and mino:maxo to +7fffffff : -7fffffff
// (maxo is copied from maxe at the top of the loop, so we don't need that here)
//
{ tmax.w = vmpye(maxe.w, vrecip0.uh)
 recip_level = memw(sp+#37<<2) //255/max
 p0 = cmp.eq(out_height, #0) //are vertical lines done?
 mine.w = vasl(mine.w,zshift)
} {
 tmax.w+= vmpyo(maxe.w, vrecip0.h):SSR //
 maxe = #0 //
 memw(sp+#13<<2) += weight_stride //access ptr weight
} {
 maxo.w = vasl(maxo.w,zshift)
 gmax.w = vmax(gmax.w, tmax.w)
 tmin.w = vmpye(mine.w, vrecip0.uh)
} {
 tmin.w+= vmpyo(mine.w, vrecip0.h):SSR //
 wscale = vrecip1
 memw(sp+#20<<2) += weight_stride //access weights stride
 mine = vnot(maxe) // 0xFFFFFFFF
} {
 gmin.w = vmin(gmin.w, tmin.w)
 tmax.w = vmpye(maxo.w, wscale.uh)
 mino.w = vasl(mino.w,zshift)
} {
 mine.uw = vavg(mine.uw,maxe.uw) // mine = 0x7FFFFFFF
 tmax.w+= vmpyo(maxo.w, wscale.h):SSR //
 if(!p0)vrecip0 = vmem(recip_level++#2) //used to compress to 8bits 255/max
 col_count = memw(sp+#15<<2) //initialize width count
} {
 gmax.w = vmax(gmax.w, tmax.w)
 tmin.w = vmpye(mino.w, wscale.uh)
 if(!p0)vrecip1 = vmem(recip_level+#-1) //used to compress to 8bits 255/max
} {
//memw(sp+#43<<2) = ptr_equalize //
 tmin.w+= vmpyo(mino.w, wscale.h):SSR //
 maxe.w = vsub(maxe.w,mine.w) // maxe = -0x7fffffff
 mino = mine //
} {
 gmin.w = vmin(gmin.w, tmin.w)
 ptr_filtsum = add(ptr_filtsum, #256)
 memw(sp+#37<<2) = recip_level //255/max
 if(!p0) jump:t .L_height //then go again
}
/* ------------------------------------------------------------------------ */
/* ---- epilogue: store the per-depth gmax/gmin vectors, restore the saved
   callee-saved registers and return.  The #if 0 path is an older variant
   that fully reduced max/min to scalars with vdeal before storing. ---- */
#if 0
.L_domax:
{ ptr_max = memw(sp+#36<<2) //get max/min ptr
 cm4 = #-4 //define int based deal
} {
 loop0(.L_peak, #4) //set up vec reduce
 maxo_maxe = vdeal(maxe, maxe, cm4) //deal out odd and even
}
.L_peak:
{ maxe.w = vmax(maxe.w, maxo.w) //reduce
 mino_mine = vdeal(mine, mine, cm4) //split out and and even min
} {
 mine.w = vmin(mine.w, mino.w) //reduce mins by 2
} {
 maxo_maxe = vdeal(maxe, maxe, cm4) //split out odd and even max
}:endloop0
{ maxe.w = vmax(maxo.w, maxe.w) //reduce max
 vmem(ptr_max+#0) = maxe.new //store max
 mino_mine = vdeal(mine, mine, cm4) //split out mins
} {
 mine.w = vmin(mino.w, mine.w) //reduce mins to final 1
 vmem(ptr_max+#1) = mine.new //store min
}
/* ------------------------------------------------------------------------ */
{ r17:16 = memd(sp+#0) //restore stack
 r19:18 = memd(sp+#2<<2) //18,19
#else
{ vmem(ptr_max+#0) = gmax //store max
 r17:16 = memd(sp+#0) //restore stack
} {
 r19:18 = memd(sp+#2<<2) //18,19
 vmem(ptr_max+#1) = gmin //store min
#endif
} {
 r21:20 = memd(sp+#4<<2) //20,21
 r23:22 = memd(sp+#6<<2) //22,23
} {
 r25:24 = memd(sp+#8<<2) //24,25
 r27:26 = memd(sp+#10<<2) //26,27
} {
 dealloc_return //
}
/* ------------------------------------------------------------------------ */
.L_end:
/* ======================================================================== */
/* NOTE(review): the symbol in the .size directive below
   (gvconv2dbbb_circ_d64_v65_asm) does not match the function documented in
   this file's header (gvconv2db2b2b2u_d32_asm) — this looks like copy/paste
   from another kernel.  Confirm against the actual entry label before the
   first packet of this function and correct if it differs. */
 .size gvconv2dbbb_circ_d64_v65_asm, .L_end-gvconv2dbbb_circ_d64_v65_asm
|
XiaoMi/nnlib | 24,111 | hexagon/asm_src/lrn_d32_hvx.S | /*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
void lrn_d32_hvx(uint8_t const * in_ptr, int in_depth, int in_offset, int radius, int32_t * tmp_buf,
uint8_t * lrn_ptr,
int kappa, int sigma, int beta, int recip, int out_offset,
int next_d32_row, int next_logical_line,
                 int width, int height, int depth_rng)
Behavioral Model
----------------
*/
#if 0
/* Scalar floating-point reference model for the d32 LRN kernel (disabled by
 * the surrounding #if 0; kept as documentation of the intended behavior).
 * For each logical row and each 4-pixel column group:
 *   1. square the inputs of all depth-32 slices into tmp[],
 *   2. window-sum tmp over [-radius, +radius] along depth, crossing into the
 *      previous/next d32 slice at slice boundaries,
 *   3. scale by sigma = 2^(beta*log2(K + alpha*sum)) and track output
 *      min/max into *omin / *omax. */
void lrn_f(float *ptr_x, int in_depth, int in_offset, int radius, float *tmp, float *ptr_y,
 float K, float alpha, float beta, float *omin, float *omax, int width, int height)
{
 int i, j, k, h, v, w, rows = in_depth/32;
 float sum, sigma, u, val;
 *omin = 99999; *omax =-99999;
 for(v=0; v < height; v++)
 {
 for(h=0; h < width/4; h++)
 {
 for(i=0; i < rows; i+=1)
 {
 for(j=0; j < 128; j++)
 {
 u = ptr_x[v*width*in_depth +128*h+ i*32*width+j];
 tmp[128*i+j] = u*u;
 }
 }
 for(w = 0; w < 128; w+=32)
 for(i=0; i < rows; i++) {
 for(j=0; j < 32; j++)
 {
 sum = 0;
 for(k=-radius; k <= radius; k++)
 {
 if((k+j) < 0 && i > 0)
 sum += tmp[(i-1)*128+k+j+w+32];   /* borrow from previous d32 slice */
 else if((k+j) >= 32 && i < rows-1)
 sum += tmp[(i+1)*128+k+j+w-32];   /* borrow from next d32 slice */
 else if((k+j) < 32 && (k+j) >= 0)
 sum += tmp[i*128+k+j+w];          /* in-slice contribution */
 }
 sigma = exp2( beta*log2(K + alpha*sum));
 u = ptr_x[v*width*in_depth +128*h+ i*32*width+j+w];
 val = sigma*u;
 if(val > *omax) *omax = val;
 if(val < *omin) *omin = val;
 ptr_y[v*width*in_depth +128*h+ i*32*width+j+w] = val;
 }//end j
 }//end i/w
 }//end h
 }//end v
}
#endif
/*
Methods+Notes
-------------
step_in = (imax - imin )/255.0;
    - scaled the k value by step size to avoid mpy
offset = k/(alpha*step_in*step_in);
- scaled step size by 4 and require data to be scaled by 4 for square
in_offset = (int) (4.0*imin/step_in + 0.0);
- offset scaled by 16 more accurate
- all absorbed into sigma
 gamma = log2(alpha*step_in*step_in) - 4.0;  (absorb the 16 into the log)
sigma = -log2(imax-imin)+log2(omax-omin) - beta * gamma ;
-beta replaced by sigma
 from the radius sum, use cl0 and a shift to convert to float
 log2(2**sum_exp * mant_sum) = log2(mant_sum) + sum_exp
 removing the leading 1 from mant_sum, this becomes log2(1+dmant_sum)
dmant_sum is 15bit and poly is done using S16 arithmetic.
sum_exp is implemented using shift.
Width must be a multiple of 4, and depth a multiple of 32; however
the parameter depth_rng supports depth edge padding.
depth_rng is d_lo in lower 16 bits, d_hi in upper, where
d_lo is 0..31 = 'left padding' in first depth slice
d_hi = 1..32 = (32-right padding); this is the number of valid
elements in the last depth slice (if depth=32, it
also includes d_lo)
so depth = d_lo + (actual_depth) + 32 -d_hi
A value of depth_rng = 0 is equivalent to (32<<16) i.e. d_lo = 0, d_hi = 32;
this is the 'no padding' case.
Edge padding is implemented by setting up q0 and q1:
q0 : out of first 64: 2*d_lo are 1, rest 0; the 2nd 64 is the same
q1 : out of first 64: 2*d_hi are 1, rest 0; the 2nd 64 is the same
for depth_rng=0, we will have q0 = {all 0}, q1 = { all 1}
 These masks are applied before squaring the inputs; they correct the window
contexts for the valid outputs which are near the edge, but the outputs
which fall within the padding are still going to be indeterminate.
Makes use of:
extern const int16_t lut_Log2_and_Pow2[6*64]
- 1st 3 are luts for Log(x-1) (2nd order x 16)
- 2nd 3 are luts for Pow2(-x) (2nd order x 16)
extern const uint8_t const_Count64[128]
= { 0,1, 63, 0, 1, .. 63 }
(used for depth masking)
*/
/* -------------------------------------------------------------*/
.text
.file "lrn_d32_hvx.S"
.global lrn_d32_hvx
.balign 32
.type lrn_d32_hvx, @function
/* -------------------------------------------------------------*/
lrn_d32_hvx:
/* -------------------------------------------------------------*/
/* ---- register aliases for lrn_d32_hvx (function body continues beyond this
   section).  First group follows the argument order of the C prototype in
   the header comment above. ---- */
#define in_ptr00 r0 //pointer to input data
#define in_depth r1 //
#define in_offset r2 //pointer to input zero
#define radius r3 //radius of lrn - window is 2*radius + 1
#define tmp_buf0 r4 //pointer to temp buf to hold sum sq -ints
#define lrn_ptr00 r5 //pointer to output of lrn 'd data
#define kappa r14 //K in the paper
#define sigma r15 //bunch of corrections and junk to do with compression
#define beta r16 //as per paper
#define recip r27 //
#define out_offset r17 //step size correction
#define next_in_width r10 //physical width of array 32*width
#define next_logcl_row r22 //distance to next entire row of new depths
#define in_width r13 //how much work per line
#define in_height r23 //number of logical rows
#define log_exp r26 //table to log2(1+x) and exp(-x)
/* -------------------------------------------------------------*/
#define in_ptr0 r24 //pointer to input data
#define lrn_ptr0 r25 //pointer to output of lrn 'd data
#define col_count r20
#define rows r9 //num slices of bread
#define rowsb r12
#define tmp_buf r8 //
#define lrn_ptr r21 //pointer to output of lrn 'd data
#define cm1 r7 //= -1
#define cm4 r6 //= -4
#define cm2 r2
#define eob_count r11
#define offset r1
#define offset1 r7
#define in_ptr_o r14 //in ptr for output calculation
#define in_ptr_i r15 //in ptr for input calculation
#define c0 r1 //0
/* -------------------------------------------------------------*/
#define x3x2x1x0 v0
#define x3x2_x1x0 v29:28
#define x3x2 v29
#define x1x0 v28
#define x3_x2 v5:4
#define y3y2 v21
#define y1y0 v20
#define x2 v4
#define x3 v5
#define x1_x0 v7:6
#define x1 v7
#define x0 v6
#define vzero0 v8
#define vzero1 v9
#define vzero2 v10
#define vzero3 v11
#define x00 v3
#define x10 v12
#define x20 v13
#define x30 v18
#define x01 v14
#define x11 v15
#define x21 v16
#define x31 v17
#define xe0 v14
#define xe1 v15
#define xe1_xe0 v15:14
#define xe2 v16
#define xe3 v17
#define xe3_xe2 v17:16
#define d0 v1
#define d1 v1
#define d2 v1
#define d3 v1
#define sum0 v20
#define sum1 v21
#define sum2 v22
#define sum3 v23
#define xm0 v20
#define xm1 v21
#define xm1_xm0 v21:20
#define xm2 v22
#define xm3 v23
#define xm3_xm2 v23:22
#define xm1xm0 v20
#define xm3xm2 v21
#define xm3xm2_xm1xm0 v21:20
#define dx1dx0 v22
#define dx3dx2 v23
#define vin_offset v19
#define vout_offset v30
#define vzero v24
#define vkappa v25
#define vsigma v26
#define vbeta v27
#define range v1
#define tab v2 //lut entry
#define coef1 v29
#define coef0 v28
#define coef1_coef0 v29:28
/* -------------------------------------------------------------*/
{
allocframe(#56)
q1 = or(q0,!q0) // set to all 1
r6 = memw(sp+#(25-16)<<2) // depth_rng
} {
memd(sp+#0) = r17:16
memd(sp+#8) = r19:18
q0 = and(q0,!q0); // set to all 0;
} {
memd(sp+#16) = r21:20
memd(sp+#24) = r23:22
p1 = cmp.eq(r6,#0)
} {
memd(sp+#32) = r25:24
memd(sp+#40) = r27:26
if( p1 ) jump:t .L_no_depth_pad
}
//>>> set up q0,q1 for depth range, based on r6
{
r6 = add(r6,r6) // d_lo * 2 ( in ls byte )
r7 = lsr(r6,#15) // d_hi * 2 (in ls byte )
r16 = add(PC,##const_Count64@PCREL) // pointer to const vec
} {
r6 = vsplatb(r6)
r7 = vsplatb(r7)
v2 = vmem(r16+#0) // v2 = { 0, 1, ... 63, 0,1, .. 63 }
} {
v0 = vsplat(r6) // all 2*d_lo
v1 = vsplat(r7) // all 2*d_hi
} {
q0 = vcmp.gt( v0.ub,v2.ub) // q0: i < 2*d_lo
q1 = vcmp.gt( v1.ub,v2.ub) // q1: i < 2*d_hi
}
// <<<<
.L_no_depth_pad:
{
kappa = memw(sp+#16<<2)
sigma = memw(sp+#17<<2)
} {
beta = memw(sp+#18<<2)
recip = memw(sp+#19<<2)
} {
out_offset = memw(sp+#20<<2)
recip = combine(recip.L, recip.L)
} {
next_in_width = memw(sp+#21<<2)
next_logcl_row = memw(sp+#22<<2)
log_exp = add(PC,##lut_Log2_and_Pow2@PCREL) // pointer to poly tables
} {
in_width = memw(sp+#23<<2)
in_height = memw(sp+#24<<2)
vsigma = vsplat(sigma)
} {
in_offset = combine(in_offset.L, in_offset.L)
M0 = next_in_width
rows = lsr(in_depth, #5) //number of 32depths in the depth
vzero = #0
} {
vin_offset = vsplat(in_offset)
vbeta = vsplat(beta)
out_offset = combine(out_offset.L, out_offset.L)
} {
vkappa = vsplat(kappa)
cm1 = #-1
rowsb = add(rows, #-1)
vout_offset = vsplat(out_offset)
}
/* -------------------------------------------------------------*/
.balign 32
.L_height:
{
col_count = in_width;
in_height = add(in_height, #-1)
in_ptr0 = in_ptr00
lrn_ptr0 = lrn_ptr00
}
.L_width:
/* -------------------------------------------------------------*/
{
x3x2x1x0.tmp = vmem(in_ptr0+#0) //[0, 0]
x3x2_x1x0=vshuff(vzero,x3x2x1x0,cm1) //[0, 0]shuffle in zeros
cm2 = #-2 //
in_ptr_i = add(in_ptr0, next_in_width) //
} {
cm4 = #-4
in_ptr_o = in_ptr0
p0 = cmp.eq(rowsb, #0) // skip loop if rows=1
} {
y1y0.h = vadd(x1x0.h, vin_offset.h) //[0, 3]
p3 = sp1loop0(.L_depth32, rowsb) //
tmp_buf = tmp_buf0
} {
col_count = add(col_count, #-4)
lrn_ptr = lrn_ptr0
y3y2.h = vadd(x3x2.h, vin_offset.h) //[0, 4]
} {
y1y0 = vmux( q0, vzero, y1y0 ); // 'left side' depth masking
y3y2 = vmux( q0, vzero, y3y2 );
if(p0) jump .L_skip
}
/* -------------------------------------------------------------*/
.balign 32
.L_depth32:
{
x1_x0.w = vmpy(y1y0.h, y1y0.h) //[0, 5]x1 x0 x1 x0:x1 x0 x1 x0
x3x2x1x0.tmp = vmem(in_ptr_i++M0) //[1, 0]
x3x2_x1x0=vshuff(vzero,x3x2x1x0,cm1) //[1, 0]shuffle in zeros
} {
x3_x2.w = vmpy(y3y2.h, y3y2.h) //[0, 6]x3 x2 x3 x2:x3 x2 x3 x2
if(p3) vmem(tmp_buf+#-1) = x3 //[0, 6]
} {
x1_x0 = vshuff(x1, x0, cm4) //[0, 7]
vmem(tmp_buf++#2) = x0.new //[0, 7]
} {
x3_x2 = vshuff(x3, x2, cm4) //[0, 8]
vmem(tmp_buf++#2) = x2.new //[0, 8]
y1y0.h = vadd(x1x0.h, vin_offset.h) //[1, 3]
} {
vmem(tmp_buf+#-3) = x1 //[0, 9]
y3y2.h = vadd(x3x2.h, vin_offset.h) //[1, 4]
}:endloop0
/* -------------------------------------------------------------*/
.L_skip:
{
y1y0 = vmux(q1, y1y0, vzero); // 'right side' depth masking
y3y2 = vmux(q1, y3y2, vzero);
}
{
x1_x0.w = vmpy(y1y0.h, y1y0.h) //[1, 5]x1 x0 x1 x0:x1 x0 x1 x0
vzero0 = #0
} {
x3_x2.w = vmpy(y3y2.h, y3y2.h) //[1, 6]x3 x2 x3 x2:x3 x2 x3 x2
if(p3) vmem(tmp_buf+#-1) = x3 //[1, 6]
vzero1 = #0
} {
x1_x0 = vshuff(x1, x0, cm4) //[1, 7]
vmem(tmp_buf++#2) = x0.new //[1, 7]
vzero2 = #0
} {
x3_x2 = vshuff(x3, x2, cm4) //[1, 8]
vmem(tmp_buf++#2) = x2.new //[1, 8]
vzero3 = #0
} {
vmem(tmp_buf+#-3) = x1 //[1, 9]
} {
vmem(tmp_buf+#-1) = x3 //[1,10]
tmp_buf = tmp_buf0
}
/* -------------------------------------------------------------*/
{
x00 = vmem(tmp_buf++#1)
loop1(.L_depth1, rows)
} {
x10 = vmem(tmp_buf++#1)
eob_count = add(rows, #-1)
} {
x20 = vmem(tmp_buf++#1)
} {
x30 = vmem(tmp_buf++#1)
}
/* -------------------------------------------------------------*/
.balign 32
.L_depth1:
{
x01 = vmem(tmp_buf++#1)
p0 = cmp.eq(eob_count, #0) //at end of block of in_depth flush pipe
sum0.w = vadd(x00.w, vkappa.w)
} {
if(p0) x01 = vzero //empty pipe at end
x11 = vmem(tmp_buf++#1)
sum1.w = vadd(x10.w, vkappa.w)
} {
if(p0) x11 = vzero //empty pipe at end
x21 = vmem(tmp_buf++#1)
sum2.w = vadd(x20.w, vkappa.w)
d3 = #0
} {
if(p0) x21 = vzero //empty pipe at end
x31 = vmem(tmp_buf++#1)
offset = asl(radius, #2) //convert to ints radiux
} {
offset1 = #4 //start from 1
if(p0) x31 = vzero //empty pipe at end
sum3.w = vadd(x30.w, vkappa.w)
loop0(.L_win_sum, radius) //win is the window radious wn = 2N+1
}
/* -------------------------------------------------------------*/
.balign 32
.L_win_sum: //perform window sum for this level of depth
{
d0 = vlalign(x00, vzero0, offset)
sum3.w = vadd(sum3.w, d3.w)
} {
sum0.w = vadd(sum0.w, d0.w)
d0 = valign(x01, x00, offset1)
} {
sum0.w = vadd(sum0.w, d0.w)
d1 = vlalign(x10, vzero1, offset)
} {
sum1.w = vadd(sum1.w, d1.w)
d1 = valign(x11, x10, offset1)
} {
sum1.w = vadd(sum1.w, d1.w)
d2 = vlalign(x20, vzero2, offset)
} {
sum2.w = vadd(sum2.w, d2.w)
d2 = valign(x21, x20, offset1)
} {
sum2.w = vadd(sum2.w, d2.w)
d3 = vlalign(x30, vzero3, offset)
offset = add(offset, #-4)
} {
sum3.w = vadd(sum3.w, d3.w)
d3 = valign(x31, x30, offset1)
offset1 = add(offset1, #4)
}:endloop0
/* -------------------------------------------------------------*/
{
eob_count = add(eob_count, #-1)
vzero0 = x00
x00 = x01
r19 = #31
} {
vzero1 = x10
x10 = x11
sum3.w = vadd(sum3.w, d3.w)
v31 = vsplat(r19)
} {
vzero2 = x20
x20 = x21
xe0.uw = vcl0(sum0.uw) //xe = Q6_R_cl0_R(sum);find leading bit
} {
vzero3 = x30
x30 = x31
xm0.w = vasl(sum0.w, xe0.w) //xm=(sum<<xe)>>16; //overshift left to make 0-1
xe0.w = vsub(v31.w, xe0.w) //xe=31-xe; number is xm * 2^xe log of this xe + log(xm)
} {
xe1.uw = vcl0(sum1.uw)
} {
xm1.w = vasl(sum1.w, xe1.w)
xe1.w = vsub(v31.w, xe1.w)
} {
xe2.uw = vcl0(sum2.uw)
} {
xm1xm0.h = vpacko(xm1.w, xm0.w)
xm2.w = vasl(sum2.w, xe2.w)
xe2.w = vsub(v31.w, xe2.w)
} {
xe3.uw = vcl0(sum3.uw)
} {
xm3.w = vasl(sum3.w, xe3.w)
xe3.w = vsub(v31.w, xe3.w)
r19 = #15
} {
xe0.w = vasl(xe0.w, r19) //xe <<= 15
r18 = ##0x78787878; //0111100s
} {
xe1.w = vasl(xe1.w, r19) //
xm3xm2.h = vpacko(xm3.w, xm2.w) //part of log2
v31 = vsplat(r18)
} {
xe2.w = vasl(xe2.w, r19) //
r18 = ##0x07ff07ff;
tab = vmem(log_exp+#2) //xm = log2tab[range][2];
} {
xe3.w = vasl(xe3.w, r19) //
range.b = vshuffo(xm3xm2.b, xm1xm0.b)
v29 = vsplat(r18)
} {
range = vand(range, v31) //xm=log21px(xm); only do xm, exponent is a shift
dx3dx2 = vand(xm3xm2, v29) //deltax = (xm & 0x7ff);
dx1dx0 = vand(xm1xm0, v29)
c0 = #0
} {
r19 = #3
dx3dx2.h = vadd(dx3dx2.h, dx3dx2.h) //deltax = deltax+deltax; //scale factor 2
} {
range.uh = vlsr(range.uh, r19) //range = (xm >> 11) & 15;
} {
xm3xm2_xm1xm0.h = vlut16(range.b, tab.h, c0)
tab = vmem(log_exp+#1) //xm= mpyrsat(xm, deltax) + log2tab[range][1];
} {
dx1dx0.h = vadd(dx1dx0.h, dx1dx0.h)
} {
xm3xm2.h = vmpy(xm3xm2.h, dx3dx2.h):<<1:rnd:sat
coef1_coef0.h = vlut16(range.b, tab.h, c0)
} {
xm1xm0.h = vmpy(xm1xm0.h, dx1dx0.h):<<1:rnd:sat
xm3xm2.h = vadd(xm3xm2.h, coef1.h)
} {
xm1xm0.h = vadd(xm1xm0.h, coef0.h)
} {
xm3xm2.h = vmpy(xm3xm2.h, dx3dx2.h):<<1:rnd:sat
tab.tmp = vmem(log_exp+#0) //xm= mpyrsat(xm, deltax) + log2tab[range][0];
coef1_coef0.h = vlut16(range.b, tab.h, c0)
} {
xm1xm0.h = vmpy(xm1xm0.h, dx1dx0.h):<<1:rnd:sat
xm3xm2.h = vadd(xm3xm2.h, coef1.h)
} {
xm1xm0.h = vadd(xm1xm0.h, coef0.h)
} {
xm3_xm2 = vshuff(vzero, xm3xm2, cm2)
r18 = #12
} {
xm1_xm0 = vshuff(vzero, xm1xm0, cm2)
xe2 = vor(xm2, xe2)
xe3 = vor(xm3, xe3)
r19 = #15
} {
xe0 = vor(xm0, xe0) //x=(((int) xe)<<15)|(int)xm;reassemble xe + log(xm)
xe1 = vor(xm1, xe1)
v31 = vsplat(r18)
} {
xm2.w = vmpyo(xe2.w, vbeta.h):<<1:rnd:sat
} {
xm2.w = vsub(vsigma.w, xm2.w)
xm3.w = vmpyo(xe3.w, vbeta.h):<<1:rnd:sat
} {
xm3.w = vsub(vsigma.w, xm3.w) //x = sigma-x; subtract from correction factor
xm0.w = vmpyo(xe0.w, vbeta.h):<<1:rnd:sat//x=x*beta);//apply beta*log(sum) 16*32bit
} {
xm1.w = vmpyo(xe1.w, vbeta.h):<<1:rnd:sat
xm0.w = vsub(vsigma.w, xm0.w)
xe2.w = vasr(xm2.w, r19)
} {
xm1.w = vsub(vsigma.w, xm1.w)
xe3.w = vasr(xm3.w, r19)
xe2.w = vadd(xe2.w, v31.w)
} {
xe0.w = vasr(xm0.w, r19)
xe3.w = vadd(xe3.w, v31.w)
} {
xe1.w = vasr(xm1.w, r19)
xe0.w = vadd(xe0.w, v31.w) //xe+12
xm1xm0.h = vpacke(xm1.w, xm0.w)
} {
xe1.w = vadd(xe1.w, v31.w)
xm3xm2.h = vpacke(xm3.w, xm2.w) //ym = exp2mx(x & 0x7fff); exponent of fraction xe is a shift left
} {
xe3_xe2 = vdeal(xe3, xe2, cm4) //ye = (x>>15) //get the final exponent
r18 = ##0x78787878; //01111000
} {
xe1_xe0 = vdeal(xe1, xe0, cm4) //get into right order
range.b = vshuffo(xm3xm2.b, xm1xm0.b)
v31 = vsplat(r18)
} {
range = vand(range, v31)
r19 = #3
r18 = ##0x07ff07ff;
} {
v31 = vsplat(r18)
} {
range.uh = vlsr(range.uh, r19) //range = (xm >> 11) & 15;
dx3dx2 = vand(xm3xm2, v31) //deltax = (xm & 0x7ff);
dx1dx0 = vand(xm1xm0, v31)
} {
tab.tmp = vmem(log_exp+#5) //ym= exp2tab[range][2];
xm3xm2_xm1xm0.h = vlut16(range.b, tab.h, c0)
} {
tab.tmp = vmem(log_exp+#4) //ym= mpyrsat(ym, deltax) + exp2tab[range][1];
coef1_coef0.h = vlut16(range.b, tab.h, c0)
} {
xm3xm2.h = vmpy(xm3xm2.h, dx3dx2.h):<<1:rnd:sat
} {
xm3xm2.h = vadd(xm3xm2.h, coef1.h)
xm1xm0.h = vmpy(xm1xm0.h, dx1dx0.h):<<1:rnd:sat
} {
xm1xm0.h = vadd(xm1xm0.h, coef0.h)
tab.tmp = vmem(log_exp+#3) //ym= mpyrsat(ym, deltax) + exp2tab[range][0];
coef1_coef0.h = vlut16(range.b, tab.h, c0)
} {
xm3xm2.h = vmpy(xm3xm2.h, dx3dx2.h):<<1:rnd:sat
} {
xm1xm0.h = vmpy(xm1xm0.h, dx1dx0.h):<<1:rnd:sat
xm3xm2.h = vadd(xm3xm2.h, coef1.h)
cm1 = #-1
} {
xm1xm0.h = vadd(xm1xm0.h, coef0.h)
} {
x3x2x1x0.tmp = vmem(in_ptr_o++M0) //xm = ptr_x[128*i+j+w]+in_offset;//back into a real number
x3x2_x1x0=vshuff(vzero,x3x2x1x0,cm1) //shuffle in zeros
} {
x3x2.h = vadd(x3x2.h, vin_offset.h) //
} {
x1x0.h = vadd(x1x0.h, vin_offset.h) //
} {
xm3_xm2.w = vmpy(xm3xm2.h, x3x2.h) //
} {
xm1_xm0.w = vmpy(xm1xm0.h, x1x0.h) //y=xm*ym//apply lrn factor and output range value
} {
xm0.w = vasr(xm0.w, xe0.w)
} {
xm1.w = vasr(xm1.w, xe1.w)
} {
xm2.w = vasr(xm2.w, xe2.w)
} {
xm3.w = vasr(xm3.w, xe3.w)
xm1xm0.h = vpacke(xm1.w, xm0.w)
} {
} {
xm1xm0.h = vmpy(xm1xm0.h, recip.h):<<1:rnd:sat //ym = (ym*recip+0x4000)>15
xm3xm2.h = vpacke(xm3.w, xm2.w)
} {
xm1xm0.h = vsub(xm1xm0.h, vout_offset.h) //produce the normalized output values in 0-255
} {
xm3xm2.h = vmpy(xm3xm2.h, recip.h):<<1:rnd:sat
} {
xm1xm0.h = vshuff(xm1xm0.h)
} {
xm3xm2.h = vshuff(xm3xm2.h)
} {
xm3xm2.h = vsub(xm3xm2.h, vout_offset.h) //ym = (y >> (15+ye))-out_offset;
} {
x3x2x1x0.ub = vpack(xm3xm2.h, xm1xm0.h):sat //if(ym>255)ym=255; elsif(ym<0)ym=0;sat 8bits
vmem(lrn_ptr++M0) = x3x2x1x0.new
}:endloop1
/* -------------------------------------------------------------*/
{
in_ptr0 = add(in_ptr0, #128)
lrn_ptr0 = add(lrn_ptr0, #128)
p0 = cmp.eq(col_count, #0)
if(!p0.new) jump:t .L_width
} {
in_ptr00 = add(in_ptr00, next_logcl_row) //next logical row-(in_depth/32)*width*32
lrn_ptr00 = add(lrn_ptr00, next_logcl_row)
p0 = cmp.eq(in_height, #0)
if(!p0.new) jump:t .L_height
}
/* -------------------------------------------------------------*/
{
r17:16 = memd(sp+#0)
r19:18 = memd(sp+#8)
} {
r21:20 = memd(sp+#16)
r23:22 = memd(sp+#24)
} {
r25:24 = memd(sp+#32)
r27:26 = memd(sp+#40)
} {
dealloc_return
}
.L_end:
/* -------------------------------------------------------------*/
.size lrn_d32_hvx, .L_end-lrn_d32_hvx
/*=============================================================================*/
|
XiaoMi/nnlib | 24,146 | hexagon/asm_src/gvconv2db2b2b2_d32_h_v65.S | /*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
* FUNCTIONS : gvconv2db2b2b2_d32_v65_asm
*
* DESCRIPTION
* Perform 2d convolution using elements of size in_depth. Results are
* scaled and saturated to 8bits. Max and Min accumulations are kept.
*
* ARCHITECTURE : QDSP6V60 + HVX
*
* INPUT : R0 : uint8_t *in_bufe
* R1 : uint8_t *in_bufo
* R2 : uint8_t *out_bufe
* R3 : uint8_t *out_bufo
* R4 : uint8_t *weights
* R5 : int32_t in_width
* SP+#0 : int32_t next_out_width
* SP+#4 : int32_t out_width
* SP+#8 : int32_t stride_h_w
* SP+#12 : int32_t in_depth
* SP+#16 : int32_t filt_width
* SP+#20 : int32_t filt_height
* SP+#24 : int32_t out_height
* SP+#28 : const int32_t *biasbuf
* SP+#32 : const int32_t *suma
* SP+#36 : int32_t next_suma_row
* SP+#40 : int32_t *ptr_minmax
* SP+#44 : int32_t recip
* SP+#48 : int32_t recip_shift
*/
.text
.global gvconv2db2b2b2_d32_v65_asm
.balign 32
.type gvconv2db2b2b2_d32_v65_asm, @function
gvconv2db2b2b2_d32_v65_asm:
/*=============================================================================*/
#define SS (13*8)
#define APTR (SS+8)
#define in_bufe r0
#define in_bufo r1
#define in_bufo_in_bufe r1:0
#define out_bufe r2
#define out_bufo r3
#define out_bufo_out_bufe r3:2
#define weights r4
#define in_width r5
#define c8w r7
#define xl03x00 r8
#define xh03x00 xl03x00
#define xl23x20 xl03x00
#define xh23x20 xl03x00
#define suma0 xl03x00
#define in_width_stride_depth xl03x00
#define sumabuf xl03x00
#define xl07x04 r9
#define xl27x24 xl07x04
#define xh27x24 xl07x04
#define xh07x04 xl07x04
#define next_out_width xl07x04
#define suma1 xl07x04
#define xl07x04_xl03x00 r9:8
#define xh07x04_xh03x00 xl07x04_xl03x00
#define xl27x24_xl23x20 xl07x04_xl03x00
#define xl13x10_xl03x00 xl07x04_xl03x00
#define xh27x24_xh23x20 xl07x04_xl03x00
#define xl33x30_xl23x20 xl07x04_xl03x00
#define xh13x10_xh03x00 xl07x04_xl03x00
#define xh33x30_xh23x20 xl07x04_xl03x00
#define next_out_width_in_width_stride_depth xl07x04_xl03x00
#define xl13x10 r10
#define xh13x10 xl13x10
#define in_bufet xl13x10
#define out_bufet xl13x10
#define recip_shift xl13x10
#define suma3 xl13x10
#define xl17x14 r11
#define xh17x14 xl17x14
#define in_bufot xl17x14
#define out_bufot xl17x14
#define recipshiftval xl17x14
#define suma2 xl17x14
#define xl17x14_xl13x10 r11:10
#define xl17x14_xl07x04 xl17x14_xl13x10
#define xh17x14_xh13x10 xl17x14_xl13x10
#define in_bufoet xl17x14_xl13x10
#define out_bufoet xl17x14_xl13x10
#define xh17x14_xh07x04 xl17x14_xl13x10
#define recipshiftval_recip_shift xl17x14_xl13x10
#define ptr_xl0 r12
#define ptr_xh0 r13
#define ptr_xl0_ptr_xh0 r13:12
#define ptr_xl1 r14
#define ptr_xh1 r15
#define ptr_wl r16
#define sumat ptr_wl
#define ptr_wh r17
#define sumainc ptr_wh
#define ptr_wh_wl r17:16
#define sumainc_sumat ptr_wh_wl
#define filt_wid r18
#define filt_ht r19
#define suma r20
#define fetch_ptrh0 r20
#define stride_w r21
#define ptr_ze r22
#define ptr_zo r23
#define fetch_ptrl0 ptr_zo
#define ptr_zo_ptr_ze r23:22
#define out_y r24
#define out_x4 r25
#define xl33x30 r26
#define xh33x30 xl33x30
#define xl37x34 r27
#define xh37x34 xl37x34
#define xl37x34_xl33x30 r27:26
#define xl37x34_xl27x24 xl37x34_xl33x30
#define xh37x34_xh33x30 xl37x34_xl33x30
#define xh37x34_xh27x24 xl37x34_xl33x30
#define stride_w4 r28
#define in_width_4 r30
#define next_outputs r31
/*=============================================================================*/
#define sll0 v0
#define sll1 v1
#define sll1_sll0 v1:0
#define sll2 v2
#define sll3 v3
#define sll3_sll2 v3:2
#define shl0 v4
#define shl1 v5
#define shl1_shl0 v5:4
#define shl2 v6
#define shl3 v7
#define shl3_shl2 v7:6
#define shh0 v8
#define s0 shh0
#define vsuma0 shh0
#define shh1 v9
#define s1 shh1
#define vsuma1 shh1
#define shh1_shh0 v9:8
#define shh2 v10
#define s2 shh2
#define vsuma2 shh2
#define shh3 v11
#define vsuma3 shh3
#define shh3_shh2 v11:10
#define wh0 v12
#define wh1 v13
#define wl0 v14
#define wl1 v15
#define min_val v16
#define max_val v17
#define recipvec v18
#define wsum v19
#define constw80 v20
#define sk v21
#define y0 v22
#define y1 v23
#define y1y0 v23:22
#define y2 v24
#define y3 v25
#define s3 v26
/*=============================================================================*/
#define off_ptr_wl ( 7*8+0)
#define off_ptr_wh ( 7*8+4)
#define off_in_bufe ( 8*8+0)
#define off_in_bufo ( 8*8+4)
#define off_out_bufe ( 9*8+0)
#define off_out_bufo ( 9*8+4)
#define off_in_width_stride_depth (10*8+0)
#define off_next_out_width (10*8+4)
#define off_recip (11*8+0)
/*=============================================================================*/
{ allocframe(#SS) //
memd(R29+#0*8-APTR) = R17:16 //
r8 = #0x80 //
sll0 = #0 //
}
{ memd(R29+#1*8) = R19:18 //
memd(R29+#2*8) = R21:20 //
constw80 = VSPLAT(r8) //
sll1 = #0 //
}
{ memd(R29+#3*8) = R23:22 //
memd(R29+#4*8) = R25:24 //
sll2 = #0 //
sll3 = #0 //
}
{ memd(R29+#5*8) = R27:26 //
memd(R29+#6*8) = R31:30 //
shl0 = constw80 //
shl1 = constw80 //
}
{ r9:8 = memd(r29+#APTR+0) // out_width|next_out_width
r11:10 = memd(r29+#APTR+8) // in_depth|stride_h_w
shl2 = constw80 //
shl3 = constw80 //
}
{ r13:12 = memd(r29+#APTR+16) // filt_height|filt_width
r15:14 = memd(r29+#APTR+24) // biasbuf|out_height
r22 = zxth(r10) // stride_w
r10 = lsr(r10,#16) // stride_h
}
{ r1:0 = memd(r29+#APTR+40) // recip|ptr_minmax
memd(r29+#off_in_bufe) = in_bufo_in_bufe //
filt_wid = asl(r12,#2) //filt_wid = filt_width*4
filt_ht = asr(r11,#5) // in_depth>>5
}
{ r6 = memw(r29+#APTR+48) // recip_shift
max_val = vmem(r0+#0) //
r22 = asl(r22,#2) // stride_w*4
r7 = #1 //
}
{ min_val = vmem(r0+#1) //
recipvec = VSPLAT(r1) //
memw(r29+#APTR+8) = r22 // stride_w
stride_w4 = asl(r22,#4) // stride_w4*2
}
{ wsum = vmem(r15+#0) // biasbuf
r23 = mpyi(in_width,r11) // in_width,in_depth
in_width_4 = asl(in_width,#5) //
memd(r29+#off_out_bufe) = out_bufo_out_bufe //
}
{ r23 = mpyi(r23,r10) //in_width,in_depth*stride_h
r7 = asl(r7,r6) //
memw(r29+#off_next_out_width) = r8 //
memw(r29+#off_ptr_wl) = weights //
}
{ memw(r29+#off_in_width_stride_depth) = r23 //
r7 = combine(r7.l,r7.l) //
ptr_wh = mpyi(filt_wid,r13) // filt_wid*filt_height
filt_ht = mpyi(r13,filt_ht) // filt_height*in_depth>>5
}
{ memd(r29+#off_recip) = r7:6 //
ptr_wh = mpyi(ptr_wh,r11) // filt_width*filt_height*in_depth
next_outputs = mpyi(filt_ht,in_width_4) //
nop //
}
{ ptr_wh = addasl(weights,ptr_wh,#3) //
next_outputs += mpyi(stride_w4,#-2) // -2*(stride_w4*2)
stride_w = memw(r29+#APTR+8) //
}
{ memw(r29+#off_ptr_wh) = ptr_wh //
m0 = stride_w4 // stride_w4*2
r22 = sub(#8,stride_w4) // -2*stride_w4
}
{ m1 = r22 // -2*tsride_w4+8
out_y = memw(r29+#APTR+24) // out_height
c8w = #8 //
stride_w4 = asr(stride_w4,#1) //
}
/* ---------------------------------------------------------------------------- */
.balign 32
.L_height:
{ ptr_xl0_ptr_xh0 = memd(r29+#off_in_bufe) //
next_out_width_in_width_stride_depth = memd(r29+#off_in_width_stride_depth)//
out_y = add(out_y, #-1) //
p0 = cmp.eq(out_y,#1) // last iteration?
}
{ in_bufet = add(ptr_xl0,in_width_stride_depth) //
in_bufot = add(ptr_xh0,in_width_stride_depth) //
sumainc_sumat = memd(r29+#APTR+32) //
p2 = cmp.gt(r0,r0) //
}
{ ptr_zo_ptr_ze = memd(r29+#off_out_bufe) //
memd(r29+#off_in_bufe) = in_bufoet //
sumat = add(sumat,sumainc) //
loop1(.L_filt_height,filt_ht) //[p2]
}
{ out_x4 = memw(r29+#APTR+4) //
if (!p0) memw(r29+#APTR+32) = sumat //
out_bufet = addasl(ptr_ze,next_out_width,#1) //
out_bufot = add(ptr_zo,next_out_width) //
}
{ memd(r29+#off_out_bufe) = out_bufoet //
out_x4 = add(out_x4,#-4) //[p2]
p1 = cmp.gt(out_x4,#4) //[p2] last iteration
ptr_wh_wl = memd(r29+#off_ptr_wl) //[p2]
}
/* ---------------------------------------------------------------------------- */
.balign 32
.L_width:
{ y0.uh = vpack(y1.w,y0.w):sat //
if (p2) vmem(ptr_ze++#1) = y0.new //
shh0 = wsum //
shh1 = wsum //
}
{ y1.uh = vpack(y3.w,y2.w):sat //
if (p2) vmem(ptr_ze++#1) = y1.new //
shh2 = wsum //
shh3 = wsum //
}
.L_filt_height:
{ xl07x04_xl03x00 = memd(ptr_xl0+#0) //[p0]
xl17x14_xl13x10 = memd(ptr_xl0+stride_w4<<#0) //[p0]
ptr_xl1 = addasl(ptr_xl0,stride_w,#4) //
p3 = sp1loop0(.L_filt_width, filt_wid) //
}
{ ptr_xh1 = ptr_xh0 //
ptr_xh0 = add(ptr_xh0,in_width_4) //
xl07x04 = xl13x10 //
xl13x10 = xl07x04 //
}
/* ---------------------------------------------------------------------------- */
.balign 32
.L_filt_width:
{ wh0.cur = vmem(ptr_wh++#1) //
shl1_shl0.w += vrmpy(wh0.b,xl13x10_xl03x00.ub) //
xl37x34_xl33x30 = memd(ptr_xl1+stride_w4<<#0) //
if (!p3) fetch_ptrh0 = ptr_xh0
}
{ dcfetch(fetch_ptrh0+#0) //
fetch_ptrh0 = add(fetch_ptrh0,#64) //
if (!p3) fetch_ptrl0 = add(ptr_xl0,in_width_4) //
if (!p3) ptr_xl0 = add(ptr_xl0,in_width_4) //
}
#define FAST_16B_CONV 1
#if defined(FAST_16B_CONV)
{ wh1.cur = vmem(ptr_wh++#1) //
shl1_shl0.w += vrmpy(wh1.b,xl17x14_xl07x04.ub) //
xl27x24_xl23x20 = memd(ptr_xl1++M1) //
}
{ wl0 = vmem(ptr_wl++#1) //
dcfetch(fetch_ptrl0+#0) //
xl27x24 = xl33x30 //
xl33x30 = xl27x24 //
}
{ shl3_shl2.w += vrmpy(wh0.b,xl33x30_xl23x20.ub) //
xh07x04_xh03x00 = memd(ptr_xh1++M0) //
xh17x14_xh13x10 = memd(ptr_xh1+stride_w4<<#0) //
fetch_ptrl0 = add(fetch_ptrl0,#64) //
}
{ wl1 = vmem(ptr_wl++#1) //
shl3_shl2.w += vrmpy(wh1.b,xl37x34_xl27x24.ub) //
xh07x04 = xh13x10 //
xh13x10 = xh07x04 //
}
#else
{ wl0.cur = vmem(ptr_wl++#1) //
sll1_sll0.w += vrmpy(wl0.b,xl13x10_xl03x00.ub) //L*L
xl27x24_xl23x20 = memd(ptr_xl1++M1) //
}
{ wh1.cur = vmem(ptr_wh++#1) //
shl1_shl0.w += vrmpy(wh1.b,xl17x14_xl07x04.ub) //
xl27x24 = xl33x30 //
xl33x30 = xl27x24 //
}
{ wl1.cur = vmem(ptr_wl++#1) //
sll1_sll0.w += vrmpy(wl1.b,xl17x14_xl07x04.ub) //L*L
dcfetch(fetch_ptrl0+#0) //
fetch_ptrl0 = add(fetch_ptrl0,#64) //
}
{ shl3_shl2.w += vrmpy(wh0.b,xl33x30_xl23x20.ub) //
sll3_sll2.w += vrmpy(wl0.b,xl33x30_xl23x20.ub) //L*L
xh07x04_xh03x00 = memd(ptr_xh1++M0) //
xh17x14_xh13x10 = memd(ptr_xh1+stride_w4<<#0) //
}
{ shl3_shl2.w += vrmpy(wh1.b,xl37x34_xl27x24.ub) //
sll3_sll2.w += vrmpy(wl1.b,xl37x34_xl27x24.ub) //L*L
xh07x04 = xh13x10 //
xh13x10 = xh07x04 //
}
#endif
{ shl1_shl0.w += vrmpy(wl0.b,xh13x10_xh03x00.ub) //
shh1_shh0.w += vrmpy(wh0.b,xh13x10_xh03x00.ub) //
xh27x24_xh23x20 = memd(ptr_xh1++M1) //
xh37x34_xh33x30 = memd(ptr_xh1+stride_w4<<#0) //
}
{ shl1_shl0.w += vrmpy(wl1.b,xh17x14_xh07x04.ub) //
shh1_shh0.w += vrmpy(wh1.b,xh17x14_xh07x04.ub) //
xh27x24 = xh33x30 //
xh33x30 = xh27x24 //
}
{ shl3_shl2.w += vrmpy(wl0.b,xh33x30_xh23x20.ub) //
shh3_shh2.w += vrmpy(wh0.b,xh33x30_xh23x20.ub) //
xl07x04_xl03x00 = memd(ptr_xl1++M0) //[p0]
xl17x14_xl13x10 = memd(ptr_xl1+stride_w4<<#0) //[p0]
}
{ shl3_shl2.w += vrmpy(wl1.b,xh37x34_xh27x24.ub) //
shh3_shh2.w += vrmpy(wh1.b,xh37x34_xh27x24.ub) //
xl07x04 = xl13x10 //
xl13x10 = xl07x04 //
}:endloop0:endloop1
{ shl0.w += vasr(sll0.w,c8w) //
recipshiftval_recip_shift = memd(r29+#off_recip) //
sll0 = #0 //
}
{ shl1.w += vasr(sll1.w,c8w) //
ptr_xl0 = sub(ptr_xl0,next_outputs) //
ptr_xh0 = sub(ptr_xh0,next_outputs) //
sll1 = #0 //
}
{ shh0.w += vasr(shl0.w,c8w) //
loop1(.L_filt_height,filt_ht) //[p2]
shl0 = constw80 //
}
{ shh1.w += vasr(shl1.w,c8w) //
min_val.w = vmin(min_val.w,shh0.w) //
max_val.w = vmax(max_val.w,shh0.w) //
sk = shh0 //
}
{ s0.w = vmpyi(shh0.w,recipshiftval.h) //s0=Q6_Vw_vasl_VwR(shh0.h,recip_shift)
shl2.w += vasr(sll2.w,c8w) //
shl1 = constw80 //
p2 = cmp.gt(out_x4,#1-4) //should s1 be included ?
}
{ if (p2) sk = shh1 //
s1.w = vmpyi(shh1.w,recipshiftval.h) //s1=Q6_Vw_vasl_VwR(shh1.h,recip_shift)
shl3.w += vasr(sll3.w,c8w) //
}
{ shh2.w += vasr(shl2.w,c8w) //
y0.w = vmpye(s0.w,recipvec.uh) //
min_val.w = vmin(min_val.w,sk.w) //
p2 = cmp.gt(out_x4,#2-4) //should s2 be included ?
}
{ shh3.w += vasr(shl3.w,c8w) //
y0.w += vmpyo(s0.w,recipvec.h):<<1:rnd:sat:shift //
max_val.w = vmax(max_val.w,sk.w) //
}
{ s2.w = vasl(shh2.w,recip_shift) //
y1.w = vmpye(s1.w,recipvec.uh) //
if (p2) sk = shh2 //
p2 = cmp.gt(out_x4,#3-4) //should s3 be included ?
}
{ s3.w = vasl(shh3.w,recip_shift) //
y1.w += vmpyo(s1.w,recipvec.h):<<1:rnd:sat:shift //
min_val.w = vmin(min_val.w,sk.w) //
out_x4 = add(out_x4,#-4) //[p2]
}
{ y2.w = vmpye(s2.w,recipvec.uh) //
max_val.w = vmax(max_val.w,sk.w) //
if (p2) sk = shh3 //
ptr_wh_wl = memd(r29+#off_ptr_wl) //[p2]
}
{ y2.w += vmpyo(s2.w,recipvec.h):<<1:rnd:sat:shift //
min_val.w = vmin(min_val.w,sk.w) //
max_val.w = vmax(max_val.w,sk.w) //
p2 = p1 //
}
{ y3.w = vmpye(s3.w,recipvec.uh) //
sll2 = #0 //
sll3 = #0 //
p1 = cmp.gt(out_x4,#0) //[p2] last iteration
}
{ y3.w += vmpyo(s3.w,recipvec.h):<<1:rnd:sat:shift //
shl2 = constw80 //
shl3 = constw80 //
if (p2) jump:t .L_width //
}
/* ---------------------------------------------------------------------------- */
{ y0.uh = vpack(y1.w,y0.w):sat //
shh0 = wsum //
vmem(ptr_ze++#1) = y0.new //
}
{ y1.uh = vpack(y3.w,y2.w):sat //
vmem(ptr_ze++#1) = y1.new //
p0 = cmp.eq(out_y, #0) //
if(!p0.new) jump:t .L_height //
} //end lines per block
/* ---------------------------------------------------------------------------- */
{ r0 = memw(r29+#APTR+40) // ptr_minmax
}
{ vmem(r0+#0) = max_val //
R17:16 = memd(R29+#0*8) // restore callee-saved registers
}
{ vmem(r0+#1) = min_val //
R19:18 = memd(R29+#1*8) // restore callee-saved registers
}
{ R21:20 = memd(R29+#2*8) // restore callee-saved registers
R23:22 = memd(R29+#3*8) // restore callee-saved registers
}
{ R25:24 = memd(R29+#4*8) // restore callee-saved registers
R31:30 = memd(R29+#6*8) // restore callee-saved registers
}
{ R27:26 = memd(R29+#5*8) // restore callee-saved registers
DEALLOC_RETURN // return
}
.L_end:
/*=============================================================================*/
.size gvconv2db2b2b2_d32_v65_asm, .L_end-gvconv2db2b2b2_d32_v65_asm
/*=============================================================================*/
|
XiaoMi/nnlib | 4,610 | hexagon/asm_src/vmemset_nt_h.S | /*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
*/
/* ============================================================================ */
/* ============================================================================ */
/* FUNCTION    : vmemset_nt_asm                                                 */
/* DESCRIPTION : memset-style fill using non-temporal 128-byte HVX stores.      */
/*               The low byte of 'src' is replicated across a full vector and   */
/*               stored over [dst, dst+length).  Ragged first/last vectors are  */
/*               handled with the qprolog/qepilog vector predicates; when the   */
/*               whole region fits inside a single vector the two predicates    */
/*               are merged so exactly one masked store covers it.              */
/* C PROTOTYPE : void vmemset_nt_asm(uint8_t *dst, int val, int length)         */
/* IN          : r0 = dst (any alignment), r1 = fill byte, r2 = length in bytes */
/* CLOBBERS    : r1, r2, r5, r7-r9, p2, q0, q1, v2-v4, loop0 state              */
/* ============================================================================ */
.global vmemset_nt_asm
.type vmemset_nt_asm, @function
.balign 32
vmemset_nt_asm:
/* ============================================================================ */
#define dst r0
#define src r1
#define length r2
/* ============================================================================ */
#define dstalign r5
#define end r7
#define sel0 r8
#define kernel r3
#define sel1 r9
#define sel2 r4
#define dsto r10
#define y0 v2
#define vpredp v3
#define vprede v4
#define qprolog q0
#define qepilog q1
/* ============================================================================ */
{ sel0 = ##0x01010101 //position of qprolog
src = vsplatb(src) //replicate fill byte into all 4 byte lanes of src
end = add(length, dst) //last byte of block
} {
qprolog =vsetq(dst) //qprolog vec predicate __|---
y0 = vsplat(src) //broadcast fill word to the full vector
sel1 = add(sel0, sel0) //position of modified vec predicates
end = and(end, #127) //alignment of last byte
} {
dstalign = and(dst, #127) //alignment of dst
qepilog = vsetq(end) //setup epilog vec predicate
vpredp = vand(qprolog, sel1) //write prolog pred into vreg
length -= add(end, #-127) //round kernel up to 128 nearest
} {
vprede = vand(qepilog, sel1) //write epilog pred into vreg
qprolog = or(qprolog, !qepilog) //modified prolog used when fill fits in one vector
length= lsr(length, #7) //kernel in blocks of 128
dstalign = add(dstalign, length) //amount of total data (packet reads pre-shift length)
} {
vpredp|= vand(qprolog, sel0) //store modified prolog
loop0(.L_blocks, length) //start main loop
p2 = cmp.gt(dstalign, #127) //if > 127 dont use modified prolog
if(!p2.new) sel1 = sel0 //dont choose modified
} {
qprolog = vand(vpredp, sel1) //select the qprolog
qepilog = vand(vprede, sel1) //choose correct qepilog
}
/* ============================================================================ */
.balign 32
.L_blocks:
{
if(!qprolog) vmem(dst++#1):nt = y0 //masked prolog STORE folded into main loop
qprolog = and(qprolog, !qprolog) //clear qprolog: !qprolog is all-true on later iterations
}:endloop0
/* ============================================================================ */
{
if(qepilog) vmem(dst+#0):nt = y0 //store out epilog data
}{
jumpr r31 //return to caller
}
.L_end:
/* ============================================================================ */
.size vmemset_nt_asm, .L_end-vmemset_nt_asm
/* ============================================================================ */
/* ============================================================================ */
|
XiaoMi/nnlib | 6,705 | hexagon/asm_src/visqrt64_h.S | /*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
Perform inverse square root on 64 elements based on qf16 type.
*/
#if 0
void isqrt_cn(
short *ptr_xm, short *ptr_ein, short *ptr_ym, short *ptr_ye)
{
int i;
short u, x, mant, expnt, y, y2, range, isqrt2 = 0x5a83;
u = ptr_xm[0];
if(u<=0)u = 1;
expnt = norm16(u);
mant = u << expnt;
range = (mant >> 10) & 0xf;
x = (mant<<1) & 0x7ff;
y = lut_isqrt_cn[range+2*16];
y = mpyrsat(y, x) + lut_isqrt_cn[range+1*16];
y = mpyrsat(y, x) + lut_isqrt_cn[range+0*16];
y2 = mpyrsat(isqrt2, y);
if(!(expnt & 1)) y = y2;
expnt = 15-expnt;
ptr_ye[0] = (expnt>>1)-1;
ptr_ym[0] = y;
return;
}
#endif
/*======================================================================*/
/*======================================================================*/
/* FUNCTION    : visqrt64_asm                                           */
/* DESCRIPTION : vectorized inverse square root of 64 int16 values.     */
/*               A 3-term polynomial in the low mantissa bits (x0) is   */
/*               evaluated with Horner's rule, coefficients fetched by  */
/*               vlut16 from the 3-vector table at ptr_isqrt.  Inputs   */
/*               <= 0 are replaced by 1 with exponent 0x1d.  When the   */
/*               input exponent is even the result is scaled by         */
/*               1/sqrt(2) (0x5a83).  See the C model above (#if 0).    */
/* C PROTOTYPE : void visqrt64_asm(short *ptr_xm, short *ptr_expin,     */
/*                                 short *ptr_ym, short *ptr_ye)        */
/*               r4 = pointer to lut_isqrt table (3 HVX vectors)        */
/* OUT         : vmem(ptr_ye) = output exponents, vmem(ptr_ym) = y      */
/* NOTE(review): v6 ('d1') is read by vshuffo below without ever being  */
/*               written in this function — presumably only the lanes   */
/*               derived from d0 are consumed downstream; confirm       */
/*               against the caller's expectations.                     */
/*======================================================================*/
.global visqrt64_asm
.balign 32
.type visqrt64_asm, @function
visqrt64_asm:
/*======================================================================*/
#define ptr_xm r0 //ptr to input data
#define ptr_expin r1 //qpoint of input data
#define ptr_ym r2 //mantissa of output data
#define ptr_ye r3 //corresponding exponents of output data
#define ptr_isqrt r4 //.global lut_isqrt_asm
/*======================================================================*/
#define c3 r6
#define c3c r5
#define cf r8
#define c0 r7
#define c2 r9
#define sqrt0p5 r10
#define c1 r12
#define c15 r13
#define c30 r14
#define isqrt0 v0
#define isqrt1 v1
#define isqrt2 v2
#define vcf v4
#define vc7ff v16
#define d0 v5
#define d1 v6
#define i1i0 v7
#define qexp0 v8
#define qexp1 v9
#define c21_c20 v11:10
#define c21 v11
#define c20 v10
#define c11_c10 v13:12
#define c11 v13
#define c10 v12
#define c01_c00 v15:14
#define c01 v15
#define c00 v14
#define vzero v19
#define vone v17
#define y1y0 v21:20
#define y1 v21
#define y0 v20
#define ny0 v22
#define ny1 v23
#define vc15 v25
#define vc30 v26
#define x1 v27
#define x0 v24
#define bit1 v29
#define bit0 v28
/*======================================================================*/
{
c1 = ##0x40004000
vzero = #0
} {
d0.cur = vmem(ptr_xm+#0) //[0, 1]
q0 = vcmp.gt(d0.h, vzero.h) //[0, 1] lanes with x > 0 keep their value
c30 = ##0x001d001d
} {
qexp0 = vmem(ptr_expin+#0) //[0, 2]
vone = vsplat(c1)
vc30 = vsplat(c30)
c1.L = #0x0001
} {
c3c.L= #0x3c3c
c1.H = #0x0001
d0 = vmux(q0, d0, vone) //[0, 3] clamp non-positive inputs to 1
qexp0 = vmux(q0, qexp0, vc30) //[0, 3] and force their exponent to 0x1d
} {
c3c.H= #0x3c3c
vone = vsplat(c1)
i1i0.b = vshuffo(d1.b, d0.b) //[0, 4] high bytes of halfwords (d1 uninitialized here, see NOTE)
} {
bit0 = vand(vone, qexp0) //[0, 5] parity bit of the input exponent
vcf = vsplat(c3c)
} {
cf = ##0x07ff07ff
qexp0.h = vsub(vzero.h, qexp0.h) //negate exponent
i1i0 = vand(i1i0,vcf) //[0, 6] table index bits
} {
c2 = #2
vc7ff = vsplat(cf)
d0.h = vadd(d0.h, d0.h) //[0, 7] mant << 1
} {
isqrt2 = vmem(ptr_isqrt+#2)
i1i0.uh = vlsr(i1i0.uh, c2) //[0, 8]
c0 = #0
} {
x0 = vand(d0, vc7ff) //[0,9] low 11 mantissa bits -> poly argument
qexp0.h = vasr(qexp0.h, c1) //[0,9] exponent >> 1
} {
c21_c20.h = vlut16(i1i0.b, isqrt2.h, c0) //[0,10] 2nd-order coefficient lookup
} {
qexp0.h = vsub(qexp0.h, vone.h) //[0,11] output exponent = (..)>>1 - 1
vmem(ptr_ye++#1) = qexp0.new //[0,11]
} {
isqrt1.tmp = vmem(ptr_isqrt+#1)
c11_c10.h = vlut16(i1i0.b, isqrt1.h, c0) //[0,12] 1st-order coefficient lookup
y0.h = vmpy(x0.h, c20.h):<<1:rnd:sat //[0,12] Horner step 1
} {
y0.h = vadd(y0.h, c10.h) //[0,13]
q0 = vcmp.eq(vzero.h, bit0.h) //[0,13] q0 = exponent is even
} {
isqrt0.tmp = vmem(ptr_isqrt+#0)
c01_c00.h = vlut16(i1i0.b, isqrt0.h, c0) //[0,14] 0th-order coefficient lookup
} {
y0.h = vmpy(x0.h, y0.h):<<1:rnd:sat //[0,15] Horner step 2
} {
y0.h = vadd(y0.h, c00.h) //[0,16]
} {
sqrt0p5 = ##0x5a835a83 //1/sqrt(2)
} {
ny0.h = vmpy(y0.h, sqrt0p5.h):<<1:rnd:sat//[0,18] alternate result scaled by 1/sqrt(2)
} {
y0 = vmux(q0, y0, ny0) //[0,19] pick scaled value for odd exponents
vmem(ptr_ym+#0) = y0.new //[0,19]
}
jumpr r31
/*----------------------------------------------------------------*/
.L_end:
.size visqrt64_asm, .L_end-visqrt64_asm
/*======================================================================*/
/* end of file */
/*======================================================================*/
|
XiaoMi/nnlib | 24,581 | hexagon/asm_src/gvconv2db2b2b2u_d32_h_v65.S | /*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
* FUNCTIONS : gvconv2db2b2b2u_d32_v65_asm
*
* DESCRIPTION
* Perform 2d convolution using elements of size in_depth. Results are
* scaled and saturated to 8bits. Max and Min accumulations are kept.
*
* ARCHITECTURE : QDSP6V60 + HVX
*
* INPUT : R0 : uint8_t *in_bufe
* R1 : uint8_t *in_bufo
* R2 : uint8_t *out_bufe
* R3 : uint8_t *out_bufo
* R4 : uint8_t *weights
* R5 : int32_t in_width
* SP+#0 : int32_t next_out_width
* SP+#4 : int32_t out_width
* SP+#8 : int32_t stride_h_w
* SP+#12 : int32_t in_depth
* SP+#16 : int32_t filt_width
* SP+#20 : int32_t filt_height
* SP+#24 : int32_t out_height
* SP+#28 : const int32_t *biasbuf
* SP+#32 : const int32_t *suma
* SP+#36 : int32_t next_suma_row
* SP+#40 : int32_t *ptr_minmax
* SP+#44 : int32_t recip
* SP+#48 : int32_t recip_shift
*/
/* See the header comment above for the full argument list (r0-r5 + stack). */
/* Loop structure: .L_height (rows) -> .L_width (4 outputs / iter) ->        */
/* .L_filt_height x .L_filt_width (vrmpy accumulation over the filter).      */
/* NOTE: several packets below contain what looks like a redundant pair      */
/* "a = b ; b = a" — Hexagon packets read pre-packet values, so each such    */
/* pair is a parallel register SWAP, not a no-op.                            */
.text
.global gvconv2db2b2b2u_d32_v65_asm
.balign 32
.type gvconv2db2b2b2u_d32_v65_asm, @function
gvconv2db2b2b2u_d32_v65_asm:
/*=============================================================================*/
#define SS (13*8)
#define APTR (SS+8)
#define in_bufe r0
#define in_bufo r1
#define in_bufo_in_bufe r1:0
#define out_bufe r2
#define out_bufo r3
#define out_bufo_out_bufe r3:2
#define weights r4
#define in_width r5
#define c_1w r7
#define c8w c_1w
#define xl03x00 r8
#define xh03x00 xl03x00
#define xl23x20 xl03x00
#define xh23x20 xl03x00
#define suma0 xl03x00
#define in_width_stride_depth xl03x00
#define sumabuf xl03x00
#define xl07x04 r9
#define xl27x24 xl07x04
#define xh27x24 xl07x04
#define xh07x04 xl07x04
#define next_out_width xl07x04
#define suma1 xl07x04
#define xl07x04_xl03x00 r9:8
#define xh07x04_xh03x00 xl07x04_xl03x00
#define xl27x24_xl23x20 xl07x04_xl03x00
#define xl13x10_xl03x00 xl07x04_xl03x00
#define xh27x24_xh23x20 xl07x04_xl03x00
#define xl33x30_xl23x20 xl07x04_xl03x00
#define xh13x10_xh03x00 xl07x04_xl03x00
#define xh33x30_xh23x20 xl07x04_xl03x00
#define next_out_width_in_width_stride_depth xl07x04_xl03x00
#define xl13x10 r10
#define xh13x10 xl13x10
#define in_bufet xl13x10
#define out_bufet xl13x10
#define recip_shift xl13x10
#define suma3 xl13x10
#define xl17x14 r11
#define xh17x14 xl17x14
#define in_bufot xl17x14
#define out_bufot xl17x14
#define recipshiftval xl17x14
#define suma2 xl17x14
#define xl17x14_xl13x10 r11:10
#define xl17x14_xl07x04 xl17x14_xl13x10
#define xh17x14_xh13x10 xl17x14_xl13x10
#define in_bufoet xl17x14_xl13x10
#define out_bufoet xl17x14_xl13x10
#define xh17x14_xh07x04 xl17x14_xl13x10
#define recipshiftval_recip_shift xl17x14_xl13x10
#define ptr_xl0 r12
#define ptr_xh0 r13
#define ptr_xl0_ptr_xh0 r13:12
#define ptr_xl1 r14
#define ptr_xh1 r15
#define ptr_wl r16
#define sumat ptr_wl
#define ptr_wh r17
#define sumainc ptr_wh
#define ptr_wh_wl r17:16
#define sumainc_sumat ptr_wh_wl
#define filt_wid r18
#define filt_ht r19
#define suma r20
#define stride_w r21
#define ptr_ze r22
#define ptr_zo r23
#define ptr_zo_ptr_ze r23:22
#define out_y r24
#define out_x4 r25
#define xl33x30 r26
#define xh33x30 xl33x30
#define xl37x34 r27
#define xh37x34 xl37x34
#define xl37x34_xl33x30 r27:26
#define xl37x34_xl27x24 xl37x34_xl33x30
#define xh37x34_xh33x30 xl37x34_xl33x30
#define xh37x34_xh27x24 xl37x34_xl33x30
#define stride_w4 r28
#define in_width_4 r30
#define next_outputs r31
/*=============================================================================*/
#define sll0 v0
#define sll1 v1
#define sll1_sll0 v1:0
#define sll2 v2
#define sll3 v3
#define sll3_sll2 v3:2
#define shl0 v4
#define shl1 v5
#define shl1_shl0 v5:4
#define shl2 v6
#define shl3 v7
#define shl3_shl2 v7:6
#define shh0 v8
#define s0 shh0
#define vsuma0 shh0
#define shh1 v9
#define s1 shh1
#define vsuma1 shh1
#define shh1_shh0 v9:8
#define shh2 v10
#define s2 shh2
#define vsuma2 shh2
#define shh3 v11
#define vsuma3 shh3
#define shh3_shh2 v11:10
#define wh0 v12
#define wh1 v13
#define wl0 v14
#define wl1 v15
#define min_val v16
#define max_val v17
#define recipvec v18
#define wsum v19
#define constw80 v20
#define sk v21
#define y0 v22
#define y1 v23
#define y1y0 v23:22
#define y2 v24
#define y3 v25
#define s3 v26
/*=============================================================================*/
#define off_ptr_wl ( 7*8+0)
#define off_ptr_wh ( 7*8+4)
#define off_in_bufe ( 8*8+0)
#define off_in_bufo ( 8*8+4)
#define off_out_bufe ( 9*8+0)
#define off_out_bufo ( 9*8+4)
#define off_in_width_stride_depth (10*8+0)
#define off_next_out_width (10*8+4)
#define off_recip (11*8+0)
/*=============================================================================*/
/* Prologue: save callee-saved r16-r27,r30-r31, unpack stack args, and set up */
/* derived strides, modifier registers m0/m1 and the initial per-column sums. */
{ allocframe(#SS) //
memd(R29+#0*8-APTR) = R17:16 //
r8 = #0x80 //
sll0 = #0 //
}
{ memd(R29+#1*8) = R19:18 //
memd(R29+#2*8) = R21:20 //
constw80 = VSPLAT(r8) //
sll1 = #0 //
}
{ memd(R29+#3*8) = R23:22 //
memd(R29+#4*8) = R25:24 //
sll2 = #0 //
sll3 = #0 //
}
{ memd(R29+#5*8) = R27:26 //
memd(R29+#6*8) = R31:30 //
shl0 = constw80 //
shl1 = constw80 //
}
{ r9:8 = memd(r29+#APTR+0) // out_width|next_out_width
r11:10 = memd(r29+#APTR+8) // in_depth|stride_h_w
shl2 = constw80 //
shl3 = constw80 //
}
{ r13:12 = memd(r29+#APTR+16) // filt_height|filt_width
r15:14 = memd(r29+#APTR+24) // biasbuf|out_height
r22 = zxth(r10) // stride_w
r10 = lsr(r10,#16) // stride_h
}
{ r1:0 = memd(r29+#APTR+40) // recip|ptr_minmax
memd(r29+#off_in_bufe) = in_bufo_in_bufe //
filt_wid = asl(r12,#2) //filt_wid = filt_width*4
filt_ht = asr(r11,#5) // in_depth>>5
}
{ r6 = memw(r29+#APTR+48) // recip_shift
max_val = vmem(r0+#0) // running max, seeded from ptr_minmax[0]
r22 = asl(r22,#2) // stride_w*4
r7 = #1 //
}
{ min_val = vmem(r0+#1) // running min, seeded from ptr_minmax[1]
recipvec = VSPLAT(r1) //
memw(r29+#APTR+8) = r22 // stride_w
stride_w4 = asl(r22,#4) // stride_w4*2
}
{ wsum = vmem(r15+#0) // biasbuf
r23 = mpyi(in_width,r11) // in_width,in_depth
in_width_4 = asl(in_width,#5) //
memd(r29+#off_out_bufe) = out_bufo_out_bufe //
}
{ r23 = mpyi(r23,r10) //in_width,in_depth*stride_h
r7 = asl(r7,r6) //
memw(r29+#off_next_out_width) = r8 //
memw(r29+#off_ptr_wl) = weights //
}
{ memw(r29+#off_in_width_stride_depth) = r23 //
r7 = combine(r7.l,r7.l) //
ptr_wh = mpyi(filt_wid,r13) // filt_wid*filt_height
filt_ht = mpyi(r13,filt_ht) // filt_height*in_depth>>5
}
{ memd(r29+#off_recip) = r7:6 //
ptr_wh = mpyi(ptr_wh,r11) // filt_width*filt_height*in_depth
next_outputs = mpyi(filt_ht,in_width_4) //
suma = memw(r29+#APTR+32) // recip|ptr_minmax
}
{ ptr_wh = addasl(weights,ptr_wh,#3) //
next_outputs += mpyi(stride_w4,#-2) // -2*(stride_w4*2)
stride_w = memw(r29+#APTR+8) //
}
{ memw(r29+#off_ptr_wh) = ptr_wh //
m0 = stride_w4 // stride_w4*2
r22 = neg(stride_w4) // -2*stride_w4
suma0 = memw(suma+#0) //
}
{ r22 = add(r22,#+8) // -2*tsride_w4+8
vsuma0 = vsplat(suma0) //
suma1 = memw(suma+stride_w<<#0) //
suma2 = memw(suma+stride_w<<#1) //
}
{ m1 = r22 // -2*tsride_w4+8
shh0.w = vadd(wsum.w,vsuma0.w) // bias + suma for output 0
suma += mpyi(stride_w,#3) //
}
{ vsuma1 = vsplat(suma1) //
vsuma2 = vsplat(suma2) //
suma3 = memw(suma+#0) //
suma = add(suma,stride_w) //
}
{ shh1.w = vadd(wsum.w,vsuma1.w) // bias + suma for output 1
shh2.w = vadd(wsum.w,vsuma2.w) // bias + suma for output 2
vsuma3 = vsplat(suma3) //
}
{ shh3.w = vadd(wsum.w,vsuma3.w) // bias + suma for output 3
out_y = memw(r29+#APTR+24) // out_height
c8w = #8 //
stride_w4 = asr(stride_w4,#1) //
}
/* ---------------------------------------------------------------------------- */
/* Outer loop: one output row per iteration; out_y counts down to zero.         */
.balign 32
.L_height:
{ ptr_xl0_ptr_xh0 = memd(r29+#off_in_bufe) //
next_out_width_in_width_stride_depth = memd(r29+#off_in_width_stride_depth)//
out_y = add(out_y, #-1) //
p0 = cmp.eq(out_y,#1) // last iteration?
}
{ in_bufet = add(ptr_xl0,in_width_stride_depth) //
in_bufot = add(ptr_xh0,in_width_stride_depth) //
ptr_zo_ptr_ze = memd(r29+#off_out_bufe) //
sumainc_sumat = memd(r29+#APTR+32) //
}
{ memd(r29+#off_in_bufe) = in_bufoet //
#if defined(SPLIT_OUTPUT)
out_bufet = add(ptr_ze,next_out_width) //
#else
out_bufet = addasl(ptr_ze,next_out_width,#1) //
#endif
out_bufot = add(ptr_zo,next_out_width) //
sumat = add(sumat,sumainc) //
}
{ out_x4 = memw(r29+#APTR+4) //
if (!p0) memw(r29+#APTR+32) = sumat //
loop1(.L_filt_height,filt_ht) //[p2]
}
{ memd(r29+#off_out_bufe) = out_bufoet //
out_x4 = add(out_x4,#-4) //[p2]
p1 = cmp.gt(out_x4,#4) //[p2] last iteration
ptr_wh_wl = memd(r29+#off_ptr_wl) //[p2]
}
/* ---------------------------------------------------------------------------- */
/* Middle loop: four horizontally adjacent outputs per pass over the filter.    */
.balign 32
.L_width:
.L_filt_height:
{ xl07x04_xl03x00 = memd(ptr_xl0+#0) //[p0]
xl17x14_xl13x10 = memd(ptr_xl0+stride_w4<<#0) //[p0]
ptr_xl1 = addasl(ptr_xl0,stride_w,#4) //
p3 = sp1loop0(.L_filt_width, filt_wid) //
}
{ ptr_xh1 = ptr_xh0 //
ptr_xh0 = add(ptr_xh0,in_width_4) //
xl07x04 = xl13x10 // packet-parallel SWAP of xl07x04/xl13x10
xl13x10 = xl07x04 // (packet reads pre-packet values)
}
/* ---------------------------------------------------------------------------- */
/* Inner loop: software-pipelined vrmpy accumulation across the filter width,   */
/* low (sll*/shl*) and high (shh*) byte partial sums kept separately.           */
.balign 32
.L_filt_width:
{ wh0.cur = vmem(ptr_wh++#1) //
shl1_shl0.uw += vrmpy(wh0.ub,xl13x10_xl03x00.ub) //
xl37x34_xl33x30 = memd(ptr_xl1+stride_w4<<#0) //
if (!p3) ptr_xl0 = add(ptr_xl0,in_width_4) //
}
{ wl0.cur = vmem(ptr_wl++#1) //
sll1_sll0.uw += vrmpy(wl0.ub,xl13x10_xl03x00.ub) //
xl27x24_xl23x20 = memd(ptr_xl1++M1) //
}
{ wh1.cur = vmem(ptr_wh++#1) //
shl1_shl0.uw += vrmpy(wh1.ub,xl17x14_xl07x04.ub) //
xl27x24 = xl33x30 // packet-parallel SWAP of xl27x24/xl33x30
xl33x30 = xl27x24 //
}
{ wl1.cur = vmem(ptr_wl++#1) //
sll1_sll0.uw += vrmpy(wl1.ub,xl17x14_xl07x04.ub) //
}
{ shl3_shl2.uw += vrmpy(wh0.ub,xl33x30_xl23x20.ub) //
sll3_sll2.uw += vrmpy(wl0.ub,xl33x30_xl23x20.ub) //
xh07x04_xh03x00 = memd(ptr_xh1++M0) //
xh17x14_xh13x10 = memd(ptr_xh1+stride_w4<<#0) //
}
{ shl3_shl2.uw += vrmpy(wh1.ub,xl37x34_xl27x24.ub) //
sll3_sll2.uw += vrmpy(wl1.ub,xl37x34_xl27x24.ub) //
xh07x04 = xh13x10 // packet-parallel SWAP of xh07x04/xh13x10
xh13x10 = xh07x04 //
}
{ shl1_shl0.uw += vrmpy(wl0.ub,xh13x10_xh03x00.ub) //
shh1_shh0.uw += vrmpy(wh0.ub,xh13x10_xh03x00.ub) //
xh27x24_xh23x20 = memd(ptr_xh1++M1) //
xh37x34_xh33x30 = memd(ptr_xh1+stride_w4<<#0) //
}
{ shl1_shl0.uw += vrmpy(wl1.ub,xh17x14_xh07x04.ub) //
shh1_shh0.uw += vrmpy(wh1.ub,xh17x14_xh07x04.ub) //
xh27x24 = xh33x30 // packet-parallel SWAP of xh27x24/xh33x30
xh33x30 = xh27x24 //
}
{ shl3_shl2.uw += vrmpy(wl0.ub,xh33x30_xh23x20.ub) //
shh3_shh2.uw += vrmpy(wh0.ub,xh33x30_xh23x20.ub) //
xl07x04_xl03x00 = memd(ptr_xl1++M0) //[p0]
xl17x14_xl13x10 = memd(ptr_xl1+stride_w4<<#0) //[p0]
}
{ shl3_shl2.uw += vrmpy(wl1.ub,xh37x34_xh27x24.ub) //
shh3_shh2.uw += vrmpy(wh1.ub,xh37x34_xh27x24.ub) //
xl07x04 = xl13x10 // packet-parallel SWAP of xl07x04/xl13x10
xl13x10 = xl07x04 //
}:endloop0:endloop1
/* Combine the byte-weighted partial sums (sll -> shl -> shh via >>8 stages),   */
/* then scale by recipvec, clamp/track min/max and pack four outputs to bytes.  */
{ shl0.w += vasr(sll0.w,c8w) //
recipshiftval_recip_shift = memd(r29+#off_recip) //
sll0 = #0 //
sumabuf = memw(r29+#APTR+32) //
}
{ shl1.w += vasr(sll1.w,c8w) //
ptr_xl0 = sub(ptr_xl0,next_outputs) //
ptr_xh0 = sub(ptr_xh0,next_outputs) //
if (!p1) suma = sumabuf //
}
{ shh0.w += vasr(shl0.w,c8w) //
loop1(.L_filt_height,filt_ht) //[p2]
shl0 = constw80 //
suma0 = memw(suma+#0) //
}
{ shh1.w += vasr(shl1.w,c8w) //
min_val.w = vmin(min_val.w,shh0.w) //
max_val.w = vmax(max_val.w,shh0.w) //
sk = shh0 //
}
{ s0.w = vmpyi(shh0.w,recipshiftval.h) //s0=Q6_Vw_vasl_VwR(shh0.h,recip_shift)
shl2.w += vasr(sll2.w,c8w) //
shl1 = constw80 //
p2 = cmp.gt(out_x4,#1-4) //should s1 be included ?
}
{ if (p2) sk = shh1 //
s1.w = vmpyi(shh1.w,recipshiftval.h) //s1=Q6_Vw_vasl_VwR(shh1.h,recip_shift)
shl3.w += vasr(sll3.w,c8w) //
suma1 = memw(suma+stride_w<<#0) //
}
{ shh2.w += vasr(shl2.w,c8w) //
y0.w = vmpye(s0.w,recipvec.uh) //
min_val.w = vmin(min_val.w,sk.w) //
p2 = cmp.gt(out_x4,#2-4) //should s2 be included ?
}
{ shh3.w += vasr(shl3.w,c8w) //
y0.w += vmpyo(s0.w,recipvec.h):<<1:rnd:sat:shift //
max_val.w = vmax(max_val.w,sk.w) //
suma2 = memw(suma+stride_w<<#1) //
}
{ s2.w = VASL(shh2.w,recip_shift) //
y1.w = vmpye(s1.w,recipvec.uh) //
if (p2) sk = shh2 //
p2 = cmp.gt(out_x4,#3-4) //should s3 be included ?
}
{ s3.w = VASL(shh3.w,recip_shift) //
y1.w += vmpyo(s1.w,recipvec.h):<<1:rnd:sat:shift //
min_val.w = vmin(min_val.w,sk.w) //
suma += mpyi(stride_w,#3) //
}
{ y2.w = vmpye(s2.w,recipvec.uh) //
max_val.w = vmax(max_val.w,sk.w) //
if (p2) sk = shh3 //
suma3 = memw(suma+#0) //
}
{ y2.w += vmpyo(s2.w,recipvec.h):<<1:rnd:sat:shift //
min_val.w = vmin(min_val.w,sk.w) //
max_val.w = vmax(max_val.w,sk.w) //
suma = add(suma,stride_w) //
}
{ y3.w = vmpye(s3.w,recipvec.uh) //
sll2 = #0 //
sll3 = #0 //
c_1w = #-1 //
}
{ y3.w += vmpyo(s3.w,recipvec.h):<<1:rnd:sat:shift //
shl2 = constw80 //
shl3 = constw80 //
ptr_wh_wl = memd(r29+#off_ptr_wl) //[p2]
}
{ y1.uh = vpack(y1.w,y0.w):sat //
vsuma0 = vsplat(suma0) //
vsuma1 = vsplat(suma1) //
sll1 = #0 //
}
{ y3.uh = vpack(y3.w,y2.w):sat //
shh0.w = vadd(wsum.w,vsuma0.w) //
vsuma2 = vsplat(suma2) //
vsuma3 = vsplat(suma3) //
}
{ shh1.w = vadd(wsum.w,vsuma1.w) //
shh2.w = vadd(wsum.w,vsuma2.w) //
shh3.w = vadd(wsum.w,vsuma3.w) //
}
#if defined(SPLIT_OUTPUT)
{ y1y0 = vdeal(y3,y1,c_1w) //
vmem(ptr_ze++#1) = y0.new //
#else
{ vmem(ptr_ze++#1) = y1 //
#endif
c8w = #8 //
}
#if defined(SPLIT_OUTPUT)
{ vmem(ptr_zo++#1) = y1 //
#else
{ vmem(ptr_ze++#1) = y3 //
#endif
if (p1) jump:t .L_width //
out_x4 = add(out_x4,#-4) //[p2]
p1 = cmp.gt(out_x4,#4) //[p2] last iteration
} //end cols per line
/* ---------------------------------------------------------------------------- */
{ p0 = cmp.eq(out_y, #0) //
if(!p0.new) jump:t .L_height //
} //end lines per block
/* ---------------------------------------------------------------------------- */
/* Epilogue: publish running max/min, restore callee-saved regs and return.     */
{ r0 = memw(r29+#APTR+40) // ptr_minmax
}
{ vmem(r0+#0) = max_val //
R17:16 = memd(R29+#0*8) // restore callee-saved registers
}
{ vmem(r0+#1) = min_val //
R19:18 = memd(R29+#1*8) // restore callee-saved registers
}
{ R21:20 = memd(R29+#2*8) // restore callee-saved registers
R23:22 = memd(R29+#3*8) // restore callee-saved registers
}
{ R25:24 = memd(R29+#4*8) // restore callee-saved registers
R31:30 = memd(R29+#6*8) // restore callee-saved registers
}
{ R27:26 = memd(R29+#5*8) // restore callee-saved registers
DEALLOC_RETURN // return
}
.L_end:
/*=============================================================================*/
.size gvconv2db2b2b2u_d32_v65_asm, .L_end-gvconv2db2b2b2u_d32_v65_asm
/*=============================================================================*/
/*=============================================================================*/
|
XiaoMi/nnlib | 22,030 | hexagon/asm_src/gemmpybbw_h.S |
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
*/
/*======================================================================*/
/* FUNCTIONS : gemmpybbw_asm */
/* */
/* DESCRIPTION */
/* Perform gemm matrix multiply, result left at 32bits */
/* */
/* ARCHITECTURE : QDSP6V6 + HVX */
/*======================================================================*/
/* REVISION HISTORY: */
/* ================= */
/* */
/* Author Date Comments */
/* ------------------------------------------------------------- */
/* DJH 03/07/16 created */
/* DJH 05/10/16 added post add for x and y offset*/
/* DJH 07/10/16 rewrote pre-transpose */
/* DJH 09/16/16 fix over prefetch by 16 now 8 */
/*======================================================================*/
/* CYCLE-COUNT: */
/* -> 16*K*N/32+11*N/4+24 */
/* */
/* MEMORY */
/* CODESIZE = 1040 bytes */
/* STACK = 48 bytes */
/* ASSUMPTIONS */
/* y and z are 128 byte aligned */
/* x is 8byte aligned */
/* N%4=0 K%8=0 M%128=0 */
/* C MODEL */
/* N = Nlen */
/* K = Klen | Kstride */
/* M = Mlen | Mstride */
/*======================================================================*/
#if 0
void gemmpybbw_cn(uint8 * a, uint8 * b, int * c, int N, int M, int K) {
int i, j, k;
int32 sum;
uint8 a_val, b_val;
for (j=0; j < M; j++) {
for (i=0; i < N; i++) {
sum = 0;
for (k=0; k < K; k++) {
a_val = a[i*K+k];
b_val = b[k*M+j];
sum += a_val * b_val ;
}
c[i*M+j] = sum;
}
}
return;
}
#endif
/*=============================================================================*/
.text
.file "gemmpybbw_h.S"
.global gemmpybbw_asm
.balign 32
.type gemmpybbw_asm, @function
gemmpybbw_asm:
/*=============================================================================*/
#define ptr_x r0 //data
#define ptr_yi r1 //weights
#define ptr_z r2 //results
#define n r3 //n %8 number of patches
#define m r4 //is stride of weights matrix k*32 always 32 wide
#define k r5 //ksize %16 | k - stride
/*=============================================================================*/
#define ksize r28 //amount of data in this job
#define ki r9 //
#define kstride7 r8 //
#define kjump r4 //16-8kstride
#define ptr_y r6 //
#define l1xptri0 r7 //
#define l1xptri0_ptr_y r7:6 //
#define l1xptri1 r10 //
#define l1xptri2 r11 //
#define l1xptri3 r12 //
#define l1xptr r13 //
#define skip r14 //
#define back r15 //
#define kk M0 //
#define mm M1 //
#define x07x04x03x00 r17:16 //1111-----------1
#define x0fx0cx0bx08 r23:22 //11-------------1
#define x17x14x13x10 r19:18 //1111------------
#define x1fx1cx1bx18 r21:20 //11--------------
#define x27x24x23x20 r21:20 //---111----------
#define x2fx2cx2bx28 r23:22 //---11111--------
#define x37x34x33x30 r19:18 //----11----------
#define x3fx3cx3bx38 r17:16 //----1111--------
#define x47x44x43x40 r21:20 //-------111------
#define x4fx4cx4bx48 r19:18 //-------11111----
#define x57x54x53x50 r25:24 //--------11------
#define x5fx5cx5bx58 r17:16 //--------1111----
#define x67x64x63x60 r23:22 //----------1111--
#define x6fx6cx6bx68 r25:24 //----------111111
#define x77x74x73x70 r27:26 //-----------111--
#define x7fx7cx7bx78 r21:20 //-----------11111
#define x03x00 r16 //1111-----------1
#define x0bx08 r22 //11-------------1
#define x13x10 r18 //1111------------
#define x1bx18 r20 //11--------------
#define x23x20 r20 //---111----------
#define x2bx28 r22 //---11111--------
#define x33x30 r18 //----11----------
#define x3bx38 r16 //----1111--------
#define x43x40 r20 //-------111------
#define x4bx48 r18 //-------11111----
#define x53x50 r24 //--------11------
#define x5bx58 r16 //--------1111----
#define x63x60 r22 //----------111---
#define x6bx68 r24 //----------11111-
#define x73x70 r26 //-----------111--
#define x7bx78 r20 //-----------11111
#define x07x04 r17 //1111-----------1
#define x0fx0c r23 //11-------------1
#define x17x14 r19 //1111------------
#define x1fx1c r21 //11--------------
#define x27x24 r21 //---111----------
#define x2fx2c r23 //---11111--------
#define x37x34 r19 //----11----------
#define x3fx3c r17 //----1111--------
#define x47x44 r21 //-------111------
#define x4fx4c r19 //-------11111----
#define x57x54 r25 //--------11------
#define x5fx5c r17 //--------1111----
#define x67x64 r23 //----------111---
#define x6fx6c r25 //----------11111-
#define x77x74 r27 //-----------111--
#define x7fx7c r21 //-----------11111
/*=============================================================================*/
#define z0 v0 //
#define z1 v1 //
#define z1z0 v1:0 //
#define z2 v2 //
#define z3 v3 //
#define z3z2 v3:2 //
#define z4 v4 //
#define z5 v5 //
#define z5z4 v5:4 //
#define z6 v6 //
#define z7 v7 //
#define z7z6 v7:6 //
#define y0 v8 //
#define y1 v9 //
#define y2 v10 //
#define y3 v11 //
#define vzero v12 //
/*=============================================================================*/
{ allocframe(#64) //
} {
memw(sp+#48) = r28 //
vzero = #0 //
ksize = lsr(k, #16) //extract work
k = zxth(k) //extract stride
} {
m = asl(m, #2) //ints
kk = k //stride k
} {
ki = lsr(ksize, #4) //k / 16
kstride7 = asl(k, #3) //3*kstride
memd(sp+#0) = r17:16 //
} {
kstride7 = sub(kstride7, k) //
l1xptr = ptr_x //asl(ptr_x, k, #3) //#48) //l1 fetch 32 +26 bytes ahead
n = lsr(n, #3) //divide by 8
memd(sp+#8) = r19:18 //
} {
mm = m //
kjump = sub(#16, kstride7) //zag back to next column of lines
ki = add(ki, #-1) //
} {
memd(sp+#16) = r21:20 //
kstride7 += sub(k, ksize) //correction factor
k = add(k, k) //2*k
l1xptri0 = add(l1xptr, #80) //[ , P]advance first fetches by 64
} {
loop1(.L_loopN, n) //[ , P]for(i=0; i < n; i+=4){
l1xptri1 = add(l1xptr, k) //[ , P]make temp copy
memd(sp+#24) = r23:22 //
memd(sp+#32) = r25:24 //
} {
memd(sp+#40) = r27:26 //
ptr_y = ptr_yi //[ , P]
loop0(.L_loopK, ki) //[P, 9]ki is k1/4 - 2
l1xptri2 = add(l1xptri1, k) //[ , P]make temp copy
} {
y0 = vmem(ptr_y++#2) //[0, 0]32x4
dcfetch(l1xptri0+#0) //[0, 0]prefetch next line
skip = lsr(k, #1) //next line += k
l1xptri3 = add(l1xptri2, k) //[ , P]make temp copy
} {
y1 = vmem(ptr_y+#-1) //[0, 1]32x4
l1xptri0 = add(l1xptri0, skip) //[0, 1]next line
back = sub(#32, skip) //skip back
l1xptr= addasl(l1xptr,k,#2) //[P, ]advance by 8k strip
} {
x0fx0cx0bx08 = memd(ptr_x+#8) //[0, 2]
x07x04x03x00 = memd(ptr_x++kk) //[0, 2]
z1z0 = vcombine(vzero, vzero) //[P, 0]
z3z2 = vcombine(vzero, vzero) //[P, 0]
} {
x1fx1cx1bx18 = memd(ptr_x+#8) //[0, 3]
x17x14x13x10 = memd(ptr_x++kk) //[0, 3]
z5z4 = vcombine(vzero, vzero) //[P, 0]
z7z6 = vcombine(vzero, vzero) //[P, 0]
}
/*============================================================================*/
.balign 32
.L_loopN:
.L_loopK:
{
y2.cur = vmem(ptr_y++#2) //[0, 4]32x4
z0.uw += vrmpy(y2.ub, x0bx08.ub) //[0, 4]
z1.uw += vrmpy(y2.ub, x1bx18.ub) //[0, 4]
dcfetch(l1xptri1+#0) //[0, 4]prefetch next line
} {
y3.cur = vmem(ptr_y+#-1) //[0, 5]32x4
z0.uw += vrmpy(y3.ub, x0fx0c.ub) //[0, 5]
z1.uw += vrmpy(y3.ub, x1fx1c.ub) //[0, 5]
l1xptri1 = add(l1xptri1, skip) //[0, 5]next line
} {
z0.uw += vrmpy(y0.ub, x03x00.ub) //[0, 6]
z1.uw += vrmpy(y0.ub, x13x10.ub) //[0, 6]
x2fx2cx2bx28 = memd(ptr_x+#8) //[0, 6]
x27x24x23x20 = memd(ptr_x++kk) //[0, 6]
} {
z0.uw += vrmpy(y1.ub, x07x04.ub) //[0, 7]
z1.uw += vrmpy(y1.ub, x17x14.ub) //[0, 7]
x3fx3cx3bx38 = memd(ptr_x+#8) //[0, 7]
x37x34x33x30 = memd(ptr_x++kk) //[0, 7]
} {
z2.uw += vrmpy(y0.ub, x23x20.ub) //[0, 8]
z3.uw += vrmpy(y0.ub, x33x30.ub) //[0, 8]
dcfetch(l1xptri2+#0) //[0, 8]prefetch next line
l1xptri2 = add(l1xptri2, skip) //[0, 8]next line
} {
z2.uw += vrmpy(y1.ub, x27x24.ub) //[0, 9]
z3.uw += vrmpy(y1.ub, x37x34.ub) //[0, 9]
dcfetch(l1xptri3+#0) //[0, 9]prefetch next line
l1xptri3 = add(l1xptri3, skip) //[0, 9]next line
} {
z2.uw += vrmpy(y2.ub, x2bx28.ub) //[0,10]
z3.uw += vrmpy(y2.ub, x3bx38.ub) //[0,10]
x4fx4cx4bx48 = memd(ptr_x+#8) //[0,10]
x47x44x43x40 = memd(ptr_x++kk) //[0,10]
} {
z2.uw += vrmpy(y3.ub, x2fx2c.ub) //[0,11]
z3.uw += vrmpy(y3.ub, x3fx3c.ub) //[0,11]
x5fx5cx5bx58 = memd(ptr_x+#8) //[0,11]
x57x54x53x50 = memd(ptr_x++kk) //[0,11]
} {
z4.uw += vrmpy(y0.ub, x43x40.ub) //[0,12]
z5.uw += vrmpy(y0.ub, x53x50.ub) //[0,12]
skip = back //[0,12]next line
back = skip //[0,12]previous line + 32
} {
z4.uw += vrmpy(y1.ub, x47x44.ub) //[0,13]
z5.uw += vrmpy(y1.ub, x57x54.ub) //[0,13]
x6fx6cx6bx68 = memd(ptr_x+#8) //[0,13]
x67x64x63x60 = memd(ptr_x++kk) //[0,13]
} {
z4.uw += vrmpy(y2.ub, x4bx48.ub) //[0,14]
z5.uw += vrmpy(y2.ub, x5bx58.ub) //[0,14]
x7fx7cx7bx78 = memd(ptr_x+#8) //[0,14]
x77x74x73x70 = memd(ptr_x+#0) //[0,14]
} {
z4.uw += vrmpy(y3.ub, x4fx4c.ub) //[0,15]
z5.uw += vrmpy(y3.ub, x5fx5c.ub) //[0,15]
ptr_x = add(ptr_x, kjump) //[0,15]
} {
z6.uw += vrmpy(y0.ub, x63x60.ub) //[0,16]
z7.uw += vrmpy(y0.ub, x73x70.ub) //[0,16]
y0 = vmem(ptr_y++#2) //[1, 0]32x4
dcfetch(l1xptri0+#0) //[1, 0]prefetch next line
} {
z6.uw += vrmpy(y1.ub, x67x64.ub) //[0,17]
z7.uw += vrmpy(y1.ub, x77x74.ub) //[0,17]
y1 = vmem(ptr_y+#-1) //[1, 1]32x4
l1xptri0 = add(l1xptri0, skip) //[1, 1]next line
} {
z6.uw += vrmpy(y2.ub, x6bx68.ub) //[0,18]
z7.uw += vrmpy(y2.ub, x7bx78.ub) //[0,18]
x0fx0cx0bx08 = memd(ptr_x+#8) //[1, 2]
x07x04x03x00 = memd(ptr_x++kk) //[1, 2]
} {
z6.uw += vrmpy(y3.ub, x6fx6c.ub) //[0,19]
z7.uw += vrmpy(y3.ub, x7fx7c.ub) //[0,19]
x1fx1cx1bx18 = memd(ptr_x+#8) //[1, 3]
x17x14x13x10 = memd(ptr_x++kk) //[1, 3]
}:endloop0
{
y2.cur = vmem(ptr_y++#2) //[1, 4]32x4
z0.uw += vrmpy(y2.ub, x0bx08.ub) //[1, 4]
z1.uw += vrmpy(y2.ub, x1bx18.ub) //[1, 4]
l1xptri0=l1xptr //[P, ]
} {
y3.cur = vmem(ptr_y+#-1) //[1, 5]32x4
z0.uw += vrmpy(y3.ub, x0fx0c.ub) //[1, 5]
z1.uw += vrmpy(y3.ub, x1fx1c.ub) //[1, 5]
l1xptri1 = add(l1xptr, k) //[ , P]make temp copy
} {
z0.uw += vrmpy(y0.ub, x03x00.ub) //[1, 6]
z1.uw += vrmpy(y0.ub, x13x10.ub) //[1, 6]
x2fx2cx2bx28 = memd(ptr_x+#8) //[1, 6]
x27x24x23x20 = memd(ptr_x++kk) //[1, 6]
} {
z0.uw += vrmpy(y1.ub, x07x04.ub) //[1, 7]
z1.uw += vrmpy(y1.ub, x17x14.ub) //[1, 7]
x3fx3cx3bx38 = memd(ptr_x+#8) //[1, 7]
x37x34x33x30 = memd(ptr_x++kk) //[1, 7]
} {
z2.uw += vrmpy(y0.ub, x23x20.ub) //[1, 8]
z3.uw += vrmpy(y0.ub, x33x30.ub) //[1, 8]
l1xptri2 = add(l1xptri1, k) //[ , P]make temp copy
} {
z2.uw += vrmpy(y1.ub, x27x24.ub) //[1, 9]
z3.uw += vrmpy(y1.ub, x37x34.ub) //[1, 9]
l1xptri3 = add(l1xptri2, k) //[ , P]make temp copy
} {
z2.uw += vrmpy(y2.ub, x2bx28.ub) //[1,10]
z3.uw += vrmpy(y2.ub, x3bx38.ub) //[1,10]
x4fx4cx4bx48 = memd(ptr_x+#8) //[1,10]
x47x44x43x40 = memd(ptr_x++kk) //[1,10]
} {
z2.uw += vrmpy(y3.ub, x2fx2c.ub) //[1,11]
z3.uw += vrmpy(y3.ub, x3fx3c.ub) //[1,11]
x5fx5cx5bx58 = memd(ptr_x+#8) //[1,11]
x57x54x53x50 = memd(ptr_x++kk) //[1,11]
} {
z4.uw += vrmpy(y0.ub, x43x40.ub) //[1,12]
z5.uw += vrmpy(y0.ub, x53x50.ub) //[1,12]
vmem(ptr_z++mm) = z0 //[E, ]
z0 = #0
} {
z4.uw += vrmpy(y1.ub, x47x44.ub) //[1,13]
z5.uw += vrmpy(y1.ub, x57x54.ub) //[1,13]
x6fx6cx6bx68 = memd(ptr_x+#8) //[1,13]
x67x64x63x60 = memd(ptr_x++kk) //[1,13]
} {
z4.uw += vrmpy(y2.ub, x4bx48.ub) //[1,14]
z5.uw += vrmpy(y2.ub, x5bx58.ub) //[1,14]
x7fx7cx7bx78 = memd(ptr_x+#8) //[1,14]
x77x74x73x70 = memd(ptr_x+#0) //[1,14]
} {
z4.uw += vrmpy(y3.ub, x4fx4c.ub) //[1,15]
z5.uw += vrmpy(y3.ub, x5fx5c.ub) //[1,15]
ptr_x = add(ptr_x, kjump) //[1,15]
vmem(ptr_z++mm) = z1 //[E, ]
} {
z6.uw += vrmpy(y0.ub, x63x60.ub) //[1,16]
z7.uw += vrmpy(y0.ub, x73x70.ub) //[1,16]
vmem(ptr_z++mm) = z2 //[E, ]
ptr_x = add(ptr_x, kstride7) //[E, ]
} {
z6.uw += vrmpy(y1.ub, x67x64.ub) //[1,17]
z7.uw += vrmpy(y1.ub, x77x74.ub) //[1,17]
vmem(ptr_z++mm) = z3 //[E, ]
z1 = #0 //[P, 0]
} {
z6.uw += vrmpy(y2.ub, x6bx68.ub) //[1,18]
z7.uw += vrmpy(y2.ub, x7bx78.ub) //[1,18]
vmem(ptr_z++mm) = z4 //[E, ]
z2 = #0 //[P, 0]
} {
z6.uw += vrmpy(y3.ub, x6fx6c.ub) //[1,19]
z7.uw += vrmpy(y3.ub, x7fx7c.ub) //[1,19]
vmem(ptr_z++mm) = z5 //[E, ]
z3 = #0 //[P, 0]
} {
l1xptr= addasl(l1xptr,k,#2) //[P, ]advance by 8k strip
vmem(ptr_z++mm) = z6 //[E, ]
skip = lsr(k, #1) //[P, ]
} {
back = sub(#32, skip) //
vmem(ptr_z++mm) = z7 //[E, ]
loop0(.L_prefetch, #3) //2 lines ahead
}
.balign 32
.L_prefetch:
{
dcfetch(l1xptri0+#0) //[E, 4]prefetch next line
l1xptri0 = add(l1xptri0, skip)
} {
dcfetch(l1xptri1+#0) //[E, 4]prefetch next line
l1xptri1 = add(l1xptri1, skip)
} {
dcfetch(l1xptri2+#0) //[E, 9]prefetch next line
l1xptri2 = add(l1xptri2, skip)
} {
dcfetch(l1xptri3+#0) //[E, 8]prefetch next line
l1xptri3 = add(l1xptri3, skip)
} {
dcfetch(l1xptri0+#0) //[E, 4]prefetch next line
l1xptri0 = add(l1xptri0, back)
} {
dcfetch(l1xptri1+#0) //[E, 4]prefetch next line
l1xptri1 = add(l1xptri1, back)
} {
dcfetch(l1xptri2+#0) //[E, 9]prefetch next line
l1xptri2 = add(l1xptri2, back)
} {
dcfetch(l1xptri3+#0) //[E, 8]prefetch next line
l1xptri3 = add(l1xptri3, back)
}:endloop0
{
x0fx0cx0bx08 = memd(ptr_x+#8) //[0, 2]
dcfetch(l1xptri0+#0) //[0, 0]prefetch next line
ptr_y = ptr_yi //
z4 = #0
} {
y0 = vmem(ptr_y++#2) //[0, 0]32x4
x07x04x03x00 = memd(ptr_x++kk) //[0, 2]
l1xptri0 = add(l1xptri0, skip) //[0, 1]next line
z5 = #0
} {
y1 = vmem(ptr_y+#-1) //[0, 1]32x4
x1fx1cx1bx18 = memd(ptr_x+#8) //[0, 3]
z6 = #0
} {
z7 = #0
x17x14x13x10 = memd(ptr_x++kk) //[0, 3]
loop0(.L_loopK, ki) //[P, 9]ki is k1/4 - 2
#ifdef TOOL_V_8_0_X_ASSEMBLER_ISSUE_WORKAROUND_EXTRA_NOP
} {
nop //NOTE: THIS EXTRA NOP PACKET ADDED TO WORK AROUND TOOL V.8.0.X ISSUE (JIRA) [QTOOL-27821] - NOP NOT NEEDED FOR TOOLS V.7.2 & V8.1
#endif
}:endloop1
/*=============================================================================*/
{ r17:16 = memd(sp+#0) //restore stack
r19:18 = memd(sp+#8) //Q
} {
r21:20 = memd(sp+#16) //Q
r23:22 = memd(sp+#24) //Q
} {
r25:24 = memd(sp+#32) //Q
r27:26 = memd(sp+#40) //Q
} {
r28 = memw(sp+#48) //Q
} {
dealloc_return //Q
}
.L_end:
/*=============================================================================*/
// NOTE(review): this .size references 'gemmpybbw_asm', but the file header
// declares the function as gvconv2db2b2b2u_d32_asm. This looks like a
// copy/paste of the symbol name from gemmpybbw; confirm against the actual
// entry label (above this chunk) -- if it differs, the .size directive will
// fail to resolve or tag the wrong symbol.
.size gemmpybbw_asm, .L_end-gemmpybbw_asm
|
XiaoMi/nnlib | 17,028 | hexagon/asm_src/dwconv2dbbb_s2_3xN_h.S | /* ----------------------------------------------------------------------------- */
.text
// NOTE(review): .file says "dwconv2dbbb_s2_3xN_list_h.S" but the repo path is
// dwconv2dbbb_s2_3xN_h.S -- cosmetic (debug-info only), but worth confirming.
.file "dwconv2dbbb_s2_3xN_list_h.S"
.global dwconv2dbbb_s2_3xN_asm
.balign 32
.type dwconv2dbbb_s2_3xN_asm, @function
dwconv2dbbb_s2_3xN_asm:
/* ----------------------------------------------------------------------------- */
// Depthwise 2-d convolution, quantized 8-bit, 3-wide x N-tall filter,
// horizontal stride 2, depth-32 ("d32") activation layout, Hexagon HVX.
//
// Arguments (register args r0..r5, remaining args on the caller's stack,
// read below via memw(sp+#16<<2) .. memw(sp+#27<<2)).
// NOTE(review): the exact C prototype is not visible in this file --
// confirm names/order against the C-side declaration:
//   r0        in_buf              input activations (d32 layout)
//   r1        filt                packed filter weights
//   r2        out_buf             output activations
//   r3        next_in_width_depth input row pitch in bytes
//   r4        next_out_width_depth output row pitch (unused here)
//   r5        next_in_width_32    pitch between input depth-32 slices
//   sp+#16<<2 next_out_width_32   pitch between output depth-32 slices
//   sp+#17<<2 depth               total depth; divided by 32 below
//   sp+#18<<2 out_width           output width; rounded up to a multiple of 4
//   sp+#19<<2 out_height          number of output rows
//   sp+#20<<2 filt_height         N = vertical filter taps
//   sp+#21<<2 filt_zero           filter zero-point (byte, splatted)
//   sp+#22<<2 bias_sum            per-depth bias vectors
//   sp+#23<<2 ptr_max             2 vectors: running max then min
//   sp+#24<<2 recip_level         per-depth 1/max scale vectors
//   sp+#25<<2 recip_shift         pre-scale left shift amount
//   sp+#26<<2 stride_v            vertical stride in rows
//   sp+#27<<2 sbuf_base           scratch buffer, 128*filt_height bytes
//
// Loop nest: .L_height (rows) -> .L_depth (depth/32 slices) -> .L_width
// (4 output pixels per pass; software-pipelined) -> .L_vert (filter rows).
// Each pass accumulates vrmpy sums s0..s3, subtracts the filter-zero
// correction z0..z3, left-shifts by recip_shift, scales by vrecip,
// saturating-packs 4x32w -> 32 bytes, and updates running max/min vectors
// (written back through ptr_max on exit) for requantization.
// r16-r27 are saved/restored on the frame; other scalar regs, predicates
// and v0-v29 are clobbered.
//I/O registers
#define in_buf r0 //
#define filt r1 //
#define out_buf r2 //
#define next_in_width_depth r3 //
#define next_out_width_depth r4 //currently unused
#define next_in_width_32 r5 //
#define next_out_width_32 r10 //
#define depth r11 //
#define out_width r12 //
#define out_height r13 //
#define filt_height r25 //
#define filt_zero r7 //
#define bias_sum r14 //
#define ptr_max r15 //
#define recip_level r10 //aliases next_out_width_32 (r10); reloaded per loop
#define recip_shift r8 //
#define stride_v r28 //
#define sbuf_base r4 //aliases next_out_width_depth (r4) after it is dead
//scaler register
#define ptr_w0 r16 //
#define ptr_w1 r17 //
#define width_cnt r6 //
#define bias_ptr r18 //
#define ptr_x0 r19 //
#define ptr_x1 r20 //
#define ptr_xin r22 //
#define ptr_y r23 //
#define depth_cnt r26 //
#define filt_size r9 //
#define next_in_width_depth_stride r28//
#define zzzz r7 //filt_zero splatted to all 4 bytes
#define _zzz r7 //zzzz >> 8: zero in 3 low bytes for 3-tap vrmpy
#define sbuf r21 //
//vector registers
#define vrecip v0 //
#define vshamt_vec v1 //
#define max v2 //
#define min v3 //
#define bias_val v4 //
#define x0 v5 //
#define x1 v6 //
#define x2 v7 //
#define xbxax9x8 v9 //
#define x7x6x5x4 v10 //
#define x3x2x1x0 v11 //
#define w_210 v8 //
#define x3x2x3x2 v26 //
#define x5x4x3x2 v27 //
#define x7x6x7x6 v28 //
#define x9x8x7x6 v29 //
#define s0 v12 //
#define s1 v13 //
#define s2 v14 //
#define s3 v15 //
#define z0 v16 //
#define z1 v17 //
#define z2 v18 //
#define z3 v19 //
#define d0 v20 //
#define d1 v21 //
#define d1d0 v24 //
#define d2 v22 //
#define d3 v23 //
#define d3d2 v25 //
#define d3210 v25 //
#define SSR <<1:rnd:sat:shift //simplify mpy instruction
#define PV(VSRC) .word (0x1DFFE020+VSRC)//debug vec reg
#define PS(SSRC) .word (0x1DFFE100+SSRC)//debug sca reg
/* =========================================================================== */
// Prologue: save callee-saved r16-r27, load stack args, precompute loop
// parameters and the splatted filter zero-point.
{ allocframe(#56) //0th entry on sbuf (56+8)/4=20
} {
memd(sp+#0) = r17:16 //
memd(sp+#8) = r19:18 //
} {
memd(sp+#16) = r21:20 //
memd(sp+#24) = r23:22 //
} {
memd(sp+#32) = r25:24 //
memd(sp+#40) = r27:26 //
} {
next_out_width_32 = memw(sp+#16<<2) //
depth = memw(sp+#17<<2) //
} {
out_width = memw(sp+#18<<2) //
out_height = memw(sp+#19<<2) //
depth = lsr(depth, #5) //depth/32
} {
filt_height = memw(sp+#20<<2) //
filt_zero = memw(sp+#21<<2) //
out_width = add(out_width, #3) //round up to multiple of 4...
} {
bias_sum = memw(sp+#22<<2) //
ptr_max = memw(sp+#23<<2) //
out_width = lsr(out_width, #2) //...then /4: passes of 4 output pixels
} {
memw(sp+#17<<2) = depth //stash depth/32 for reload each row
zzzz = vsplatb(filt_zero) //
} {
recip_shift = memw(sp+#25<<2) //
stride_v = memw(sp+#26<<2) //
} {
sbuf_base = memw(sp+#27<<2) //scratchpad buffer 128*filt_height bytes
vshamt_vec= vsplat(recip_shift) //
next_in_width_depth_stride = mpyi(next_in_width_depth,stride_v) //
} {
max = vmem(ptr_max+#0) //
filt_size = filt_height //
filt_height = add(filt_height, #-2) //first 2 filter rows handled in pro/epilog
} {
_zzz = lsr(zzzz, #8) //zero-point in low 3 bytes only (3-tap)
min = vmem(ptr_max+#1) //
depth_cnt = memw(sp+#17<<2) //depth
p2 = !cmp.gt(filt_height, #0) //<=0: filt_height<=2, skip .L_vert loop
}
/* ----------------------------------------------------------------------------- */
// Outer loop: one output row per iteration.
.balign 32
.L_height:
{
recip_level = memw(sp+#24<<2)
}
{ bias_ptr = bias_sum //
ptr_xin = in_buf
ptr_w0 = filt
out_height = add(out_height, #-1) //
}
/* ----------------------------------------------------------------------------- */
// Per depth-32 slice: load its scale vector and bias, then sweep the row.
.L_depth:
{
vrecip = vmem(recip_level++#1)
}
{
memw(sp+#52) = recip_level
}
{ bias_val = vmem(bias_ptr++#1) //
ptr_x0 = ptr_xin //
ptr_y = out_buf //
loop1(.L_width, out_width)
} {
x1 = vmemu(ptr_x0+#1) //[0, 0]32x4
loop0(.L_init, filt_size)
ptr_w1 = ptr_w0 //[WIDTH, P]
} {
sbuf = sbuf_base
ptr_x1 = ptr_x0 //[WIDTH, P]
width_cnt = memw(sp+#18<<2) //
}
/* --------------------------------------------------------------------------- */
// Prime the scratch buffer: one double-vshuff'd input vector per filter row.
.L_init:
{ x3x2x1x0 = vmemu(ptr_x1+#0) //[0, 0]
ptr_x1 = add(ptr_x1, next_in_width_depth) //[0, 0]
} {
x3x2x1x0.b = vshuff(x3x2x1x0.b) //[0, 1]
} {
x3x2x1x0.b = vshuff(x3x2x1x0.b) //[0, 2]
vmem(sbuf++#1) = x3x2x1x0.new
}:endloop0
{ loop0(.L_vert, filt_height) //[WIDTH, P]
p3 = !cmp.eq(r0, r0) //[WIDTH]p3=false: no store on first width pass
x1.b = vshuff(x1.b) //[0, 3]
ptr_x1 = ptr_x0 //[WIDTH, P]
}
/* --------------------------------------------------------------------------- */
// Width loop: software-pipelined; produces 4 output pixels (d0..d3) per pass.
.balign 32
.L_width:
{ x2 = vmemu(ptr_x1+#2) //[0, 4]
ptr_x1 = add(ptr_x1, next_in_width_depth) //[0, 4]
ptr_x0 = add(ptr_x0, #256) //[WIDTH,P]+4 32 depths for stride 2
} {
x7x6x5x4.b = vshuff(x1.b) //[0, 5]
x3x2x1x0 = vmem(sbuf_base) //[0, 5]
width_cnt = add(width_cnt, #-4) //
sbuf = sbuf_base //
} {
x2.b = vshuff(x2.b) //[0, 6]
w_210 = vmem(ptr_w1++#1) //[0, 6]
s1 = bias_val //[WIDTH, P]
s0 = bias_val //[WIDTH, P]
}
/* --------------------------------------------------------------------------- */
// First filter row (loop prolog): start the 4 accumulators and the
// zero-point correction sums.
{ x3x2x3x2.h = vshuffo(x3x2x1x0.h, x3x2x1x0.h) //[0, 7]
x1 = vmemu(ptr_x1+#1) //[1, 0]
s2 = bias_val //[WIDTH, P]
} {
s0.uw += vrmpy(x3x2x1x0.ub, w_210.ub) //[0, 8]filter even output
xbxax9x8.b = vshuff(x2.b) //[0, 8]
vmem(sbuf++#1) = xbxax9x8.new //[0, 9]
x5x4x3x2.h = vshuffe(x7x6x5x4.h,x3x2x3x2.h) //[0, 8]
} {
z0.uw = vrmpy(x3x2x1x0.ub, _zzz.ub) //[0, 9]filter even output
z2.uw = vrmpy(x7x6x5x4.ub, _zzz.ub) //[0, 9]filter even output
x7x6x7x6.h = vshuffo(x7x6x5x4.h, x7x6x5x4.h) //[0, 9]
z3 = #0 //
} {
s1.uw += vrmpy(x5x4x3x2.ub, w_210.ub) //[0,10]filter even output
x9x8x7x6.h = vshuffe(xbxax9x8.h,x7x6x7x6.h) //[0,10]
x1.b = vshuff(x1.b) //[1, 3]
} {
s2.uw += vrmpy(x7x6x5x4.ub, w_210.ub) //[0,11]filter even output
x2 = vmemu(ptr_x1+#2) //[1, 4]
s3 = bias_val //[WIDTH, P]
} {
s3.uw += vrmpy(x9x8x7x6.ub, w_210.ub) //[0,12]filter even output
x7x6x5x4.b = vshuff(x1.b) //[1, 5]
x3x2x1x0 = vmem(sbuf) //[1, 5]
ptr_x1 = add(ptr_x1, next_in_width_depth) //[1, 4]
} {
z1.uw = vrmpy(x5x4x3x2.ub, _zzz.ub) //[0,13]filter even output
x2.b = vshuff(x2.b) //[1, 6]
w_210 = vmem(ptr_w1++#1) //[1, 6]
if(p2) jump .L_skip //dont do <=0 iterations
}
// Middle filter rows (filt_height-2 iterations of the hardware loop).
.L_vert:
{
z3.uw += vrmpy(x9x8x7x6.ub, _zzz.ub) //[0,13]filter even output
x3x2x3x2.h = vshuffo(x3x2x1x0.h, x3x2x1x0.h) //[0, 7]
x1 = vmemu(ptr_x1+#1) //[1, 0]
} {
s0.uw += vrmpy(x3x2x1x0.ub, w_210.ub) //[0, 8]filter even output
xbxax9x8.b = vshuff(x2.b) //[0, 8]
x5x4x3x2.h = vshuffe(x7x6x5x4.h,x3x2x3x2.h) //[0, 8]
} {
z0.uw += vrmpy(x3x2x1x0.ub, _zzz.ub) //[0, 9]filter even output
z2.uw += vrmpy(x7x6x5x4.ub, _zzz.ub) //[0, 9]filter even output
vmem(sbuf++#1) = xbxax9x8 //[0, 9]
x7x6x7x6.h = vshuffo(x7x6x5x4.h, x7x6x5x4.h) //[0, 9]
} {
s1.uw += vrmpy(x5x4x3x2.ub, w_210.ub) //[0,10]filter even output
x9x8x7x6.h = vshuffe(xbxax9x8.h,x7x6x7x6.h) //[0,10]
x1.b = vshuff(x1.b) //[1, 3]
} {
s2.uw += vrmpy(x7x6x5x4.ub, w_210.ub) //[0,11]filter even output
x2 = vmemu(ptr_x1+#2) //[1, 4]
ptr_x1 = add(ptr_x1, next_in_width_depth) //[1, 4]
} {
s3.uw += vrmpy(x9x8x7x6.ub, w_210.ub) //[0,12]filter even output
x7x6x5x4.b = vshuff(x1.b) //[1, 5]
x3x2x1x0 = vmem(sbuf) //[1, 5]
} {
z1.uw += vrmpy(x5x4x3x2.ub, _zzz.ub) //[0,13]filter even output
x2.b = vshuff(x2.b) //[1, 6]
w_210 = vmem(ptr_w1++#1) //[1, 6]
}:endloop0
/* --------------------------------------------------------------------------- */
// Last filter row (loop epilog), then requantize: subtract zero-point
// correction, shift, scale, clamp min/max, pack, and (next pass) store.
.L_skip:
{ x3x2x3x2.h = vshuffo(x3x2x1x0.h, x3x2x1x0.h) //[1, 7]
z3.uw += vrmpy(x9x8x7x6.ub, _zzz.ub) //[0,13]filter even output
d3d2.h = vpack(d3.w, d2.w):sat //[WIDTH, E]
} {
s0.uw += vrmpy(x3x2x1x0.ub, w_210.ub) //[1, 8]filter even output
xbxax9x8.b = vshuff(x2.b) //[1, 8]
x5x4x3x2.h = vshuffe(x7x6x5x4.h,x3x2x3x2.h) //[1, 8]
} {
z0.uw += vrmpy(x3x2x1x0.ub, _zzz.ub) //[1, 9]filter even output
z2.uw += vrmpy(x7x6x5x4.ub, _zzz.ub) //[1, 9]filter even output
vmem(sbuf++#1) = xbxax9x8 //[1, 9]
x7x6x7x6.h = vshuffo(x7x6x5x4.h, x7x6x5x4.h) //[1, 9]
} {
s0.w = vsub(s0.w, z0.w) //remove zero-point contribution
s1.uw += vrmpy(x5x4x3x2.ub, w_210.ub) //[1,10]filter even output
x9x8x7x6.h = vshuffe(xbxax9x8.h,x7x6x7x6.h) //[1,10]
} {
s2.uw += vrmpy(x7x6x5x4.ub, w_210.ub) //[1,11]filter even output
d3210.ub = vpack(d3d2.h, d1d0.h):sat //[WIDTH, E]
} {
s3.uw += vrmpy(x9x8x7x6.ub, w_210.ub) //[1,12]filter even output
s0.w = vasl(s0.w, vshamt_vec.w) //
s2.w = vsub(s2.w, z2.w) //
p1 = !cmp.gt(width_cnt, #-2) //[WIDTH] test s2 oor
} {
if(p3) vmemu(ptr_y++#1) = d3210 //[WIDTH, E]store previous pass's 4 pixels
z1.uw += vrmpy(x5x4x3x2.ub, _zzz.ub) //[1,13]filter even output
z3.uw += vrmpy(x9x8x7x6.ub, _zzz.ub) //[1,13]filter even output
} {
s2.w = vasl(s2.w, vshamt_vec.w) //
s1.w = vsub(s1.w, z1.w) //
d0.w = vmpye(s0.w, vrecip.uh) //[0,15]multiply by 1/max
} {
s3.w = vsub(s3.w, z3.w) //
d0.w += vmpyo(s0.w, vrecip.h):SSR //[0,17]3
x1 = vmemu(ptr_x0+#1) //[P, 0]
} {
s1.w = vasl(s1.w, vshamt_vec.w) //
d2.w = vmpye(s2.w, vrecip.uh) //[0,15]multiply by 1/max
min.w = vmin(min.w, d0.w) //[0,22]8 //0+2+1
ptr_w1 = ptr_w0 //[WIDTH, P]
} {
max.w = vmax(max.w, d0.w) //[0,18]5 //0+2+1
s3.w = vasl(s3.w, vshamt_vec.w) //
d2.w += vmpyo(s2.w, vrecip.h):SSR //[0,17]3
p0 = !cmp.gt(width_cnt, #-1) //WIDTH] test s3 oor
} {
if(p1) d2 = d0 //out-of-range pixel: replicate d0
p1 = !cmp.gt(width_cnt, #-3) //[WIDTH] test s1 oor
d1.w = vmpye(s1.w, vrecip.uh) //[0,22]multiply by 1/max
if(p0) s3 = s0 //
} {
d1.w += vmpyo(s1.w, vrecip.h):SSR //[0,23]9
min.w = vmin(min.w, d2.w) //[0,22]8 //0+2+1
max.w = vmax(max.w, d2.w) //[0,18]5 //0+2+1
loop0(.L_vert, filt_height) //[WIDTH, P]
} {
if(p1) d1 = d0 //out-of-range pixel: replicate d0
d3.w = vmpye(s3.w, vrecip.uh) //[0,22]multiply by 1/max
x1.b = vshuff(x1.b) //[0, 3]
p3 = cmp.eq(r0, r0) //[WIDTH]p3=true: stores enabled from now on
} {
d3.w += vmpyo(s3.w, vrecip.h):SSR //[0,23]9
max.w = vmax(max.w, d1.w) //[0,26]12 //0+2+1
min.w = vmin(min.w, d1.w) //[0,27]13 //0+2+1
ptr_x1 = ptr_x0 //[WIDTH, P]
} {
d1d0.h = vpack(d1.w, d0.w):sat //[0,27]
max.w = vmax(max.w, d3.w) //[0,26]12 //0+2+1
min.w = vmin(min.w, d3.w) //[0,27]13 //0+2+1
}:endloop1 //end width
// Depth-loop epilogue: flush the last 4 pixels, advance filter/input/output
// pointers to the next depth-32 slice.
{ d3d2.h = vpack(d3.w, d2.w):sat //[WIDTH, E]
ptr_w0 += asl(filt_size, #7) //[DEPTH,E]filt_size = filt_height*256 //4*3*64
ptr_xin = add(ptr_xin, next_in_width_32) //[DEPTH]
next_out_width_32 = memw(sp+#16<<2)
} {
depth_cnt = add(depth_cnt, #-1) //[DEPTH,E]
out_buf = add(out_buf, next_out_width_32) //[DEPTH]
recip_level = memw(sp+#52)
} {
p0 = cmp.eq(depth_cnt, #0) //[DEPTH,E]
d3210.ub = vpack(d3d2.h, d1d0.h):sat //[WIDTH, E]
} {
vmemu(ptr_y+#0) = d3210 //[WIDTH, E]
if(!p0) jump .L_depth //[DEPTH,E]
}//end depth
/* ----------------------------------------------------------------------------- */
{ p0 = cmp.eq(out_height, #0) //
depth_cnt = memw(sp+#17<<2) //depth
in_buf = add(in_buf, next_in_width_depth_stride) //stride
if(!p0.new) jump:nt .L_height //
}//end height
/* ----------------------------------------------------------------------------- */
// Epilogue: write back running max/min, restore r16-r27, return.
ptr_max = memw(sp+#23<<2) //reload: r15 may alias a loop temp -- NOTE(review)
{
r17:16 = memd(sp+#0) //restore
vmem(ptr_max+#0) = max //
} {
r19:18 = memd(sp+#8) //restore
vmem(ptr_max+#1) = min //
} {
r21:20 = memd(sp+#16) //restore
r23:22 = memd(sp+#24) //restore
} {
r25:24 = memd(sp+#32) //restore
r27:26 = memd(sp+#40) //restore
} {
dealloc_return //return
}
/* ----------------------------------------------------------------------------- */
.L_end:
.size dwconv2dbbb_s2_3xN_asm, .L_end-dwconv2dbbb_s2_3xN_asm
/* ----------------------------------------------------------------------------- */
|
XiaoMi/nnlib | 15,380 | hexagon/asm_src/dwconv2dbbb_s1_3xN_h.S | /* ----------------------------------------------------------------------------- */
.text
.file "dwconv2dbbb_s1_3xN_h.S"
.global dwconv2dbbb_s1_3xN_asm
.balign 32
.type dwconv2dbbb_s1_3xN_asm, @function
dwconv2dbbb_s1_3xN_asm:
/* ----------------------------------------------------------------------------- */
// Depthwise 2-d convolution, quantized 8-bit, 3-wide x N-tall filter,
// horizontal stride 1, depth-32 ("d32") activation layout, Hexagon HVX.
// Stride-1 sibling of dwconv2dbbb_s2_3xN_asm: even and odd output columns
// are computed together using byte-shifted weight vectors (w_210 / w210_),
// so no scratch line buffer is needed.
//
// Arguments (register args r0..r5, remaining args on the caller's stack,
// read below via memw(sp+#16<<2) .. memw(sp+#26<<2)).
// NOTE(review): the exact C prototype is not visible in this file --
// confirm names/order against the C-side declaration:
//   r0        in_buf              input activations (d32 layout)
//   r1        filt                packed filter weights
//   r2        out_buf             output activations
//   r3        next_in_width_depth input row pitch in bytes
//   r4        next_out_width_depth output row pitch (unused here)
//   r5        next_in_width_32    pitch between input depth-32 slices
//   sp+#16<<2 next_out_width_32   pitch between output depth-32 slices
//   sp+#17<<2 depth               total depth; divided by 32 below
//   sp+#18<<2 out_width           output width; rounded up to a multiple of 4
//   sp+#19<<2 out_height          number of output rows
//   sp+#20<<2 filt_height         N = vertical filter taps
//   sp+#21<<2 filt_zero           filter zero-point (byte, splatted)
//   sp+#22<<2 bias_sum            per-depth bias vectors
//   sp+#23<<2 ptr_max             2 vectors: running max then min
//   sp+#24<<2 recip_level         per-depth 1/max scale vectors
//   sp+#25<<2 recip_shift         pre-scale left shift amount
//   sp+#26<<2 stride_v            vertical stride in rows
//
// Loop nest: .L_height (rows) -> .L_depth (depth/32 slices) -> .L_width
// (4 output pixels per pass; software-pipelined) -> .L_vert (filter rows).
// Accumulators s0..s3 get the vrmpy sums plus bias; z0..z3 hold the
// filter-zero correction that is subtracted before shifting, scaling by
// vrecip, saturating-packing to bytes and updating the max/min vectors
// (written back through ptr_max on exit).
// r16-r27 are saved/restored on the frame; other scalar regs, predicates
// and v0-v30 are clobbered.
#define in_buf r0
#define filt r1
#define out_buf r2
#define next_in_width_depth r3
#define next_out_width_depth r4 //currently unused
#define next_in_width_32 r5
#define next_out_width_32 r10
#define depth r11
#define out_width r12
#define out_height r13
#define width_count r27
#define filt_height r25
#define filt_zero r7
#define bias_sum r14
#define ptr_max r15
#define recip_level r27
#define recip_shift r8
#define stride_v r28
#define ptr_w0 r16
#define ptr_w1 r17
#define c8 r6
#define bias_ptr r18
#define ptr_x0 r19
#define ptr_x1 r20
#define ptr_xin r22
#define ptr_y r23
#define depth_cnt r26
#define filt_size r9
#define next_in_width_depth_stride r28
#define zzzz r7 //111111111111 filt_zero in all 4 bytes
#define _zzz r21 //111111111111 zzzz >> 8 (low 3 bytes)
#define zzz_ r4 //111111111111 zzzz << 8 (high 3 bytes)
#define nort r24
#define vrecip v0
#define vshamt_vec v1
#define maxv v2
#define minv v3
#define bias_val v4
#define x0 v8 //___11_______
#define x1 v29 //_111________
#define w_210 v9 //_______111__
#define w210_ v28 //__________11
#define x7x5x6x4 v29 //____11______
#define x7x6x5x4 v10 //111111111111
#define x3x1x2x0 v30 //_____11_____
#define x3x2x1x0 v11 //111111111111
#define x3x2x3x2 v6 //111111111111
#define x5x4x3x2 v7 //111111111111
#define s0 v12 //111111111111
#define s1 v13 //111111111111
#define s2 v14 //111111111111
#define s3 v15 //111111111111
#define z0 v16 //111111111111
#define z1 v17 //111111111111
#define z2 v18 //111111111111
#define z3 v19 //111111111111
#define d0 v20 //
#define d1 v21 //
#define d1d0 v24 //
#define d2 v22 //
#define d3 v23 //
#define d3d2 v25 //
#define d3210 v25 //
#define SSR <<1:rnd:sat:shift //simplify mpy instruction
#define PV(VSRC) .word (0x1DFFE020+VSRC)
#define PS(SSRC) .word (0x1DFFE100+SSRC) //debug sca reg
/* =========================================================================== */
// Prologue: save callee-saved r16-r27, load stack args, precompute loop
// parameters, splatted zero-point and its byte-shifted variants.
{ allocframe(#56) //0th entry on stack (56+8)/4=20
c8 = #8
} {
memd(sp+#0) = r17:16
memd(sp+#8) = r19:18
} {
memd(sp+#16) = r21:20
memd(sp+#24) = r23:22
} {
memd(sp+#32) = r25:24
memd(sp+#40) = r27:26
} {
next_out_width_32 = memw(sp+#16<<2) //
depth = memw(sp+#17<<2) //
nop;
nop;
} {
nort = #0
out_width = memw(sp+#18<<2) //
out_height = memw(sp+#19<<2) //
depth = lsr(depth, #5) //depth/32
} {
filt_height = memw(sp+#20<<2) //
filt_zero = memw(sp+#21<<2) //
out_width = add(out_width, #3) //round up to multiple of 4...
nop;
} {
bias_sum = memw(sp+#22<<2) //
ptr_max = memw(sp+#23<<2) //
out_width = lsr(out_width, #2) //...then /4: passes of 4 output pixels
filt_size = asl(filt_height, #7) //filter bytes per depth slice
} {
recip_shift = memw(sp+#25<<2) //
zzzz = vsplatb(filt_zero)
p3 = cmp.eq(filt_height, #2) //2-tap case: .L_vert loop skipped
} {
memw(sp+#17<<2) = depth //stash depth/32 for reload each row
vshamt_vec= vsplat(recip_shift)
stride_v = memw(sp+#26<<2) //
} {
next_in_width_depth_stride = mpyi(next_in_width_depth, stride_v)
maxv = vmem(ptr_max+#0)
depth_cnt = memw(sp+#17<<2) //depth
filt_height = add(filt_height, #-2) //first 2 rows handled in pro/epilog
} {
minv = vmem(ptr_max+#1)
zzz_ = asl(zzzz, c8) //zero-point for odd-column taps
_zzz = lsr(zzzz, c8) //zero-point for even-column taps
}
/* ----------------------------------------------------------------------------- */
// Outer loop: one output row per iteration.
.balign 32
.L_height:
{
recip_level = memw(sp+#24<<2) //
} {
bias_ptr = bias_sum //
ptr_xin = in_buf
ptr_w0 = filt
filt_height = max(filt_height, nort) //clamp filt_height-2 at 0
}
/* ----------------------------------------------------------------------------- */
// Per depth-32 slice: load scale + bias, prime the pipeline for the row.
.L_depth:
{ x0 = vmemu(ptr_xin+#0) //[1, 0]
ptr_x0 = ptr_xin //
} {
x1 = vmemu(ptr_x0+#1) //[0, 0]
ptr_y = out_buf //
} {
vrecip = vmem(recip_level++#1) //
} {
x3x1x2x0.b = vshuff(x0.b) //[0, 1]
p2 = !cmp.eq(r0,r0) //[WIDTH]p2=false: no store on first pass
bias_val = vmem(bias_ptr++#1) //
memw(sp+#52) = recip_level //quantization level save to temp location
} {
ptr_x1 = add(ptr_x0, next_in_width_depth) //[0, 1]
x7x5x6x4.b = vshuff(x1.b) //[0, 2]
loop1(.L_width, out_width)
width_count = memw(sp+#18<<2) //
} {
width_count = add(width_count, #-4) //
x3x2x1x0.b = vshuff(x3x1x2x0.b) //[0, 3]
loop0(.L_vert, filt_height) //[WIDTH, P]
}
/* --------------------------------------------------------------------------- */
// Width loop: software-pipelined; produces 4 output pixels (d0..d3) per pass.
.balign 32
.L_width:
{
w_210.cur = vmem(ptr_w0+#0) //[0, 2]
w210_.w = vasl(w_210.w, c8) //[0, 5]weights shifted for odd columns
x7x6x5x4.b = vshuff(x7x5x6x4.b) //[0, 5]
ptr_w1 = add(ptr_w0, #128) //[WIDTH, P]
} {
x3x2x3x2.h = vshuffo(x3x2x1x0.h, x3x2x1x0.h) //[0, 4]
x0 = vmemu(ptr_x1+#0) //[0, 4]
z0.uw = vrmpy(x3x2x1x0.ub, _zzz.ub) //[0, 9]filter even output
} {
s0.uw = vrmpy(x3x2x1x0.ub, w_210.ub) //[0, 6]filter even output
x5x4x3x2.h = vshuffe(x7x6x5x4.h,x3x2x3x2.h) //[0, 6]
x1 = vmemu(ptr_x1+#1) //[1, 0]
} {
s1.uw = vrmpy(x3x2x1x0.ub, w210_.ub) //[0, 7]filter odd output
x3x1x2x0.b = vshuff(x0.b) //[1, 1]
ptr_x1 = add(ptr_x1, next_in_width_depth) //[1, 1]
s3 = bias_val //[WIDTH, P]
} {
s2.uw = vrmpy(x5x4x3x2.ub, w_210.ub) //[0, 8]filter even output
x7x5x6x4.b = vshuff(x1.b) //[1, 2]
w_210 = vmem(ptr_w1++#1) //[1, 2]
ptr_x0 = add(ptr_x0, #128) //[WIDTH,P]+4 32 depths for stride 1
} {
z1.uw = vrmpy(x3x2x1x0.ub, zzz_.ub) //[0, 9]filter odd output
z2.uw = vrmpy(x5x4x3x2.ub, _zzz.ub) //[0,11]filter even output
x3x2x1x0.b = vshuff(x3x1x2x0.b) //[1, 3]
s2.w = vadd(bias_val.w, s2.w) //[WIDTH, P]
} {
s3.uw += vrmpy(x5x4x3x2.ub, w210_.ub) //[0,10]filter odd output
x0 = vmemu(ptr_x1+#0) //[1, 4]
x3x2x3x2.h = vshuffo(x3x2x1x0.h, x3x2x1x0.h) //[1, 4]
} {
z3.uw = vrmpy(x5x4x3x2.ub, zzz_.ub) //[0,11]filter odd output
x7x6x5x4.b = vshuff(x7x5x6x4.b) //[1, 5]
w210_.w = vasl(w_210.w, c8) //[1, 5]
if(p3) jump .L_skip //2-tap filter: no middle rows
}
// Middle filter rows (filt_height-2 iterations of the hardware loop).
.balign 32
.L_vert:
{ s0.uw += vrmpy(x3x2x1x0.ub, w_210.ub) //[0, 6]filter even output
x5x4x3x2.h = vshuffe(x7x6x5x4.h,x3x2x3x2.h) //[0, 6]
x1 = vmemu(ptr_x1+#1) //[1, 0]
} {
s1.uw += vrmpy(x3x2x1x0.ub, w210_.ub) //[0, 7]filter odd output
x3x1x2x0.b = vshuff(x0.b) //[1, 1]
ptr_x1 = add(ptr_x1, next_in_width_depth) //[1, 1]
nop;
} {
s2.uw += vrmpy(x5x4x3x2.ub, w_210.ub) //[0, 8]filter even output
x7x5x6x4.b = vshuff(x1.b) //[1, 2]
w_210 = vmem(ptr_w1++#1) //[1, 2]
nop;
} {
z0.uw += vrmpy(x3x2x1x0.ub, _zzz.ub) //[0, 9]filter even output
z1.uw += vrmpy(x3x2x1x0.ub, zzz_.ub) //[0, 9]filter odd output
x3x2x1x0.b = vshuff(x3x1x2x0.b) //[1, 3]
nop;
} {
s3.uw += vrmpy(x5x4x3x2.ub, w210_.ub) //[0,10]filter odd output
x0 = vmemu(ptr_x1+#0) //[1, 4]
x3x2x3x2.h = vshuffo(x3x2x1x0.h, x3x2x1x0.h) //[1, 4]
} {
z2.uw += vrmpy(x5x4x3x2.ub, _zzz.ub) //[0,11]filter even output
z3.uw += vrmpy(x5x4x3x2.ub, zzz_.ub) //[0,11]filter odd output
x7x6x5x4.b = vshuff(x7x5x6x4.b) //[1, 5]
w210_.w = vasl(w_210.w, c8) //[1, 5]
}:endloop0
// Last filter row (loop epilog), then requantize: subtract zero-point
// correction, shift, scale, clamp min/max, pack, and (next pass) store.
.L_skip:
{ s0.uw += vrmpy(x3x2x1x0.ub, w_210.ub) //[1, 6]filter even output
x5x4x3x2.h = vshuffe(x7x6x5x4.h,x3x2x3x2.h) //[1, 6]
x0 = vmemu(ptr_x0+#0) //[1, 0]
} {
s1.w = vadd(bias_val.w, s1.w) //[WIDTH, P]
s0.w = vadd(s0.w, bias_val.w) //[WIDTH, ]
z0.uw += vrmpy(x3x2x1x0.ub, _zzz.ub) //[1, 9]filter even output
z1.uw += vrmpy(x3x2x1x0.ub, zzz_.ub) //[1, 9]filter odd output
} {
s1.uw += vrmpy(x3x2x1x0.ub, w210_.ub) //[1, 7]filter odd output
s0.w = vsub(s0.w, z0.w) //[WIDTH, ]remove zero-point contribution
d3d2.h = vpack(d3.w, d2.w):sat //[WIDTH, E]
p1 =!cmp.gt(width_count, #-3) //[WIDTH]
} {
s2.uw += vrmpy(x5x4x3x2.ub, w_210.ub) //[1, 8]filter even output
s1.w = vsub(s1.w, z1.w) //[WIDTH, ]
x3x1x2x0.b = vshuff(x0.b) //[0, 1]
nop;
} {
s3.uw += vrmpy(x5x4x3x2.ub, w210_.ub) //[1,10]filter odd output
s0.w = vasl(s0.w, vshamt_vec.w) //[WIDTH, ]
d3210.ub = vpack(d3d2.h, d1d0.h):sat //[WIDTH, E]
loop0(.L_vert, filt_height) //[WIDTH, P]
} {
z2.uw += vrmpy(x5x4x3x2.ub, _zzz.ub) //[1,11]filter even output
z3.uw += vrmpy(x5x4x3x2.ub, zzz_.ub) //[1,11]filter odd output
s1.w = vasl(s1.w, vshamt_vec.w) //[WIDTH, ]
x3x2x1x0.b = vshuff(x3x1x2x0.b) //[0, 3]
} {
d0.w = vmpye(s0.w, vrecip.uh) //[WIDTH, ]multiply by 1/max
s2.w = vsub(s2.w, z2.w) //[WIDTH, ]
if(p1) s1 = s0 //[WIDTH]out-of-range pixel: replicate s0
nop;
} {
if(p2) vmemu(ptr_y++#1) = d3210 //[WIDTH, E]store previous pass's 4 pixels
d0.w += vmpyo(s0.w, vrecip.h):SSR //[WIDTH, ]
s3.w = vsub(s3.w, z3.w) //[WIDTH, ]
} {
p1 =!cmp.gt(width_count, #-1) //[WIDTH]
minv.w = vmin(minv.w, d0.w) //[WIDTH, ]
d1.w = vmpye(s1.w, vrecip.uh) //[WIDTH, ]
s2.w = vasl(s2.w, vshamt_vec.w) //[WIDTH, ]
} {
d1.w += vmpyo(s1.w, vrecip.h):SSR //[WIDTH, ]
s3.w = vasl(s3.w, vshamt_vec.w) //[WIDTH, ]
maxv.w = vmax(maxv.w, d0.w) //[WIDTH, ]
nop;
} {
d2.w = vmpye(s2.w, vrecip.uh) //[WIDTH, ]
if(p1) s3 = s0 //[WIDTH]out-of-range pixel: replicate s0
maxv.w = vmax(maxv.w, d1.w) //[WIDTH, ]
p2 = cmp.eq(r0, r0) //[WIDTH]p2=true: stores enabled from now on
} {
d1d0.h = vpack(d1.w, d0.w):sat //[WIDTH, ]
minv.w = vmin(minv.w, d1.w) //[WIDTH, ]
d2.w += vmpyo(s2.w, vrecip.h):SSR //[WIDTH, ]
p1 =!cmp.gt(width_count, #-2) //[WIDTH]
} {
if(p1) d2 = d0 //[WIDTH]out-of-range pixel: replicate d0
d3.w = vmpye(s3.w, vrecip.uh) //[WIDTH, ]
x1 = vmemu(ptr_x0+#1) //[0, 0]
} {
d3.w += vmpyo(s3.w, vrecip.h):SSR //[WIDTH, ]
maxv.w = vmax(maxv.w, d2.w) //[WIDTH, ]
minv.w = vmin(minv.w, d2.w) //[WIDTH, ]
width_count = add(width_count, #-4) //[WIDTH]
}
{
ptr_x1 = add(ptr_x0, next_in_width_depth) //[0, 1]
maxv.w = vmax(maxv.w, d3.w) //[WIDTH, ]
minv.w = vmin(minv.w, d3.w) //[WIDTH, ]
x7x5x6x4.b = vshuff(x1.b) //[0, 2]
}:endloop1 //end width
// Depth-loop epilogue: flush the last 4 pixels, advance filter/input/output
// pointers to the next depth-32 slice.
{ d3d2.h = vpack(d3.w, d2.w):sat //[WIDTH, E]
ptr_w0 = add(ptr_w0, filt_size) //[DEPTH,E]filt_size = filt_height*256 //4*3*64
ptr_xin = add(ptr_xin, next_in_width_32) //[DEPTH]
depth_cnt = add(depth_cnt, #-1) //[DEPTH,E]
} {
d3210.ub = vpack(d3d2.h, d1d0.h):sat //[WIDTH, E]
recip_level = memw(sp+#52) //read quantization level from temp location
out_buf = add(out_buf, next_out_width_32) //[DEPTH]
p0 = cmp.eq(depth_cnt, #0) //[DEPTH,E]
} {
vmemu(ptr_y+#0) = d3210 //[WIDTH, E]
if(!p0) jump .L_depth //[DEPTH,E]
if( p0) out_height = add(out_height, #-1) //
}//end depth
/* ----------------------------------------------------------------------------- */
{ p0 = cmp.eq(out_height, #0) //
depth_cnt = memw(sp+#17<<2) //depth
in_buf = add(in_buf, next_in_width_depth_stride) //stride
if(!p0.new) jump:nt .L_height
}
//end height
/* ----------------------------------------------------------------------------- */
// Epilogue: write back running max/min, restore r16-r27, return.
{ r17:16 = memd(sp+#0) //restore
vmem(ptr_max+#0) = maxv //
} {
r19:18 = memd(sp+#8) //restore
vmem(ptr_max+#1) = minv //
} {
r21:20 = memd(sp+#16) //restore
r23:22 = memd(sp+#24) //restore
} {
r25:24 = memd(sp+#32) //restore
r27:26 = memd(sp+#40) //restore
} {
dealloc_return //return
}
/* ----------------------------------------------------------------------------- */
.L_end:
.size dwconv2dbbb_s1_3xN_asm, .L_end-dwconv2dbbb_s1_3xN_asm
/* ----------------------------------------------------------------------------- */
/* =========================================================================== */
|
/* ===== file: hexagon/asm_src/inconv2dbbb332_d32_v60_h.S (XiaoMi/nnlib) ===== */
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
 * FUNCTIONS  : inconv2dbbb332_v60_asm
*
* DESCRIPTION
* Perform 2d convolution using elements of size in_depth < 32. Results are
* scaled and saturated to 8bits. Max and Min accumulations are kept.
*
* ARCHITECTURE : QDSP6V60 + HVX
*
* REVISION HISTORY:
* =================
*
* Author Date Comments
* -------------------------------------------------------------
* DJH 05/11/17 created
*
* CYCLE-COUNT:
*
* MEMORY
* CODESIZE = 928 bytes
* STACK = 80 bytes
* ASSUMPTIONS
*/
/*=============================================================================*/
.text
.file "inconv2dbbb332_d32_v60_h.S"
.global inconv2dbbb332_v60_asm
.balign 32
.type inconv2dbbb332_v60_asm, @function
/* parameters:
* r0 ( const uint8_t * input,
* r1 const uint8_t * weights,
* r2 uint8_t * output,
* r3 int in_width_pad,
* r4 int next_out_width_row,
* r5 int out_width,
* PARMW(0) int indepth,
* PARMW(1) int filt_width,
* PARMW(2) int filt_height,
* PARMW(3) int num_out_lines,
* PARMW(4) int32_t * minmax_buf,
* PARMW(5) int recip_level,
* PARMW(6) const int32_t *biasbuf,
* PARMW(7) const int32_t *ptr_suma,
* PARMW(8) int next_suma,
* PARMW(9) int stride_height_width,
* PARMW(10) int recip_shamt);
*/
/*-----------------------------------------------------------------------------
 * inconv2dbbb332_v60_asm -- entry, register map and prologue.
 * Scalar args arrive in r0-r5; the remaining parameters are read from the
 * caller's stack via PARMW(n) below.  Saves callee-saved r16-r27, seeds the
 * running min (mine) with +0x7fffffff and the running max (maxe) with
 * 0x80000000, and precomputes the strides used by the loops that follow.
 *---------------------------------------------------------------------------*/
inconv2dbbb332_v60_asm:
/*=============================================================================*/
#define ptr_xi r0 //data
#define ptr_wi r1 //weights
#define ptr_zi r2 //results
#define in_width r3 //(pad_l+in_width+pad_r)
#define out_width_stride_depth r4 //next line amount
#define out_width r5 //is amount of work to be done
#define in_depth r26 //0
#define filt_width r17 //1 horizontal filter width
#define filt_height r8 //2 filt_height lines per filter
#define out_height r9 //3 number of vertical lines to perform
#define ptr_max r13 //4 maximum and minimum buffer
#define recip_level r14 //5 255 / (MAX - MIN) - used to scale to bytes
#define filt_sum r15 //6 gemsumb (loaded from PARMW(6); aliases r15 with in_width_stride_depth - used sequentially)
#define active_sum r18 //7 gemsuma activations
#define next_suma_buf r1 //8 stride for suma buffer (r1 also serves as recip_shamt later)
#define stride_v_h r10 //9 stride_vert | stride_horz ->M0
// sum0..sum3 all alias r19: each scalar suma value is consumed (vsplat)
// before the next one is loaded.
#define sum0 r19
#define sum1 r19
#define sum2 r19
#define sum3 r19
#define recip_shamt r1 //10 left shift applied to accumulators before scaling
/*=============================================================================*/
#define fetch_ptr r0
#define fetch_ptr0 r2
#define in_depth3 r21
#define in_width_stride_depth r15 //in_width * stride * in_depth for next output
#define ptr_x0 r16 //tmp pointer to activations
#define ptr_x1 r23 //dynamic pointer to activations
#define next_outputs r27 //jump to input ptr for next set of outputs
#define ptr_w r20 //pointer to weights
#define in_width_depth r22 //width of input image in bytes
#define c4 r2 //shuffle size in final max and min find
#define ptr_z r24 //pointer to outputs
#define col_count r25 //column count, how much of width used
#define x07x04_x03x00 r11:10 //8 activations output 0
#define x07x04 r11 //4 activations output 0
#define x03x00 r10 //4 activations output 0
#define x17x14_x13x10 r13:12 //8 activations output 1
#define x17x14 r13 //4 activations output 1
#define x13x10 r12 //4 activations output 1
#define x27x24_x23x20 r11:10 //8 activations output 2
#define x27x24 r11 //4 activations output 2
#define x23x20 r10 //4 activations output 2
#define x37x34_x33x30 r7:6 //8 activations output 3
#define x37x34 r7 //4 activations output 3
#define x33x30 r6 //4 activations output 3
/*=============================================================================*/
#define PV(VSRC) .word (0x1DFFE020+VSRC) //debug vec reg
#define PS(SSRC) .word (0x1DFFE100+SSRC) //debug sca reg
#define s0 v0 //accumulator for output 0
#define s1 v1 //accumulator for output 1
#define s1s0 v1:0 //accumulator
#define s2 v2 //accumulator for output 2
#define s3 v3 //accumulator for output 3
#define s3s2 v3:2 //
#define d0 v4 //splatted suma value for output 0
#define d1 v4 //splatted suma value for output 1
#define d2 v4 //splatted suma value for output 2
#define d3 v5 //splatted suma value for output 3
#define y0 v8 //scaled result, output 0
#define y1 v9 //scaled result, output 1
#define y2 v10 //scaled result, output 2
#define y3 v11 //scaled result, output 3 (also packed output bytes)
#define yout v17 //
#define y3_prev v16 //copy of previous value
#define wsum v14 //initialized to in_offset*wsum + biasoffset
#define maxomaxe v13:12 //
#define maxe v12 //running max of scaled results
#define maxo v13 //
#define minomine v19:18 //
#define mine v18 //running min of scaled results
#define mino v19 //
#define biasvec v16 //
#define recipvec v15 //
#define vzero v20
#define SSR <<1:rnd:sat:shift //simplify mpy instruction
/*=============================================================================*/
#define FRAMESIZE 72
#define PARMW(n) sp+#(8+FRAMESIZE+4*(n)) //n-th stacked parameter (see list above)
{ allocframe(#FRAMESIZE) // 0th entry on stack is (72+8)/4 =20 ints
} {
memd(sp+#0) = r17:16 //save callee-saved pairs r17:16..r27:26
memd(sp+#8) = r19:18 //
} {
memd(sp+#16) = r21:20 //
memd(sp+#24) = r23:22 //
r23 = ##0x7fffffff //max pos
} {
memd(sp+#32) = r25:24 //
memd(sp+#40) = r27:26 //
mine = vsplat(r23) //running min starts at +0x7fffffff
} {
memw(sp+#48) = ptr_xi
memw(sp+#52) = ptr_wi
maxe = vnot(mine) // all -0x80000000
} {
active_sum = memw(PARMW(7)) //ptr_suma
in_depth = memw(PARMW(0))
} {
filt_height = memw(PARMW(2)) //extract filt_height
filt_width = memw(PARMW(1)) //extract filt_width
} {
memw(sp+#56) = ptr_zi
filt_width = lsr(filt_width, #1) //
out_height = memw(PARMW(3)) //number of output lines
} {
recip_level = memw(PARMW(5)) //
filt_sum = memw(PARMW(6))
in_width_depth = mpyi(in_width, in_depth) //in_depth * in_width line
} {
wsum = vmem(filt_sum+#0) //
recipvec = vsplat(recip_level) //
stride_v_h = memw(PARMW(9)) //stride_vert in high half, stride_horz in low half
} {
// next_suma_buf = memw(PARMW(8))
in_width_stride_depth=mpy(in_width_depth.L,stride_v_h.H)//
stride_v_h = zxth(stride_v_h) //keep horizontal stride only
} {
dcfetch(active_sum+#0<<6)
in_depth = mpyi(in_depth, stride_v_h) //in_depth pre-scaled by horizontal stride
stride_v_h = asl(stride_v_h, #2)
} {
next_outputs=mpyi(filt_height,in_width_depth)//filt_height*in_width*in_depth
//M0 = stride_v_h = #8
} {
next_outputs += mpyi(stride_v_h, #-4) //back up by 4 horizontal output steps
memw(sp+#60) = active_sum //
in_depth3 = addasl(in_depth, in_depth, #1) //3*in_depth
}
/* ---------------------------------------------------------------------------- */
/* .L_height: per-output-row setup.  Re-reads the saved input/output/suma
 * pointers, initializes the four accumulators s0..s3 to wsum + splat(suma[i]),
 * and advances the saved row pointers for the next iteration.
 * NOTE(review): r1 is next_suma_buf here and recip_shamt just after -- same
 * physical register, strictly sequential uses. */
.balign 32
.L_height:
{ active_sum = memw(sp+#60)
ptr_x0 = memw(sp+#48) //ptr_xi
loop0(.L_filt_height, filt_height) //[P, 0]for(fil=0;fil<h*depth/32;fil+=1){
} {
ptr_z = memw(sp+#56) //ptr_zi
col_count = out_width
sum0 = memw(active_sum++#8) //stride = 8
fetch_ptr = add(ptr_x0, in_width_depth) //
} {
next_suma_buf = memw(PARMW(8))
dcfetch(ptr_x0+#0<<6) //[0, 3]
d0 = vsplat(sum0)
} {
memw(sp+#60) += next_suma_buf //advance saved suma ptr to next row
sum1 = memw(active_sum++#8) //stride = 8
s0.w = vadd(wsum.w, d0.w) //s0 = wsum + suma[0]
} {
d1 = vsplat(sum1)
sum2 = memw(active_sum++#8) //stride = 8
dcfetch(fetch_ptr+#0<<6) //[0, 3]
} {
s1.w = vadd(wsum.w, d1.w)
d2 = vsplat(sum2)
sum3 = memw(active_sum++#8) //stride = 8
memw(sp+#56) += out_width_stride_depth //ptr_zi += out_width_stride_depth
} { // r1 is recip_shamt, except when it's used as next_suma_buf above.
recip_shamt = memw(PARMW(10))
memw(sp+#48) += in_width_stride_depth //ptr_xi+=in_width_stride_depth //ptr_x+=in_width*stride*in_depth)
s2.w = vadd(wsum.w, d2.w)
d3 = vsplat(sum3)
} {
out_height = add(out_height, #-1) //
s3.w = vadd(wsum.w, d3.w)
ptr_w = memw(sp+#52) //[P, 0]ptr_wi initialize filter pointer
fetch_ptr = add(ptr_x0, in_width_depth) //
}
/* ---------------------------------------------------------------------------- */
/* .L_width / .L_filt_height: innermost loop, one pass per filter row
 * (loop0 count = filt_height).  Each pass loads three weight vectors
 * (y0, y1, y0 reloaded) and vrmpy-accumulates activation bytes into the
 * four output accumulators s0..s3; activations for the four outputs are
 * read at offsets 0, in_depth, 2*in_depth and 3*in_depth (in_depth was
 * pre-scaled by the horizontal stride in the prologue).  Note the register
 * aliasing in the x-name #defines: e.g. x37x34 loaded at [1,2] feeds s0. */
.balign 32
.L_width:
.L_filt_height:
{
ptr_x1 = ptr_x0 //[P, 5]
ptr_x0 = add(ptr_x0, in_width_depth) //[E, 7]next line ptr_y keeps going
x27x24_x23x20 = memd(ptr_x0+in_depth<<#1) //[0, 0]
x37x34_x33x30 = memd(ptr_x0+in_depth3<<#0) //[0, 0]
} {
y0.cur = vmem(ptr_w++#1) //[0, 1]
s2.uw += vrmpy(y0.ub, x23x20.ub) //[0, 1]
s3.uw += vrmpy(y0.ub, x33x30.ub) //[0, 1]
x17x14_x13x10 = memd(ptr_x1+in_depth<<#0) //[0, 1]
} {
y1.cur = vmem(ptr_w++#1) //[0, 2]
s2.uw += vrmpy(y1.ub, x27x24.ub) //[0, 2]
s3.uw += vrmpy(y1.ub, x37x34.ub) //[0, 2]
x07x04_x03x00 = memd(ptr_x1++#1<<3) //[0, 2]stride = 4
} {
s0.uw += vrmpy(y0.ub, x03x00.ub) //[0, 3]
s1.uw += vrmpy(y0.ub, x13x10.ub) //[0, 3]
dcfetch(fetch_ptr+#0<<6) //[0, 3]
x37x34 = memw(ptr_x1+#0<<3) //[1, 2]
} {
s0.uw += vrmpy(y1.ub, x07x04.ub) //[0, 4]
s1.uw += vrmpy(y1.ub, x17x14.ub) //[0, 4]
x27x24_x23x20 = memd(ptr_x1+in_depth<<#1) //[1, 0]
x33x30 = memw(ptr_x1+in_depth3<<#0) //[1, 0]
} {
y0.cur = vmem(ptr_w++#1) //[1, 1]
s2.uw += vrmpy(y0.ub, x23x20.ub) //[1, 1]
s3.uw += vrmpy(y0.ub, x33x30.ub) //[1, 1]
x13x10 = memw(ptr_x1+in_depth<<#0) //[1, 1]
} {
s0.uw += vrmpy(y0.ub, x37x34.ub) //[1, 3]
s1.uw += vrmpy(y0.ub, x13x10.ub) //[1, 3]
fetch_ptr = add(fetch_ptr, in_width_depth) //
}:endloop0
/* ---------------------------------------------------------------------------- */
/* End of one 4-output group (#if 1 path): shift each accumulator left by
 * recip_shamt, scale by recipvec with a 32x32 fractional multiply
 * (vmpye + vmpyo:SSR), fold the scaled words into the running min/max,
 * then pack w -> h -> ub with saturation and store 128 output bytes.
 * Interleaved with this, s0..s3 are re-seeded (wsum + next suma values)
 * for the next 4-output group.  The interleave relies on Hexagon packet
 * semantics: sources in a packet read the pre-packet register values, so
 * e.g. "y3.w += vmpyo(s3.w,...)" below uses the OLD s3 even though the
 * same packet rewrites s3.  The #else branch is an alternative (disabled)
 * scheduling that tracks min/max on the unscaled accumulators instead. */
#if 1
// << by recip_shamt; scale, and pack. min/max are found on the results
// prior to packing.
{
s2.w = vasl(s2.w,recip_shamt)
ptr_x0 = sub(ptr_x0, next_outputs) //reset data ptr to next 4
dcfetch(active_sum+#0<<6)
} {
s3.w = vasl(s3.w,recip_shamt)
col_count = add(col_count, #-4) //
loop0(.L_filt_height, filt_height) //[P, 1]for(fil=0;fil<h*depth/32;fil+=1){
} {
y2.w = vmpye(s2.w, recipvec.uh) //
s0.w = vasl(s0.w,recip_shamt)
p1 = cmp.eq(col_count, #0) //
} {
y2.w+= vmpyo(s2.w, recipvec.h):SSR //
s1.w = vasl(s1.w,recip_shamt)
sum3 = memw(active_sum+#(3*8)) //#2<<2) //stride = 4
} {
mine.w = vmin(mine.w, y2.w) //see if y2 is min
maxe.w = vmax(maxe.w, y2.w) //see if y2 is max
y3.w = vmpye(s3.w, recipvec.uh) //
} {
d3 =vsplat(sum3)
dcfetch(ptr_x0+#0<<6)
sum2 = memw(active_sum+#(2*8)) //stride = 4
} {
s3.w = vadd(wsum.w,d3.w) //re-seed s3 for next group...
fetch_ptr = add(ptr_x0, in_width_depth) //
y3.w+= vmpyo(s3.w, recipvec.h):SSR //...while this reads the OLD s3 (same packet)
} {
mine.w = vmin(mine.w, y3.w) //see if y3 is min
maxe.w = vmax(maxe.w, y3.w) //see if y3 is max
y0.w = vmpye(s0.w, recipvec.uh) //
dcfetch(fetch_ptr+#0<<6)
} {
sum0 = memw(active_sum+#0) //stride = 4
d2 = vsplat(sum2)
fetch_ptr = add(ptr_x0, in_width_depth) //
} {
s2.w = vadd(wsum.w,d2.w)
y0.w+= vmpyo(s0.w, recipvec.h):SSR //
y3.h = vpack(y3.w, y2.w):sat //#sat8 <0, >255
} {
mine.w = vmin(mine.w, y0.w) //see if y0 is min
maxe.w = vmax(maxe.w, y0.w) //see if y0 is max
y1.w = vmpye(s1.w, recipvec.uh) //
} {
y1.w+= vmpyo(s1.w, recipvec.h):SSR //
} {
d0 = vsplat(sum0)
mine.w = vmin(mine.w, y1.w) //see if y1 is min
maxe.w = vmax(maxe.w, y1.w) //see if y1 is max
} {
s0.w = vadd(wsum.w,d0.w)
sum1 = memw(active_sum+#8) //stride = 4
y1.h = vpack(y1.w, y0.w):sat //#sat8 <0, >255
} {
ptr_w = memw(sp+#52) //[P, 0]ptr_wi initialize filter pointer
d1 = vsplat(sum1)
active_sum = add(active_sum,#8*4)
} {
s1.w = vadd(wsum.w, d1.w)
y3.ub = vpack(y3.h, y1.h):sat //#sat8 <0, >255
vmem(ptr_z++#1) = y3.new //#[E, ]store 2nd 32bytes
if(!p1) jump:t .L_width //
}//end cols per line
#else
{ y2.w = vmpye(s2.w, recipvec.uh) //
mine.w = vmin(mine.w, s2.w) //see if s2 is min
ptr_x0 = sub(ptr_x0, next_outputs) //reset data ptr to next 4
dcfetch(active_sum+#0<<6)
} {
y2.w+= vmpyo(s2.w, recipvec.h):SSR //
col_count = add(col_count, #-4) //
loop0(.L_filt_height, filt_height) //[P, 1]for(fil=0;fil<h*depth/32;fil+=1){
} {
y3.w = vmpye(s3.w, recipvec.uh) //#
mine.w = vmin(mine.w, s3.w) //[E, 4]see if s3 is min
maxe.w = vmax(maxe.w, s3.w) //[E, 3]
p1 = cmp.eq(col_count, #0) //
} {
maxe.w = vmax(maxe.w, s2.w) //[E, 4]
y3.w+= vmpyo(s3.w, recipvec.h):SSR //
sum0 = memw(active_sum++#8) //#2<<2) //stride = 4
} {
maxe.w = vmax(maxe.w, s0.w) //see if s0 is max
mine.w = vmin(mine.w, s0.w) //see if s0 is min
y0.w = vmpye(s0.w, recipvec.uh) //
} {
d0 =vsplat(sum0)
maxe.w = vmax(maxe.w, s1.w) //
mine.w = vmin(mine.w, s1.w) //see if s1 is min
dcfetch(ptr_x0+#0<<6) //[1, 2]
} {
y3.h = vpack(y3.w, y2.w):sat //#sat8 <0, >255
y0.w+= vmpyo(s0.w, recipvec.h):SSR //
sum1 = memw(active_sum++#8) //stride = 4
} {
s0.w = vadd(wsum.w, d0.w)
y1.w = vmpye(s1.w, recipvec.uh) //
fetch_ptr = add(ptr_x0, in_width_depth) //
} {
d1 = vsplat(sum1)
sum2 = memw(active_sum++#8) //stride = 4
} {
y1.w+= vmpyo(s1.w, recipvec.h):SSR //
s1.w = vadd(wsum.w, d1.w)
dcfetch(fetch_ptr+#0<<6) //[0, 3]
} {
d2 = vsplat(sum2)
sum3 = memw(active_sum++#8) //stride = 4
fetch_ptr = add(ptr_x0, in_width_depth) //
} {
s2.w = vadd(wsum.w, d2.w)
y1.h = vpack(y1.w, y0.w):sat //#>>16
ptr_w = memw(sp+#52) //[P, 0]ptr_wi initialize filter pointer
d3 = vsplat(sum3)
} {
s3.w = vadd(wsum.w, d3.w)
y3.ub = vpack(y3.h, y1.h):sat //#sat8 <0, >255
vmem(ptr_z++#1) = y3.new //#[E, ]store 2nd 32bytes
if(!p1) jump:t .L_width //
}//end cols per line
#endif
/* ---------------------------------------------------------------------------- */
{ p1 = cmp.eq(out_height, #0) //
if(!p1.new) jump:t .L_height //
}//end lines per block
/* ---------------------------------------------------------------------------- */
/* Epilogue: merge the running max/min into minmax_buf (PARMW(4)) --
 * vector 0 holds the max, vector 1 the min -- while restoring the
 * callee-saved register pairs; then deallocate the frame and return. */
{
ptr_max = memw(PARMW(4)) //ptr pre computed max value in output
r17:16 = memd(sp+#0)
} {
r19:18 = memd(sp+#8) //Q
maxo = vmem(ptr_max+#0)
} {
r21:20 = memd(sp+#16) //Q
maxe.w = vmax(maxo.w,maxe.w)
vmem(ptr_max+#0) = maxe.new
} {
r23:22 = memd(sp+#24) //Q
mino = vmem(ptr_max+#1)
} {
mine.w = vmin(mino.w,mine.w)
vmem(ptr_max+#1) = mine.new
}
/* ---------------------------------------------------------------------------- */
{
r25:24 = memd(sp+#32) //Q
r27:26 = memd(sp+#40) //Q
} {
dealloc_return //Q
}
.L_end:
/*=============================================================================*/
.size inconv2dbbb332_v60_asm, .L_end-inconv2dbbb332_v60_asm
/*=============================================================================*/
|
/* ========= file: hexagon/asm_src/memconvert_hvx.S (XiaoMi/nnlib) =========== */
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
* This operation does a group copy and scale.
* memconvert_hvx( uint8_t * dsto,
* uint8_t const *srco,
* int depth,
* int offset,
* int16_t gain,
* int stride,
* int iters.
* Input is iters*depth elements at 'srco', contiguous;
* Output is iters groups of depth elements each
* group i starts at dsto + i*stride
* each group is contiguous
*
* The scaling done in each group is
* tmp = ( in[i] + offset) * gain
* out[i] = clip_to_u8( tmp/32K (with rounding))
*
* i.e.
* for( int i = 0; i < iters; i++ )
* for (int j = 0; j< depth; j++ )
* dsto[ i*stride + j ] = scale( srco[i*depth + j], offset,gain)
*
*/
/* memconvert_hvx: entry and first-group setup.  See the file header above
 * for the C-level specification (copy 'iters' groups of 'depth' bytes,
 * applying clip_u8(((x + offset) * gain) / 32K, rounded)).  Stores are
 * predicated (qprolog/qepilog) so unaligned group edges write only the
 * bytes that belong to the group. */
.global memconvert_hvx
.type memconvert_hvx, @function
.balign 32
memconvert_hvx:
/* ============================================================================ */
#define dsto r0 //dest ptr
#define srco r1 //src ptr
#define length r2 //depth
#define offset r3 //offset
#define gain r4 //gain
#define stride r5 //stride
#define iters r6 //num of depths
/* ============================================================================ */
#define src r6
#define srcalign r13
#define dstalign r14
#define mid r7
#define end r8
#define sel0 r9
#define kernel r10
#define sel1 r11
#define c128 r12
#define dst r15
#define x0 v0
#define x1 v1
#define y0 v2
#define xa v12
#define vpredp v3
#define vprede v4
#define z1z0 v7:6
#define z1 v7
#define z0 v6
#define voffset v5
#define z3 v9
#define z2 v8
#define vone v10
#define vzero v11
#define qprolog q0
#define qepilog q1
/* ============================================================================ */
{
offset = combine(offset.L, offset.L) //duplicate 16-bit offset into both halves
gain = combine(gain.L, gain.L) //duplicate 16-bit gain into both halves
iters = memw(sp+#0) //7th arg comes from the stack
vzero = #0
} {
q3 = and(q3, !q3) //disable first store
vone = vnot(vzero)
sel0 = ##0x01010101 //position of qprolog
} {
loop1(.L_iters, iters)
voffset = vsplat(offset)
c128 = #128
dst = dsto
} {
src = srco
qprolog =vsetq(dsto) //qprolog vec predicate __|---
end = add(length, dsto) //last byte of block
dstalign = and(dsto, #127) //alignment of dst
} {
sel1 = add(sel0, sel0) //position of modified vec predicates
qepilog = vsetq(end) //setup epilog vec predicate
srcalign = and(src, #127) //alignment of src
end = and(end, #127) //alignment of last byte
} {
vpredp = vand(qprolog, sel1) //write prolog pred into vreg
vprede = vand(qepilog, sel1) //write epilog pred into vreg
xa.tmp = vmem(src+#0) //load first block of input data
z1z0.b = vshuffoe(vzero.b, xa.b) //[P widen bytes to halfwords (even/odd split)
}
/* .L_iters: one output group per pass (loop1).  Computes the src/dst
 * misalignment 'mid', selects plain or modified prolog/epilog store
 * predicates for this group, runs the (x + offset) * gain rounding/
 * saturating scale on the first vectors, and programs loop0 over the
 * group's whole 128-byte blocks. */
.balign 32
.L_iters:
{
mid = sub(srcalign, dstalign) //shift up or down src data
z0.h = vadd(z0.h, voffset.h) //[P
qprolog = or(qprolog, !qepilog) //modified prolog if no kernel
dstalign = add(dstalign, length) //amount of total data
} {
z1.h = vadd(z1.h, voffset.h) //[P
p1 = cmp.gt(mid, #-1) //see if we shift down
vpredp|= vand(qprolog, sel0) //store modified prolog
kernel = sub(length, end) //bytes in loop0
} {
z2.h = vmpy(z0.h, gain.h):<<1:rnd:sat//[P
if(p1) src = add(src, c128) //if shift up force reload
kernel = add(kernel, #127) //round kernel up to 128 nearest
p2 = cmp.gt(dstalign, #127) //if > 127 dont use modified prolog
} {
z3.h = vmpy(z1.h, gain.h):<<1:rnd:sat//[P
xa.tmp = vmem(src++#1) //[0, 0]load next block
z1z0.b = vshuffoe(vzero.b, xa.b) //[P
if(!p2) sel1 = sel0 //dont choose modified
} {
z0.h = vadd(z0.h, voffset.h) //[0, 1]
kernel= lsr(kernel, #7) //kernel in blocks of 128
qprolog = vand(vpredp, sel1) //select the qprolog
} {
x0.ub = vsat(z3.h, z2.h) //[P
z1.h = vadd(z1.h, voffset.h) //[0, 2]
qepilog = vand(vprede, sel1) //choose correct qepilog
} {
if(q3) vmem(dst+#0) = y0 //[1, 9]store out epilog data of PREVIOUS group
z2.h = vmpy(z0.h, gain.h):<<1:rnd:sat//[0, 3]
loop0(.L_blocks, kernel) //start main loop
} {
dst = dsto
dsto = add(dsto, stride) //next group's destination
srco = add(srco, length) //next group's source
z3.h = vmpy(z1.h, gain.h):<<1:rnd:sat//[0, 4]
}
/* ============================================================================ */
/* .L_blocks: steady-state 128-byte blocks.  Scales the next input vector,
 * realigns it to the destination with valign(mid), and stores; the group's
 * first store is predicated off via qprolog (then qprolog is cleared so
 * all later stores in the group go through), and q3 latches qepilog so the
 * group's final partial vector is stored at the top of .L_iters / after
 * :endloop1. */
.balign 32
.L_blocks:
{
xa.tmp = vmem(src++#1) //[1, 0]load next block
z1z0.b = vshuffoe(vzero.b, xa.b) //[P
} {
x1.ub = vsat(z3.h, z2.h) //[0, 6]
z0.h = vadd(z0.h, voffset.h) //[1, 1]
} {
z1.h = vadd(z1.h, voffset.h) //[1, 2]
q3 = or(qepilog, qepilog) //latch epilog predicate for the final store
} {
y0 = valign(x1, x0, mid) //[0, 8]align using the offset mid
x0 = x1 //[0, 8]reuse x1 in next loop
z2.h = vmpy(z0.h, gain.h):<<1:rnd:sat//[1, 3]
} {
if(!qprolog) vmem(dst++#1) = y0 //[0, 9]do prolog store as part of main loop
qprolog = vcmp.eq(vone.b, vzero.b) //[0, 9]and(qprolog, !qprolog) all subsequent prologs true
z3.h = vmpy(z1.h, gain.h):<<1:rnd:sat//[1, 4]
}:endloop0
/* ============================================================================ */
/* Re-derive predicates/pointers for the next outer iteration and finish
 * aligning the last scaled vector of the current group; after loop1 exits,
 * store the final predicated epilog vector and return. */
{
src = srco
qprolog =vsetq(dsto) //qprolog vec predicate __|---
end = add(length, dsto) //last byte of block
dstalign = and(dsto, #127) //alignment of dst
} {
x1.ub = vsat(z3.h, z2.h) //[1, 6]
sel1 = add(sel0, sel0) //position of modified vec predicates
qepilog = vsetq(end) //setup epilog vec predicate
srcalign = and(src, #127) //alignment of src
} {
end = and(end, #127) //alignment of last byte
vpredp = vand(qprolog, sel1) //write prolog pred into vreg
vprede = vand(qepilog, sel1) //write epilog pred into vreg
} {
xa.tmp = vmem(src+#0) //load first block of input data
z1z0.b = vshuffoe(vzero.b, xa.b) //[P
y0 = valign(x1, x0, mid) //[1, 8]align for final output
}:endloop1
{
if(q3) vmem(dst+#0) = y0 //[1, 9]store out epilog data
}{
jumpr r31
}
/*==============================================================================*/
.L_end:
.size memconvert_hvx, .L_end-memconvert_hvx
|
/* ========== file: hexagon/asm_src/autoquantize_h.S (XiaoMi/nnlib) ========== */
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
 Description
 Quantize floating-point inputs to 8-bit fixed-point data.
Requirements
Input/output must be aligned to 128 bytes. Final store will write only to amount needed.
*/
/* ------------------------------------------------------------------------------------------ */
/* quantize_floats_to_8b_asm: entry and prefetch setup.
 * Builds splatted constants (mantissa mask 0x007FFFFF, implicit-one bit
 * 0x00800000, exponent mask 0xFF, shift clamp 31), splits the work into
 * blocks of at most 4096 floats, and issues/waits for the first l2fetch
 * before entering the software-pipelined .quantize_loop. */
.text
.global quantize_floats_to_8b_asm
.balign 32
.type quantize_floats_to_8b_asm, @function
quantize_floats_to_8b_asm:
/* ------------------------------------------------------------------------------------------ */
#define in_ptr r0 //ptr to input
#define out_ptr r1 //ptr to output
#define elements r2 //number of floats to quantize
#define min_offset_in r3 //offset added after mantissa extraction
#define maxexp_in r4 //largest exponent over the input (sets the shift)
#define scaling r5 //fixed-point scale factor
#define mantbits r6 //23 = IEEE-754 single mantissa width
#define c007f_ffff r7 //
#define c0080_0000 r8 //
#define c0000_00ff r9 //
#define c31 r10 //
#define partial r3 //-(elements): byte count for the final masked store
#define c128 r8 //
#define lpcount r9 //
#define fr r7 //
#define numvec r10 //
#define maxblksize r11 //
#define block r12 //
#define fetch_adr r13 //
#define l2fparam_l r14 //
#define l2fparam_h r15 //
#define l2fparam r15:14 //
#define boundary r28
/* ------------------------------------------------------------------------------------------ */
/* vals(n)..neg_mants(n) are the pipeline-stage views of v0..v7; the (n)
 * argument is documentation only -- all stages of one name share a vreg. */
#define vals(n) v0
#define s_exp(n) v1
#define exp(n) v2
#define mant(n) v3
#define shift(n) v4
#define smant(n) v5
#define mants(n) v6
#define neg_mants(n) v7
#define word0 v8
#define word1 v9
#define word2 v10
#define word3 v11
#define halves0 v12
#define halves1 v13
#define bytes v12
#define maxexp v14
#define min_offset v15
#define const007f_ffff v16
#define const0080_0000 v17
#define const0000_00ff v18
#define const31 v19
#define zero v20 //const zero
#define mask v21
#define vscale v22
/* --------------------------------------------------------------------------------------- */
{ p0 = cmp.gt(elements,#0) //nothing to do for elements <= 0
if (!p0.new) jumpr:nt r31 //
maxblksize = #4096 //
}{
c007f_ffff = ##0x007FFFFF //
c0080_0000 = ##0x00800000 //
}{
const007f_ffff = vsplat(c007f_ffff) //
const0080_0000 = vsplat(c0080_0000) //
c31 = #31 //
c128 = #128 //
}{
const31 = vsplat(c31) //
c0000_00ff = #0x00FF //
block = min(elements,maxblksize) //
}{
const0000_00ff = vsplat(c0000_00ff) //
lpcount = add(block,#127) //
numvec = lsr(block,#5) //
}{
boundary = addasl(in_ptr,elements,#2) //one past the last valid input byte
l2fparam_h = #128 //
l2fparam_l = combine(c128.l, numvec.l) //
lpcount = lsr(lpcount,#7) //loop count in units of 128 floats
}{
// scaling = combine(scaling.l,scaling.l) //
maxexp = vsplat(maxexp_in) //
min_offset = vsplat(min_offset_in) //
partial = neg(elements) //
}{
zero = #0 //
vscale = vsplat(scaling) //
mantbits = #23 //
p3 = sp1loop0(.quantize_loop,lpcount) //pipelined loop: p3 gates the store on iter 0
}{
elements -= asl(lpcount,#7) //
l2fetch(in_ptr,l2fparam) //
}
.quantize_wait_l2fetch:
{ fr = usr // wait_for_l2fetch()
}{
p0 = cmp.gt(fr,#-1) //spin while USR sign bit set (l2fetch busy)
if (!p0.new) jump:nt .quantize_wait_l2fetch //
}
.quantize_outerloop:
{ fetch_adr = in_ptr //
block = min(elements,maxblksize) // block size for next iteration
nop; nop //
}{
fetch_adr += asl(lpcount,#9) //prefetch target = end of current block
p2 = cmp.gt(block,#0) //
numvec = lsr(block,#5) //
lpcount = add(block,#127) //
}{
lpcount = lsr(lpcount,#7) //
if !p2 jump .quantize_loop //
l2fparam_l = combine(l2fparam_l.h,numvec.l) //
nop //
}{
l2fetch(fetch_adr,l2fparam) //
nop; nop; nop //
}
/* --------------------------------------------------------------------------------------- */
/* .quantize_loop: software-pipelined core, 4 input vectors (128 floats) per
 * iteration; ops tagged [1,..] finish the previous iteration's data.
 * Per float: extract sign+exponent and mantissa, OR in the implicit
 * leading 1, right-shift by (maxexp - exp) clamped to 31, negate when the
 * float was negative (Q0), add min_offset, multiply by vscale, then pack
 * w -> h -> ub with saturation.  in_ptr is pulled back by 128 whenever it
 * would pass 'boundary', so pipeline over-reads reload valid data. */
.balign 32
.quantize_loop:
{ vals(0).cur = vmem(in_ptr++#1):nt //[0, 0]
s_exp(0).w = vasr(vals(0).w,mantbits) //[0, 0]sign+exponent
smant(2).w = vadd(smant(2).w,min_offset.w) //[1,16]
Q0 = vcmp.gt(zero.w,s_exp(3).w) //[1,16]negative input?
}{
exp(0) = vand(s_exp(0),const0000_00ff) //[0, 1]
mant(0) = vand(vals(0), const007f_ffff) //[0, 1]
mants(3).w = vlsr(mant(3).w,shift(3).w) //[1,17]
halves0.uh = vpack(word1.w,word0.w):sat //[1,17]
}{
shift(0).w = vsub(maxexp.w,exp(0).w) //[0, 2]
p0 = cmp.gtu(boundary,in_ptr) //[0, 2]
neg_mants(3).w = vsub(zero.w,mants(3).w) //[1,18]
word2.w = vmpye(smant(2).w,vscale.uh) //[1,18]
}{
mant(0) = vor(mant(0),const0080_0000) //[0, 3]restore implicit leading 1
shift(0).w = vmin(shift(0).w,const31.w) //[0, 3]
if (!p0) in_ptr = sub(in_ptr,c128) //[0, 3]clamp over-read back into buffer
smant(3) = vmux(Q0,neg_mants(3),mants(3)) //[1,19]
}{
Q0 = vcmp.gt(zero.w,s_exp(0).w) //[0, 4]
vals(1).cur = vmem(in_ptr++#1):nt //[0, 4]
s_exp(1).w = vasr(vals(1).w,mantbits) //[0, 4]
smant(3).w = vadd(smant(3).w,min_offset.w) //[1,20]
}{
mants(0).w = vlsr(mant(0).w,shift(0).w) //[0, 5]
exp(1) = vand(s_exp(1),const0000_00ff) //[0, 5]
mant(1) = vand(vals(1), const007f_ffff) //[0, 5]
p0 = cmp.gtu(boundary,in_ptr) //[0, 5]
}{
neg_mants(0).w = vsub(zero.w,mants(0).w) //[0, 6]
shift(1).w = vsub(maxexp.w,exp(1).w) //[0, 6]
if (!p0) in_ptr = sub(in_ptr,c128) //[0, 6]
word3.w = vmpye(smant(3).w,vscale.uh) //[1,21]
}{
smant(0) = vmux(Q0,neg_mants(0),mants(0)) //[0, 7]
mant(1) = vor(mant(1),const0080_0000) //[0, 7]
shift(1).w = vmin(shift(1).w,const31.w) //[0, 7]
}{
smant(0).w = vadd(smant(0).w,min_offset.w) //[0, 8]
Q0 = vcmp.gt(zero.w,s_exp(1).w) //[0, 8]
vals(2).cur = vmem(in_ptr++#1):nt //[0, 8]
s_exp(2).w = vasr(vals(2).w,mantbits) //[0, 8]
}{
mants(1).w = vlsr(mant(1).w,shift(1).w) //[0, 9]
exp(2) = vand(s_exp(2),const0000_00ff) //[0, 9]
mant(2) = vand(vals(2), const007f_ffff) //[0, 9]
halves1.uh = vpack(word3.w,word2.w):sat //[1,22]
}{
word0.w = vmpye(smant(0).w,vscale.uh) //[0,10]
neg_mants(1).w = vsub(zero.w,mants(1).w) //[0,10]
shift(2).w = vsub(maxexp.w,exp(2).w) //[0,10]
p0 = cmp.gtu(boundary,in_ptr) //[0,10]
}{
smant(1) = vmux(Q0,neg_mants(1),mants(1)) //[0,11]
mant(2) = vor(mant(2),const0080_0000) //[0,11]
shift(2).w = vmin(shift(2).w,const31.w) //[0,11]
if (!p0) in_ptr = sub(in_ptr,c128) //[0,11]
}{
smant(1).w = vadd(smant(1).w,min_offset.w) //[0,12]
Q0 = vcmp.gt(zero.w,s_exp(2).w) //[0,12]
vals(3).cur = vmem(in_ptr++#1):nt //[0,12]
s_exp(3).w = vasr(vals(3).w,mantbits) //[0,12]
}{
mants(2).w = vlsr(mant(2).w,shift(2).w) //[0,13]
exp(3) = vand(s_exp(3),const0000_00ff) //[0,13]
mant(3) = vand(vals(3), const007f_ffff) //[0,13]
bytes.b = vpacko(halves1.h,halves0.h) //[1,23]
}{
word1.w = vmpye(smant(1).w,vscale.uh) //[0,14]
neg_mants(2).w = vsub(zero.w,mants(2).w) //[0,14]
shift(3).w = vsub(maxexp.w,exp(3).w) //[0,14]
}{
smant(2) = vmux(Q0,neg_mants(2),mants(2)) //[0,15]
mant(3) = vor(mant(3),const0080_0000) //[0,15]
shift(3).w = vmin(shift(3).w,const31.w) //[0,15]
if p3 vmem(out_ptr++#1) = bytes //[1,24]store gated off on first iteration
}:endloop0
/* --------------------------------------------------------------------------------------- */
{ elements -= asl(lpcount,#7) //
lc0 = lpcount //
}{
if p2 jump .quantize_outerloop //more blocks remain
}
/* .lp1end: drain the software pipeline for the last partially-processed
 * vectors and store the final output vector under a byte predicate (Q1)
 * built from the residual element count ('partial' = -elements), so only
 * the valid tail bytes are written. */
.lp1end:
{ smant(2).w = vadd(smant(2).w,min_offset.w) //[1,16]
Q0 = vcmp.gt(zero.w,s_exp(3).w) //[1,16]
mask = vnot(zero) //all-ones byte mask
}{
mants(3).w = vlsr(mant(3).w,shift(3).w) //[1,17]
halves0.uh = vpack(word1.w,word0.w):sat //[1,17]
}{
neg_mants(3).w = vsub(zero.w,mants(3).w) //[1,18]
word2.w = vmpye(smant(2).w,vscale.uh) //[1,18]
}{
smant(3) = vmux(Q0,neg_mants(3),mants(3)) //[1,19]
mask = valign(zero,mask,partial) //keep only the valid tail bytes
}{
smant(3).w = vadd(smant(3).w,min_offset.w) //[1,20]
}{
word3.w = vmpye(smant(3).w,vscale.uh) //[1,21]
Q1 = vcmp.gt(mask.ub,zero.ub) //byte predicate for the masked store
}{
halves1.uh = vpack(word3.w,word2.w):sat //[1,22]
}{
bytes.b = vpacko(halves1.h,halves0.h) //[1,23]
}{
if (Q1) vmem(out_ptr+#0) = bytes //[1,24]final, partially-masked store
}
/* --------------------------------------------------------------------------------------- */
{ jumpr r31 //return
}
.L_end:
/*=============================================================================*/
.size quantize_floats_to_8b_asm, .L_end-quantize_floats_to_8b_asm
/*=============================================================================*/
/* ------------------------------------------------------------------------------------------ */
/*
 Description
find min/max of floats, and compare with values in min/max buffers.
Requirements
Input/output must be aligned to 128 bytes.
*/
/* ------------------------------------------------------------------------------------------ */
.global find_minmax_of_floats_asm
.balign 32
.type find_minmax_of_floats_asm, @function
find_minmax_of_floats_asm:
/* ------------------------------------------------------------------------------------------ */
#define ptr_in r0 //ptr to input
#define length r1 //
#define ptr_minmax r2 //ptr to minmax
#define c80000000 r3 //
#define c4 r4 //
#define nrot r5 //
#define length_in_bytes r5 //
/* ------------------------------------------------------------------------------------------ */
#define new_in v0 //
#define new1 v1 //
#define fmax v2 //
#define fmin v3 //
#define new_t v4 //
#define const80000000 v5 //
#define const0 v6
#define minmax v0 //
#define minmax_t v1 //
#define shuf v7:6 //
#define shuf_L v6 //
#define shuf_H v7 //
/* --------------------------------------------------------------------------------------- */
{ c80000000 = ##0x80000000 //
fmax = #0 //
maxblksize = #4096 //
}{
block = min(length,maxblksize) //
const80000000 = vsplat(c80000000) //
fmin = #0 //
l2fparam_h = #128 //
}{
c4 = #4 //
const0 = #0 //
numvec = lsr(block,#5) //
length_in_bytes = asl(length,#2) //
}{
p0 = cmp.gt(numvec,#0) //
l2fparam_l = combine(l2fparam_h.l, numvec.l) //
Q1 = vsetq(length_in_bytes) //
length -= asl(numvec,#5) //
}{
new1 = #0 //
if !p0 jump .find_minmax_lp1end //
}{
l2fetch(ptr_in,l2fparam) //
}
.find_minmax_outerloop:
{ loop0(.find_minmax_loop,numvec) //
fetch_adr = ptr_in //
block = min(length,maxblksize) // block size for next iteration
}{
fetch_adr += asl(numvec,#7) //
numvec = lsr(block,#5) //
p2 = cmp.gt(block,#31) //
}
.find_minmax_wait_l2fetch:
{ fr = usr // wait_for_l2fetch()
nop //
}{
p0 = cmp.gt(fr,#-1) //
if (!p0.new) jump:nt .find_minmax_wait_l2fetch //
}{
if !p2 jump .find_minmax_loop //
l2fparam_l = combine(l2fparam_l.h,numvec.l) //
}{
l2fetch(fetch_adr,l2fparam) //
nop //
}
/* --------------------------------------------------------------------------------------- */
.balign 32
.find_minmax_loop:
{ new_in.tmp = vmem(ptr_in++#1) //[0, 0]
fmax.w = vmax(fmax.w,new_in.w) //[0, 0]
new1 = vxor(new_in,const80000000) //[0, 0]
fmin.w = vmax(fmin.w,new1.w) //[1, 1]
}:endloop0
/* --------------------------------------------------------------------------------------- */
.find_minmax_lp0end:
{ length -= asl(numvec,#5) //
if p2 jump .find_minmax_outerloop //
}
.find_minmax_lp1end:
{ fmin.w = vmax(fmin.w,new1.w) //[1, 1]
p0 = bitsclr(length,#31) //
if (p0.new) jump:nt .find_minmax_cont //
}{
new_t.tmp = vmem(ptr_in+#0) //
new_in = vmux(Q1,new_t,const0) //
}{
fmax.w = vmax(fmax.w,new_in.w) //
new1 = vxor(new_in,const80000000) //
}{
fmin.w = vmax(fmin.w,new1.w) //
}
/* --------------------------------------------------------------------------------------- */
.find_minmax_cont:
{ shuf = vshuff(fmax,fmin,c4) //
loop0(.L_reduce_loop,#4) //
}{
minmax.w = vmax(shuf_L.w,shuf_H.w) //
nrot = #8 //
}
.L_reduce_loop:
{ minmax_t = vror(minmax,nrot) //
}{
minmax.w = vmax(minmax.w,minmax_t.w) //
nrot = asl(nrot,#1) //
}:endloop0
{ vmem(ptr_minmax+#0) = minmax //
}{
jumpr r31 //return
}
.L_end_1:
/*=============================================================================*/
.size find_minmax_of_floats_asm, .L_end_1-find_minmax_of_floats_asm
/*=============================================================================*/
|
XiaoMi/nnlib | 22,657 | hexagon/asm_src/gemmacbbw_h.S |
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
*/
/*======================================================================*/
/* FUNCTIONS : gemmpybbw_asm */
/* */
/* DESCRIPTION */
/* Perform gemm matrix multiply, result left at 32bits */
/* */
/* ARCHITECTURE : QDSP6V6 + HVX */
/*======================================================================*/
/* REVISION HISTORY: */
/* ================= */
/* */
/* Author Date Comments */
/* ------------------------------------------------------------- */
/* DJH 03/07/16 created */
/* DJH 05/10/16 added post add for x and y offset*/
/* DJH 07/10/16 rewrote pre-transpose */
/*======================================================================*/
/* CYCLE-COUNT: */
/* -> 16*K*N/32+11*N/4+24 */
/* */
/* MEMORY */
/* CODESIZE = 1040 bytes */
/* STACK = 48 bytes */
/* ASSUMPTIONS */
/* y and z are 128 byte aligned */
/* x is 8byte aligned */
/* N%4=0 K%8=0 M%128=0 */
/* C MODEL */
/* N = Nlen */
/* K = Klen | Kstride */
/* M = Mlen | Mstride */
/*======================================================================*/
#if 0
void gemmacbbw_cn(uint8 * a, uint8 * b, int * c, int N, int M, int K) {
int i, j, k;
int32 sum;
uint8 a_val, b_val;
for (j=0; j < M; j++) {
for (i=0; i < N; i++) {
sum = 0;
for (k=0; k < K; k++) {
a_val = a[i*K+k];
b_val = b[k*M+j];
sum += a_val * b_val ;
}
c[i*M+j] += sum;
}
}
return;
}
#endif
/*=============================================================================*/
.text
.file "gemmacbbw_h.S"
/*=============================================================================*/
/* gemmacbbw_asm -- accumulating byte x byte -> 32-bit gemm (see C model above)*/
/*   z[i*M+j] += sum_k x[i*K+k] * y[k*M+j]                                     */
/* In:  r0 = ptr_x   activations, 8-byte aligned                               */
/*      r1 = ptr_yi  weights, 128-byte aligned                                 */
/*      r2 = ptr_z   32-bit accumulators (read-modify-write), 128-byte aligned */
/*      r3 = n       number of patches; processed 8 rows per outer-loop pass   */
/*      r4 = m       stride of weight matrix (k*32), scaled to bytes below     */
/*      r5 = k       packed (ksize << 16) | kstride                            */
/* Uses v0-v11 (z accumulators + weight regs); r16-r28 saved/restored on stack */
/*=============================================================================*/
          .global gemmacbbw_asm
          .balign 32
          .type gemmacbbw_asm, @function
gemmacbbw_asm:
/*=============================================================================*/
#define ptr_x         r0    //data
#define ptr_yi        r1    //weights
#define ptr_z         r2    //results
#define n             r3    //n %8 number of patches
#define m             r4    //is stride of weights matrix k*32 always 32 wide
#define k             r5    //ksize %16 | k - stride
/*=============================================================================*/
#define ksize         r28   //amount of data in this job
#define ptr_zo        r3    //results (aliases n, which is consumed by loop1 setup)
#define ki            r6    //inner-loop trip count (ksize/16 - 1)
#define kstride7      r8    //7*kstride minus ksize adjustment: jump to next block
#define ptr_y         r9    //current weight pointer
#define kjump         r4    //16-8kstride
#define l1xptri0      r7    //L1 dcfetch pointer, row group 0
#define l1xptri1      r10   //L1 dcfetch pointer, row group 1
#define l1xptri2      r11   //L1 dcfetch pointer, row group 2
#define l1xptri3      r12   //L1 dcfetch pointer, row group 3
#define l1xptr        r13   //base of L1 prefetch region, 8 k-lines ahead
#define skip          r14   //dcfetch stride within a row pair
#define back          r15   //dcfetch stride back to next row pair
#define kk            M0    //modifier register: activation row stride
#define mm            M1    //modifier register: weight/output column stride
/* activation register pairs; bit maps show pipeline liveness per packet      */
#define x07x04x03x00  r17:16 //1111-----------1
#define x0fx0cx0bx08  r23:22 //11-------------1
#define x17x14x13x10  r19:18 //1111------------
#define x1fx1cx1bx18  r21:20 //11--------------
#define x27x24x23x20  r21:20 //---111----------
#define x2fx2cx2bx28  r23:22 //---11111--------
#define x37x34x33x30  r19:18 //----11----------
#define x3fx3cx3bx38  r17:16 //----1111--------
#define x47x44x43x40  r21:20 //-------111------
#define x4fx4cx4bx48  r19:18 //-------11111----
#define x57x54x53x50  r25:24 //--------11------
#define x5fx5cx5bx58  r17:16 //--------1111----
#define x67x64x63x60  r23:22 //----------111---
#define x6fx6cx6bx68  r25:24 //----------11111-
#define x77x74x73x70  r27:26 //-----------111--
#define x7fx7cx7bx78  r21:20 //-----------11111
#define x03x00        r16   //1111-----------1
#define x0bx08        r22   //11-------------1
#define x13x10        r18   //1111------------
#define x1bx18        r20   //11--------------
#define x23x20        r20   //---111----------
#define x2bx28        r22   //---11111--------
#define x33x30        r18   //----11----------
#define x3bx38        r16   //----1111--------
#define x43x40        r20   //-------111------
#define x4bx48        r18   //-------11111----
#define x53x50        r24   //--------11------
#define x5bx58        r16   //--------1111----
#define x63x60        r22   //----------111---
#define x6bx68        r24   //----------11111-
#define x73x70        r26   //-----------111--
#define x7bx78        r20   //-----------11111
#define x07x04        r17   //1111-----------1
#define x0fx0c        r23   //11-------------1
#define x17x14        r19   //1111------------
#define x1fx1c        r21   //11--------------
#define x27x24        r21   //---111----------
#define x2fx2c        r23   //---11111--------
#define x37x34        r19   //----11----------
#define x3fx3c        r17   //----1111--------
#define x47x44        r21   //-------111------
#define x4fx4c        r19   //-------11111----
#define x57x54        r25   //--------11------
#define x5fx5c        r17   //--------1111----
#define x67x64        r23   //----------111---
#define x6fx6c        r25   //----------11111-
#define x77x74        r27   //-----------111--
#define x7fx7c        r21   //-----------11111
/*=============================================================================*/
#define z0            v0    //32-bit accumulators, output row 0
#define z1            v1    //output row 1
#define z2            v2    //output row 2
#define z3            v3    //output row 3
#define z4            v4    //output row 4
#define z5            v5    //output row 5
#define z6            v6    //output row 6
#define z7            v7    //output row 7
#define y0            v8    //weights, 32 cols x 4 k-values
#define y1            v9    //weights
#define y2            v10   //weights
#define y3            v11   //weights
/*=============================================================================*/
   { allocframe(#56)                            //stack frame for callee-saved regs
     m = asl(m, #2)                             //ints: byte stride of weight matrix
   } {
     memd(sp+#0)  = r17:16                      //save callee-saved pairs
     memw(sp+#48) = r28                         //
     ksize = lsr(k, #16)                        //extract work (amount of k to do)
     k = zxth(k)                                //extract stride
   } {
     memd(sp+#16) = r21:20                      //
     memd(sp+#24) = r23:22                      //
     n = lsr(n, #3)                             //divide by 8: 8 output rows per pass
     mm = m                                     //modifier = weight/output stride
   } {
     memd(sp+#8) = r19:18                       //
     kk = k                                     //modifier = stride k
   } {
     memd(sp+#32) = r25:24                      //
     loop1(.L_loopN, n)                         //[ , P]for(i=0; i < n; i+=8){
   } {
     memd(sp+#40) = r27:26                      //
     kstride7 = asl(k, #3)                      //8*kstride (7*kstride after sub below)
     ptr_zo = ptr_z                             //output store pointer trails loads
     z0 = vmem(ptr_z++mm)                       //[P, 0]preload accumulators
   } {
     z1 = vmem(ptr_z++mm)                       //[P, 0]
     ki = lsr(ksize, #4)                        //k / 16
     kstride7 = sub(kstride7, k)                //7*kstride
     l1xptr = ptr_x                             //l1 fetch 8 klines ahead
   } {
     z2 = vmem(ptr_z++mm)                       //[P, 0]
     ptr_y = ptr_yi                             //[ , P]
     kjump = sub(#16, kstride7)                 //zag back to next column of lines
   } {
     kstride7 += sub(k, ksize)                  //net advance to next k block
     z3 = vmem(ptr_z++mm)                       //[P, 0]
     l1xptri0 = add(l1xptr, #48)                //[ , P]make temp copy
     k = add(k, k)                              //k = 2*kstride (row-pair stride)
   } {
     y0 = vmem(ptr_y++#2)                       //[0, 0]32x4
     dcfetch(l1xptri0+#0)                       //[0, 0]prefetch next line
     skip = lsr(k, #1)                          //= kstride
     l1xptri1 = add(l1xptr, k)                  //[ , P]make temp copy
   } {
     y1 = vmem(ptr_y+#-1)                       //[0, 1]32x4
     l1xptri0 = add(l1xptri0, skip)             //[0, 1]next line
     l1xptr= addasl(l1xptr,k,#2)                //[P, ]advance by 8k strip
   } {
     x0fx0cx0bx08 = memd(ptr_x+#8)              //[0, 2]
     back = sub(#32, skip)                      //step back to partner row, +32B
     z4 = vmem(ptr_z++mm)                       //[P, 0]
   } {
     x07x04x03x00 = memd(ptr_x++kk)             //[0, 2]
     z5 = vmem(ptr_z++mm)                       //[P, 0]
   } {
     l1xptri2 = add(l1xptri1, k)                //[ , P]make temp copy
     x1fx1cx1bx18 = memd(ptr_x+#8)              //[0, 3]
     z6 = vmem(ptr_z++mm)                       //[P, 0]
     ki = add(ki, #-1)                          //last k-block peeled into epilogue
   } {
     l1xptri3 = add(l1xptri2, k)                //[ , P]make temp copy
     x17x14x13x10 = memd(ptr_x++kk)             //[0, 3]
     z7 = vmem(ptr_z++mm)                       //[P, 0]
     loop0(.L_loopK, ki)                        //[P, 9]ki is k1/4 - 2
   }
/*============================================================================*/
          .balign 32
.L_loopN:
.L_loopK:
   {
     y2.cur = vmem(ptr_y++#2)                   //[0, 4]32x4; .cur: used this packet
     z0.uw += vrmpy(y2.ub, x0bx08.ub)           //[0, 4]
     z1.uw += vrmpy(y2.ub, x1bx18.ub)           //[0, 4]
     dcfetch(l1xptri1+#0)                       //[0, 4]prefetch next line
   } {
     y3.cur = vmem(ptr_y+#-1)                   //[0, 5]32x4
     z0.uw += vrmpy(y3.ub, x0fx0c.ub)           //[0, 5]
     z1.uw += vrmpy(y3.ub, x1fx1c.ub)           //[0, 5]
     l1xptri1 = add(l1xptri1, skip)             //[0, 5]next line
   } {
     z0.uw += vrmpy(y0.ub, x03x00.ub)           //[0, 6]
     z1.uw += vrmpy(y0.ub, x13x10.ub)           //[0, 6]
     x2fx2cx2bx28 = memd(ptr_x+#8)              //[0, 6]
     x27x24x23x20 = memd(ptr_x++kk)             //[0, 6]
   } {
     z0.uw += vrmpy(y1.ub, x07x04.ub)           //[0, 7]
     z1.uw += vrmpy(y1.ub, x17x14.ub)           //[0, 7]
     x3fx3cx3bx38 = memd(ptr_x+#8)              //[0, 7]
     x37x34x33x30 = memd(ptr_x++kk)             //[0, 7]
   } {
     z2.uw += vrmpy(y0.ub, x23x20.ub)           //[0, 8]
     z3.uw += vrmpy(y0.ub, x33x30.ub)           //[0, 8]
     dcfetch(l1xptri2+#0)                       //[0, 8]prefetch next line
     l1xptri2 = add(l1xptri2, skip)             //[0, 8]next line
   } {
     z2.uw += vrmpy(y1.ub, x27x24.ub)           //[0, 9]
     z3.uw += vrmpy(y1.ub, x37x34.ub)           //[0, 9]
     dcfetch(l1xptri3+#0)                       //[0, 9]prefetch next line
     l1xptri3 = add(l1xptri3, skip)             //[0, 9]next line
   } {
     z2.uw += vrmpy(y2.ub, x2bx28.ub)           //[0,10]
     z3.uw += vrmpy(y2.ub, x3bx38.ub)           //[0,10]
     x4fx4cx4bx48 = memd(ptr_x+#8)              //[0,10]
     x47x44x43x40 = memd(ptr_x++kk)             //[0,10]
   } {
     z2.uw += vrmpy(y3.ub, x2fx2c.ub)           //[0,11]
     z3.uw += vrmpy(y3.ub, x3fx3c.ub)           //[0,11]
     x5fx5cx5bx58 = memd(ptr_x+#8)              //[0,11]
     x57x54x53x50 = memd(ptr_x++kk)             //[0,11]
   } {
     z4.uw += vrmpy(y0.ub, x43x40.ub)           //[0,12]
     z5.uw += vrmpy(y0.ub, x53x50.ub)           //[0,12]
     skip = back                                //[0,12]swap skip<->back: both reads see
     back = skip                                //[0,12]pre-packet values (packet semantics)
   } {
     z4.uw += vrmpy(y1.ub, x47x44.ub)           //[0,13]
     z5.uw += vrmpy(y1.ub, x57x54.ub)           //[0,13]
     x6fx6cx6bx68 = memd(ptr_x+#8)              //[0,13]
     x67x64x63x60 = memd(ptr_x++kk)             //[0,13]
   } {
     z4.uw += vrmpy(y2.ub, x4bx48.ub)           //[0,14]
     z5.uw += vrmpy(y2.ub, x5bx58.ub)           //[0,14]
     x7fx7cx7bx78 = memd(ptr_x+#8)              //[0,14]
     x77x74x73x70 = memd(ptr_x+#0)              //[0,14]
   } {
     z4.uw += vrmpy(y3.ub, x4fx4c.ub)           //[0,15]
     z5.uw += vrmpy(y3.ub, x5fx5c.ub)           //[0,15]
     ptr_x = add(ptr_x, kjump)                  //[0,15]zag back for next k column
   } {
     z6.uw += vrmpy(y0.ub, x63x60.ub)           //[0,16]
     z7.uw += vrmpy(y0.ub, x73x70.ub)           //[0,16]
     y0 = vmem(ptr_y++#2)                       //[1, 0]32x4
     dcfetch(l1xptri0+#0)                       //[1, 0]prefetch next line
   } {
     z6.uw += vrmpy(y1.ub, x67x64.ub)           //[0,17]
     z7.uw += vrmpy(y1.ub, x77x74.ub)           //[0,17]
     y1 = vmem(ptr_y+#-1)                       //[1, 1]32x4
     l1xptri0 = add(l1xptri0, skip)             //[1, 1]next line
   } {
     z6.uw += vrmpy(y2.ub, x6bx68.ub)           //[0,18]
     z7.uw += vrmpy(y2.ub, x7bx78.ub)           //[0,18]
     x0fx0cx0bx08 = memd(ptr_x+#8)              //[1, 2]
     x07x04x03x00 = memd(ptr_x++kk)             //[1, 2]
   } {
     z6.uw += vrmpy(y3.ub, x6fx6c.ub)           //[0,19]
     z7.uw += vrmpy(y3.ub, x7fx7c.ub)           //[0,19]
     x1fx1cx1bx18 = memd(ptr_x+#8)              //[1, 3]
     x17x14x13x10 = memd(ptr_x++kk)             //[1, 3]
   }:endloop0
/* loop epilogue: drain the final k block while storing and reloading the z   */
/* accumulators for the next 8-row group ([E, ] = epilogue store)             */
   {
     y2.cur = vmem(ptr_y++#2)                   //[1, 4]32x4
     z0.uw += vrmpy(y2.ub, x0bx08.ub)           //[1, 4]
     z1.uw += vrmpy(y2.ub, x1bx18.ub)           //[1, 4]
     l1xptri0 = l1xptr                          //[ , P]make temp copy
   } {
     y3.cur = vmem(ptr_y+#-1)                   //[1, 5]32x4
     z0.uw += vrmpy(y3.ub, x0fx0c.ub)           //[1, 5]
     z1.uw += vrmpy(y3.ub, x1fx1c.ub)           //[1, 5]
     l1xptri1 = add(l1xptr, k)                  //[ , P]make temp copy
   } {
     z0.uw += vrmpy(y0.ub, x03x00.ub)           //[1, 6]
     z1.uw += vrmpy(y0.ub, x13x10.ub)           //[1, 6]
     x2fx2cx2bx28 = memd(ptr_x+#8)              //[1, 6]
     x27x24x23x20 = memd(ptr_x++kk)             //[1, 6]
   } {
     z0.uw += vrmpy(y1.ub, x07x04.ub)           //[1, 7]
     z1.uw += vrmpy(y1.ub, x17x14.ub)           //[1, 7]
     x3fx3cx3bx38 = memd(ptr_x+#8)              //[1, 7]
     x37x34x33x30 = memd(ptr_x++kk)             //[1, 7]
   } {
     z2.uw += vrmpy(y0.ub, x23x20.ub)           //[1, 8]
     z3.uw += vrmpy(y0.ub, x33x30.ub)           //[1, 8]
     vmem(ptr_zo++mm) = z0                      //[E, ]row 0 done: store
     l1xptri2 = add(l1xptri1, k)                //[ , P]make temp copy
   } {
     z2.uw += vrmpy(y1.ub, x27x24.ub)           //[1, 9]
     z3.uw += vrmpy(y1.ub, x37x34.ub)           //[1, 9]
     l1xptri3 = add(l1xptri2, k)                //[ , P]make temp copy
   } {
     z2.uw += vrmpy(y2.ub, x2bx28.ub)           //[1,10]
     z3.uw += vrmpy(y2.ub, x3bx38.ub)           //[1,10]
     x4fx4cx4bx48 = memd(ptr_x+#8)              //[1,10]
     x47x44x43x40 = memd(ptr_x++kk)             //[1,10]
   } {
     z2.uw += vrmpy(y3.ub, x2fx2c.ub)           //[1,11]
     z3.uw += vrmpy(y3.ub, x3fx3c.ub)           //[1,11]
     x5fx5cx5bx58 = memd(ptr_x+#8)              //[1,11]
     x57x54x53x50 = memd(ptr_x++kk)             //[1,11]
   } {
     z4.uw += vrmpy(y0.ub, x43x40.ub)           //[1,12]
     z5.uw += vrmpy(y0.ub, x53x50.ub)           //[1,12]
     vmem(ptr_zo++mm) = z1                      //[E, ]row 1 done: store
   } {
     z4.uw += vrmpy(y1.ub, x47x44.ub)           //[1,13]
     z5.uw += vrmpy(y1.ub, x57x54.ub)           //[1,13]
     x6fx6cx6bx68 = memd(ptr_x+#8)              //[1,13]
     x67x64x63x60 = memd(ptr_x++kk)             //[1,13]
   } {
     z4.uw += vrmpy(y2.ub, x4bx48.ub)           //[1,14]
     z5.uw += vrmpy(y2.ub, x5bx58.ub)           //[1,14]
     x7fx7cx7bx78 = memd(ptr_x+#8)              //[1,14]
     vmem(ptr_zo++mm) = z2                      //[E, ]row 2 done: store
   } {
     z4.uw += vrmpy(y3.ub, x4fx4c.ub)           //[1,15]
     z5.uw += vrmpy(y3.ub, x5fx5c.ub)           //[1,15]
     x77x74x73x70 = memd(ptr_x+#0)              //[1,14]
     vmem(ptr_zo++mm) = z3                      //[E, ]row 3 done: store
   } {
     z6.uw += vrmpy(y0.ub, x63x60.ub)           //[1,16]
     z7.uw += vrmpy(y0.ub, x73x70.ub)           //[1,16]
     ptr_x = add(ptr_x, kjump)                  //[1,15]
     vmem(ptr_zo++mm) = z4                      //[E, ]row 4 done: store
   } {
     z6.uw += vrmpy(y1.ub, x67x64.ub)           //[1,17]
     z7.uw += vrmpy(y1.ub, x77x74.ub)           //[1,17]
     vmem(ptr_zo++mm) = z5                      //[E, ]row 5 done: store
   } {
     z6.uw += vrmpy(y2.ub, x6bx68.ub)           //[1,18]
     z7.uw += vrmpy(y2.ub, x7bx78.ub)           //[1,18]
     ptr_x = add(ptr_x, kstride7)               //jump to next block
   } {
     z6.uw += vrmpy(y3.ub, x6fx6c.ub)           //[1,19]
     vmem(ptr_zo++mm) = z6.new                  //[E, ].new: store value from this packet
     skip = lsr(k, #1)                          //next line
   } {
     z7.uw += vrmpy(y3.ub, x7fx7c.ub)           //[1,19]
     vmem(ptr_zo++mm) = z7.new                  //[E, ]
     back = sub(#32, skip)                      //previous line
   } {
     z0 = vmem(ptr_z++mm)                       //[P, 0]reload accumulators, next 8 rows
     dcfetch(l1xptri0+#0)                       //[1, 4]prefetch next line
     l1xptri0 = add(l1xptri0, skip)             //
   } {
     z1 = vmem(ptr_z++mm)                       //[P, 0]
     dcfetch(l1xptri1+#0)                       //[1, 4]prefetch next line
     l1xptri1 = add(l1xptri1, skip)             //
   } {
     z2 = vmem(ptr_z++mm)                       //[P, 0]
     dcfetch(l1xptri2+#0)                       //[1, 4]prefetch next line
     l1xptri2 = add(l1xptri2, skip)             //
   } {
     z3 = vmem(ptr_z++mm)                       //[P, 0]
     dcfetch(l1xptri3+#0)                       //[1, 4]prefetch next line
     l1xptri3 = add(l1xptri3, skip)             //
   } {
     z4 = vmem(ptr_z++mm)                       //[P, 0]
     dcfetch(l1xptri0+#0)                       //[1, 4]prefetch next line
     l1xptri0 = add(l1xptri0, back)             //
   } {
     z5 = vmem(ptr_z++mm)                       //[P, 0]
     dcfetch(l1xptri1+#0)                       //[1, 4]prefetch next line
     l1xptri1 = add(l1xptri1, back)             //
   } {
     z6 = vmem(ptr_z++mm)                       //[P, 0]
     dcfetch(l1xptri2+#0)                       //[1, 4]prefetch next line
     l1xptri2 = add(l1xptri2, back)             //
     loop0(.L_prefetch, #2)                     //prime L1 for the next row group
   } {
     z7 = vmem(ptr_z++mm)                       //[P, 0]
     dcfetch(l1xptri3+#0)                       //[1, 4]prefetch next line
     l1xptri3 = add(l1xptri3, back)             //
     l1xptr= addasl(l1xptr,k,#2)                //[P, ]advance by 8k strip
   }
          .balign 32
.L_prefetch:
   {
     dcfetch(l1xptri0+#0)                       //[1, 4]prefetch next line
     l1xptri0 = add(l1xptri0, skip)             //
   } {
     dcfetch(l1xptri1+#0)                       //[1, 4]prefetch next line
     l1xptri1 = add(l1xptri1, skip)             //
   } {
     dcfetch(l1xptri2+#0)                       //[1, 4]prefetch next line
     l1xptri2 = add(l1xptri2, skip)             //
   } {
     dcfetch(l1xptri3+#0)                       //[1, 4]prefetch next line
     l1xptri3 = add(l1xptri3, skip)             //
   } {
     dcfetch(l1xptri0+#0)                       //[1, 4]prefetch next line
     l1xptri0 = add(l1xptri0, back)             //
   } {
     dcfetch(l1xptri1+#0)                       //[1, 4]prefetch next line
     l1xptri1 = add(l1xptri1, back)             //
   } {
     dcfetch(l1xptri2+#0)                       //[1, 4]prefetch next line
     l1xptri2 = add(l1xptri2, back)             //
   } {
     dcfetch(l1xptri3+#0)                       //[1, 4]prefetch next line
     l1xptri3 = add(l1xptri3, back)             //
   }:endloop0
/* restart the software pipeline for the next 8-row group                     */
   {
     x0fx0cx0bx08 = memd(ptr_x+#8)              //[0, 2]
   } {
     dcfetch(l1xptri0+#0)                       //[0, 0]prefetch next line
     x07x04x03x00 = memd(ptr_x++kk)             //[0, 2]
     ptr_y = ptr_yi                             //[ , P]rewind weights
   } {
     y0 = vmem(ptr_y++#2)                       //[0, 0]32x4
     x1fx1cx1bx18 = memd(ptr_x+#8)              //[0, 3]
     l1xptri0 = add(l1xptri0, skip)             //[0, 1]next line
   } {
     x17x14x13x10 = memd(ptr_x++kk)             //[0, 3]
     y1 = vmem(ptr_y+#-1)                       //[0, 1]32x4
     loop0(.L_loopK, ki)                        //[P, 9]ki is k1/4 - 2
   }:endloop1
/*=============================================================================*/
   { r17:16 = memd(sp+#0)                       //restore stack
     r19:18 = memd(sp+#8)                       //Q
   } {
     r21:20 = memd(sp+#16)                      //Q
     r23:22 = memd(sp+#24)                      //Q
   } {
     r25:24 = memd(sp+#32)                      //Q
     r27:26 = memd(sp+#40)                      //Q
   } {
     r28 = memw(sp+#48)                         //
     dealloc_return                             //Q
   }
.L_end:
/*=============================================================================*/
      .size gemmacbbw_asm, .L_end-gemmacbbw_asm
|
XiaoMi/nnlib | 42,284 | hexagon/asm_src/gvconv2dbbb_circ_d32_v65_h.S | /*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
Memory
CODESIZE 1128 bytes
STACK 112 bytes
Description
   Utilize the v65 vrmpy instructions. Common weights with 2 inputs and 2 outputs. 2 data inputs
are in a pair. Key is to feed each input with a different stream. Solution is to shuffle the
stream with a delayed version of itself. This doubles the size of the activations so a
smaller circular buffer of size filt_height*input depth*width*2.
Example depth = 16 shuffle blocks of 4 bytes together e.g. x00 =[x00.0,x00.1,x00.2,x00.3]
x00 x01 x02 x03|x10 x11 x12 x13|x20 x21 x22 x23|x30 x31 x32 x33
x40 x41 x42 x43|x50 x51 x52 x53|x60 x61 x62 x63|x70 x71 x72 x73
x80 x81 x82 x83|x90 x91 x92 x93|xa0 xa1 xa2 xa3|xb0 xb1 xb2 xb3
xc0 xc1 xc2 xc3|xd0 xd1 xd2 xd3|xe0 xe1 xe2 xe3|xf0 xf1 xf2 xf3
to
x00 x40 x01 x41 x02 x42 x03 x43|x10 x50 x11 x51 x12 x52 x13 x53|
x20 x60 x21 x61 x22 x62 x23 x63|x30 x70 x31 x71 x32 x72 x33 x73|
x40 x80 x41 x81 x42 x82 x43 x83|x50 x90 x51 x91 x52 x92 x53 x93|
x60 xa0 x61 xa1 x62 xa2 x63 xa3|x70 xb0 x71 xb1 x72 xb2 x73 xb3|
x80 xc0 x81 xc1 x82 xc2 x83 xc3|x90 xd0 x91 xd1 x92 xd2 x93 xd3|
xa0 xe0 xa1 xe1 xa2 xe2 xa3 xe3|xb0 xf0 xb1 xf1 xb2 xf2 xb3 xf3|
xc0 xc1 xc2 xc3 |xd0 xd1 xd2 xd3 |
xe0 xe1 xe2 xe3 |xf0 xf1 xf2 xf3 |
So each memd access into the buffer access two streams which are delayed from each other.
   While this is occurring the sequence can be aligned so that the extra computation on the
ends can be minimized.
To further minimize memory the circular buffer is updated inside the kernel each
line.
The code only processes 32 sets of weights at once inner loop is optimized
to 6 packets.
*/
/*===============================================================================*/
.text
.file "gvconv2dbbb_circ_d32_v65_h.S"
.global gvconv2dbbb_circ_d32_v65_asm
.balign 32
.type gvconv2dbbb_circ_d32_v65_asm, @function
gvconv2dbbb_circ_d32_v65_asm:
/*===============================================================================*/
#define PV(VSRC) .word (0x1DFFE020+VSRC) //debug vec reg
#define PS(SSRC) .word (0x1DFFE100+SSRC) //debug vec reg
/*===============================================================================*/
/* ---------------------------------- CALL REGS -------------------------------- */
#define ptr_xi r0 //12 activation data held in circ buf
#define ptr_wi r1 //13 weights
#define ptr_zi r2 //14 results
#define next_inbuf_width r3 //(pad_l+in_width+pad_r)
#define out_width_depth r4 //next line amount
#define out_width r5 //15 amount of work to be done
#define stride_h_w r6 //30 stride_height, stride_width
#define in_depth r22 //31 input depth multiples of 32
#define filt_width r23 //32 horizontal fuilter width
#define filt_height r8 //33 filt_height lines per filter
#define out_height r9 //34 number of vertical lines to perform
#define ptr_filtsum r24 //35 includes the computation filt_sum * in_offset + biasvec
#define ptr_max r28 //36 maximum and minum buffer
#define recip_level r26 //37 255 / (MAX - MIN) - used to scale to bytes
#define next_out_width r7 //38 total width of image in num d32's
#define rpad_lpad r14 //39 rpad on right for circ buffer lpad alignment due to filt
#define ptr_cbufi r16 //40 read buffer pointer
#define zshift r21 //41 extra shift on output before quantization
#define in_zero r25 //42
#define cbuf_eob r18 //18 end of cuirc buffer
#define cbuf_size r19 //19 size in bytes of circ buf -1
//#define ptr_equalize r17 //
#define zshift_tmp r17
/* --------------------------------- SCALER REGS ------------------------------- */
#define delta r7 //difference ebetween stride height and filt_height
#define out_width_32 r7 //actual out_width in depth32
#define cm4 r2 //shuffle/deal ints
#define col_count r2 //horizontal counter
#define in_width_32 r3 //total input width in bytes in buffer
#define x71_x31 r17:16 //4n+1 inputs
#define x70_x30 r15:14 //4n+0 inputs
#define x61_x21 r15:14 //4n+1 inputs
#define x60_x20 r17:16 //4n+0 inputs
#define x51_x11 r15:14 //4n+1 inputs
#define x50_x10 r17:16 //4n+0 inputs
#define x40_x00 r15:14 //4n+0 inputs
#define x41_x01 r17:16 //4n+1 inputs
#define ptr_wi_ptr_xi r1:0 //
#define fetch_ptr_base r1 //base pointer for l1 prefetch
#define fetch_ptr r10 //current pointer for l1 prefetch
#define stride3 r11 //3*stride
#define stride r12 //current to next input
#define ptr_x0 r26 //base input pointer
#define ptr_x1 r13 //current input ptr
#define ptr_x10 r13 //current input ptr
#define ptr_x11 r27 //current input ptr
#define ptr_w0 r20 //even output depth 32 weights
#define ptr_z0 r0 //even output depth 32 outputs
#define ptr_z1 r25 //21 write buffer sp position and odd output depth 32 outputs
#define in_width_stride_depth r22 //step size from currnet to next logical line
#define adjust r10
/* ---------------------------------------------------------------------------- */
#define next_in_width32 r20
#define buf_fill r17 //22 number of total lines in circ buffer to write
#define stride_w r15 //horizontal stride
#define width r16 //width in 128byte blocks
#define width_cnt r14 //width remains in 128byte blocks
#define lpad r6 //left pad aligmnent
#define rpad r1 //=128bytes
#define l2fctrl r15:14
#define l2fctrl1 r15
#define l2fctrl0 r14
/* ---------------------------------- VEC REGS -------------------------------- */
#define vin_zero v9 //
#define x3x2x1x0 v10 //input data
#define x7x6x5x4 v11 //next input data
#define y3y2y1y0 v14 //aligned input data
#define y7y6y5y4 v15 //delayed aligned inout data
#define y7y6y5y4_y3y2y1y0 v15:14 //aligned data
#define ybyay9y8 v16 //delayed by 2 aligned data
#define z73z62 v13 //shuffled delayed input
#define z51z40 v12 //shuffled delayed input
#define z73z62_z51z40 v13:12 //shuffled output data
/* ---------------------------------------------------------------------------- */
#define wscale v15 //
#define s07_s03 v17:16 //even output accs 3,7
#define s06_s02 v15:14 //even output accs 2,6
#define s05_s01 v13:12 //even output accs 1,5
#define s04_s00 v11:10 //even output accs 0,4
#define s07 v17 //even acc 7
#define s06 v15 //even acc 6
#define s05 v13 //even acc 5
#define s04 v11 //even acc 4
#define s03 v16 //even acc 3
#define s02 v14 //even acc 2
#define s01 v12 //even acc 1
#define s00 v10 //even acc 0
#define s17_s13 v25:24 //even output accs 3,7
#define s16_s12 v23:22 //even output accs 2,6
#define s15_s11 v21:20 //even output accs 1,5
#define s14_s10 v19:18 //even output accs 0,4
#define s14 v19 //even output accs 3,7
#define s10 v18 //even output accs 0,4
#define s15 v21 //even output accs 1,5
#define s11 v20 //even output accs 1,5
#define s17 v25 //even output accs 3,7
#define s13 v24 //even output accs 3,7
#define s16 v23 //even output accs 2,6
#define s12 v22 //even output accs 2,6
#define vzero v30 //
#define w00 v0 //weights even 0-31
#define w01 v3 //weights even 32-63
#define vrecip v1 //reciprocal 255/MAx replicated
#define s0_sh v8 //round value
#define s1_sh v26 //round value
#define wsum0 v2 //sum of weights column + bias add 0-31
#define d010 v27 //even lines upper 16bit packed accs 0,1
#define d032 v28 //even lines upper 16bit packed accs 2,3
#define d03210 v28 //8bit shifted, packed saturated 0-3
#define d054 v29 //even lines upper 16bit packed accs 4,5
#define d076 v31 //even lines upper 16bit packed accs 6,7
#define d07654 v31 //8bit shifted, packed saturated 4-7
#if 0
#define maxo_maxe v5:4 //packed maxes
#define maxo v5 //odd maxes
#define maxe v4 //even maxes
#define mino_mine v7:6 //packed mins
#define mino v7 //odd mins
#define mine v6 //even mins
#else
#define gmax v5 //odd maxes
#define gmin v7 //odd mins
#define maxe v4 //even maxes
#define mine v6 //even mins
#define maxo v18 //odd maxes
#define mino v21 //odd mins
#endif
#define stmp v2 // temp for min/max
#define SSR <<1:rnd:sat:shift //simplfy mpy instruction
/* --------------------------------------------------------------------------- */
{ allocframe(#112) //0th entry on stack is (112+8)/4=30 ints
stride_h_w = memw(sp+#0<<2) //stride horizontl and vertical
} {
memd(sp+#4<<2) = r21:20 //save 20,21
memd(sp+#6<<2) = r23:22 //save 22,23
r23 = #0x80000001
} {
maxe = vsplat(r23) // maxe <- -0x7fffffff
memd(sp+#0<<2) = r17:16 //save 16,17
memd(sp+#2<<2) = r19:18 //save 18,19
} {
memd(sp+#8<<2) = r25:24 //save 24,25
memd(sp+#10<<2) = r27:26 //save 26,27
mine.w = vabs(maxe.w) // mine <- +0x7fffffff
} {
memd(sp+#12<<2) = ptr_wi_ptr_xi //save weights:activation
memw(sp+#14<<2) = ptr_zi //save output ptr
vzero = #0 //vector zero
} {
filt_height = memw(sp+#33<<2) //filter height
in_depth = memw(sp+#31<<2) //input depth
stride = zxth(stride_h_w) //horizontal stride
} {
ptr_filtsum = memw(sp+#35<<2) //ptr to the sum of filters+offset
filt_width = memw(sp+#32<<2) //filter width
} {
out_height = memw(sp+#34<<2) //height of output
memw(sp+#15<<2) = out_width //save output width
cbuf_size = mpyi(filt_height, in_depth) //circular buffer size
stride = asl(stride, #5) //32 * stride_w
} {
cbuf_size = mpyi(cbuf_size, next_inbuf_width) //circular buffer size
stride3 = addasl(stride, stride, #1) //3Xstride
zshift = memw(sp+#40<<2) //final shift 7 + 16
in_zero = memw(sp+#41<<2) //
} {
ptr_cbufi = memw(sp+#39<<2) //circular buffer
cbuf_size = add(cbuf_size, cbuf_size) //x2
filt_height = mpyi(filt_height, in_depth) //total number of depth32 filter rows
in_zero = vsplatb(in_zero) //
} {
cbuf_eob = add(ptr_cbufi, cbuf_size) //end of circ buffer marker
filt_width = asl(filt_width, #2) //*32/8
next_out_width = memw(sp+#38<<2) //total width of output
dcfetch(ptr_xi+#0<<6) //
} {
ptr_max = memw(sp+#36<<2) //get max/min ptr
recip_level = memw(sp+#37<<2) //255/max
cbuf_eob = add(cbuf_eob, #-4) //make so comparison is >= eob
filt_width = add(filt_width, #-1) //account for epilog
} {
vrecip = vmem(recip_level++#1) //used to compress to 8bits 255/max
in_width_stride_depth=mpy(in_depth.L,stride_h_w.H)//
memw(sp+#21<<2) = ptr_cbufi //cbuf write ptr
} {
out_width_32 = asl(next_out_width, #5) //total output width in d32 format
in_width_stride_depth=mpyi(in_width_stride_depth,next_inbuf_width) //full logical line stride
gmax = vmem(ptr_max+#0) //
dcfetch(ptr_xi+#1<<6) //
} {
filt_height = lsr(filt_height, #5) //num d32 rows in filter
in_width_32 = asl(next_inbuf_width, #6) //next d32 line x 2
gmin = vmem(ptr_max+#1) //
dcfetch(ptr_xi+#2<<6) //
} {
filt_height = add(filt_height, #-1)
vin_zero = vsplat(in_zero) //
in_width_stride_depth=mpyi(in_width_stride_depth,#2)//locial input stride
dcfetch(ptr_xi+#3<<6) //
} {
memw(sp+#37<<2) = recip_level //255/max
wsum0 = vmem(ptr_filtsum+#0) //set 1st weight offset
}
/* -------------------------------------------------------------------------- */
.balign 32
.L_height:
{ ptr_z0 = memw(sp+#14<<2) //output ptr for even lines
ptr_x0 = memw(sp+#12<<2) //ptr_x0=ptr_cbufi read circ buffer input
//ptr_x0 = memw(sp+#39<<2) //ptr_x0=ptr_cbufi read circ buffer
out_height = add(out_height, #-1) //decrement height count
s14_s10 = vcombine(vzero, vzero) //
} { //buffer read ptr if ptr_xi >= buf_size-=size
ptr_x1 = add(ptr_x0, in_width_stride_depth) //next input line
fetch_ptr_base = add(ptr_x0, in_width_32) //fetch is next row ahead
memw(sp+#14<<2) += out_width_depth //update output ptr
col_count = memw(sp+#15<<2) //initialize width count
} {
p0 = cmp.gt(ptr_x1, cbuf_eob) //if >= circ buffer wrap around
if(p0.new) ptr_x1 = sub(ptr_x1, cbuf_size) //subtract size of buffer to create mod wrap
s15_s11 = vcombine(vzero, vzero) //
s16_s12 = vcombine(vzero, vzero) //
} {
p1 = cmp.gt(fetch_ptr_base, cbuf_eob) //if prefetch >= circ buffer wrap around
if(p1.new)fetch_ptr_base=sub(fetch_ptr_base,cbuf_size) //wrap fetch ptr around independently
//memw(sp+#39<<2) = ptr_x1 //ptr_xi+=in_width*depth*stride_h % cbuf_size
s17_s13 = vcombine(vzero, vzero) //
nop
} {
loop1(.L_filt_height, filt_height) //setup vertical filte rloop
s04_s00 = vcombine(wsum0,wsum0) //init sum0 and 4
s05_s01 = vcombine(wsum0,wsum0) //init sum1 and 5
nop
} {
s06_s02 = vcombine(wsum0,wsum0) //init sum2 and 6
s07_s03 = vcombine(wsum0,wsum0) //init sum3 and 7
ptr_w0 = memw(sp+#13<<2) //access ptr weight
nop
}
.balign 32
.L_width:
.L_filt_height:
{ p2 = cmp.eq(filt_height, #0)
if(p2.new) jump:nt .L_last1
ptr_x10 = ptr_x0 //set up currne tinput ptr
ptr_x11 = add(ptr_x0, #8) //set up currne tinput ptr
} {
w00 = vmem(ptr_w0++#1) //[0, 0]1st 32 weights of out depth
x70_x30 = memd(ptr_x10+stride3<<#1) //[0, 0]load pt 3 and 7
fetch_ptr = add(fetch_ptr_base, #0) //initial fetch ptr
loop0(.L_filt_width, filt_width) //set up inne rloop for next time
} {
w01 = vmem(ptr_w0++#1) //[0, 1]2nd 32weights stream 0
x71_x31 = memd(ptr_x11+stride3<<#1) //[0, 1]
ptr_x0 = add(ptr_x0, in_width_32) //if >= buf_size -= buf_size
fetch_ptr_base=add(fetch_ptr_base,in_width_32) //if >= buf_size -= buf_size
}
.balign 32
.L_filt_width:
{ dcfetch(fetch_ptr+#0<<6) //[0, 2]fetch 64bytes-2 lots 8 x 4 bytes
fetch_ptr = add(fetch_ptr, #64) //[0. 2]
s07_s03.w+= vrmpy(w00.b, x70_x30.ub) //[0, 3]macc 3,7 even row
} {
s17_s13.w+= vrmpy(w01.b, x71_x31.ub) //[0, 3]
x60_x20 = memd(ptr_x10+stride<<#2) //[0, 3]load pt 2 and 6
x61_x21 = memd(ptr_x11+stride<<#2) //[0, 3]
} {
s06_s02.w += vrmpy(w00.b, x60_x20.ub) //[0, 4]macc 2,6 out 0
s16_s12.w += vrmpy(w01.b, x61_x21.ub) //[0, 4]acc 2,3,6,7
x50_x10 = memd(ptr_x10+stride<<#1) //[0, 4]load pt 1 5
x51_x11 = memd(ptr_x11+stride<<#1) //[0, 4]
} {
s05_s01.w += vrmpy(w00.b, x50_x10.ub) //[0, 5]
s15_s11.w += vrmpy(w01.b, x51_x11.ub) //[0, 5]
x40_x00 = memd(ptr_x10++#2<<3) //[0, 5]load pts 0, 4
x41_x01 = memd(ptr_x11++#2<<3) //[0, 5]
} {
s04_s00.w += vrmpy(w00.b, x40_x00.ub) //[0, 6]acc 0,4,1,5 out 0
w00 = vmem(ptr_w0++#1) //[0, 0]1st 32 weights of out depth
x70_x30 = memd(ptr_x10+stride3<<#1) //[0, 0]load pt 3 and 7
} {
s14_s10.w += vrmpy(w01.b, x41_x01.ub) //[0, 6]
w01 = vmem(ptr_w0++#1) //[0, 1]2nd 32weights stream 0
x71_x31 = memd(ptr_x11+stride3<<#1) //[0, 1]
}:endloop0
{ dcfetch(fetch_ptr+#0<<6) //[0, 2]fetch 64bytes-2 lots 8 x 4 bytes
p1 = cmp.gt(fetch_ptr_base, cbuf_eob) //[E,10]
if(p1.new)fetch_ptr_base=sub(fetch_ptr_base,cbuf_size)//[E,10]wrap around end fetch ptr
s07_s03.w+= vrmpy(w00.b, x70_x30.ub) //[0, 3]macc 3,7 even row
} {
s17_s13.w+= vrmpy(w01.b, x71_x31.ub) //[0, 3]
x60_x20 = memd(ptr_x10+stride<<#2) //[0, 3]load pt 2 and 6
x61_x21 = memd(ptr_x11+stride<<#2) //[0, 3]
nop
} {
s06_s02.w += vrmpy(w00.b, x60_x20.ub) //[0, 4]macc 2,6 out 0
s16_s12.w += vrmpy(w01.b, x61_x21.ub) //[0, 4]acc 2,3,6,7
x50_x10 = memd(ptr_x10+stride<<#1) //[0, 4]load pt 1 5
x51_x11 = memd(ptr_x11+stride<<#1) //[0, 4]
} {
s05_s01.w += vrmpy(w00.b, x50_x10.ub) //[0, 5]
s15_s11.w += vrmpy(w01.b, x51_x11.ub) //[0, 5]
x40_x00 = memd(ptr_x10++#2<<3) //[0, 5]load pts 0, 4
x41_x01 = memd(ptr_x11++#2<<3) //[0, 5]
} {
p0 = cmp.gt(ptr_x0, cbuf_eob) //[E,10]
if(p0.new)ptr_x0 = sub(ptr_x0, cbuf_size) //[E,10]wrap around end of buffer
s04_s00.w += vrmpy(w00.b, x40_x00.ub) //[0, 6]acc 0,4,1,5 out 0
s14_s10.w += vrmpy(w01.b, x41_x01.ub) //[0, 6]
}:endloop1
{ ptr_x11 = add(ptr_x0, #8) //set up currne tinput ptr
ptr_x10 = ptr_x0 //set up currne tinput ptr
nop; nop;
}
.L_last1:
{
loop0(.L_filt_width1, filt_width) //set up inne rloop for next time
ptr_x0 = add(ptr_x0, in_width_32) //if >= buf_size -= buf_size
w00 = vmem(ptr_w0++#1) //[0, 0]1st 32 weights of out depth
nop
} {
x70_x30 = memd(ptr_x10+stride3<<#1) //[0, 0]load pt 3 and 7
p0 = cmp.gt(ptr_x0, cbuf_eob) //[E,10]
if(p0.new)ptr_x0 = sub(ptr_x0, cbuf_size) //[E,10]wrap around end of buffer
nop
} {
w01 = vmem(ptr_w0++#1) //[0, 1]2nd 32weights stream 0
x71_x31 = memd(ptr_x11+stride3<<#1) //[0, 1]
fetch_ptr = addasl(ptr_x0, stride, #4) //stride*2*4 advance buffer by 8 output sinitial fetch ptr
nop
}
.balign 32
.L_filt_width1:
{
dcfetch(fetch_ptr+#0<<6) //[0, 2]fetch 64bytes-2 lots 8 x 4 bytes
fetch_ptr = add(fetch_ptr, #64) //[0. 2]
s07_s03.w+= vrmpy(w00.b, x70_x30.ub) //[0, 3]macc 3,7 even row
} {
s17_s13.w+= vrmpy(w01.b, x71_x31.ub) //[0, 3]
x60_x20 = memd(ptr_x10+stride<<#2) //[0, 3]load pt 2 and 6
x61_x21 = memd(ptr_x11+stride<<#2) //[0, 3]
} {
s06_s02.w += vrmpy(w00.b, x60_x20.ub) //[0, 4]macc 2,6 out 0
s16_s12.w += vrmpy(w01.b, x61_x21.ub) //[0, 4]acc 2,3,6,7
x50_x10 = memd(ptr_x10+stride<<#1) //[0, 4]load pt 1 5
x51_x11 = memd(ptr_x11+stride<<#1) //[0, 4]
} {
s05_s01.w += vrmpy(w00.b, x50_x10.ub) //[0, 5]
s15_s11.w += vrmpy(w01.b, x51_x11.ub) //[0, 5]
x40_x00 = memd(ptr_x10++#2<<3) //[0, 5]load pts 0, 4
x41_x01 = memd(ptr_x11++#2<<3) //[0, 5]
} {
s04_s00.w += vrmpy(w00.b, x40_x00.ub) //[0, 6]acc 0,4,1,5 out 0
w00 = vmem(ptr_w0++#1) //[0, 0]1st 32 weights of out depth
x70_x30 = memd(ptr_x10+stride3<<#1) //[0, 0]load pt 3 and 7
} {
s14_s10.w += vrmpy(w01.b, x41_x01.ub) //[0, 6]
w01 = vmem(ptr_w0++#1) //[0, 1]2nd 32weights stream 0
x71_x31 = memd(ptr_x11+stride3<<#1) //[0, 1]
}:endloop0
{
dcfetch(fetch_ptr+#0<<6) //[0, 2]fetch 64bytes-2 lots 8 x 4 bytes
s07_s03.w+= vrmpy(w00.b, x70_x30.ub) //[0, 3]macc 3,7 even row
} {
s17_s13.w+= vrmpy(w01.b, x71_x31.ub) //[0, 3]
x60_x20 = memd(ptr_x10+stride<<#2) //[0, 3]load pt 2 and 6
x61_x21 = memd(ptr_x11+stride<<#2) //[0, 3]
} {
s06_s02.w += vrmpy(w00.b, x60_x20.ub) //[0, 4]macc 2,6 out 0
s16_s12.w += vrmpy(w01.b, x61_x21.ub) //[0, 4]acc 2,3,6,7
x50_x10 = memd(ptr_x10+stride<<#1) //[0, 4]load pt 1 5
x51_x11 = memd(ptr_x11+stride<<#1) //[0, 4]
} {
s05_s01.w += vrmpy(w00.b, x50_x10.ub) //[0, 5]
s15_s11.w += vrmpy(w01.b, x51_x11.ub) //[0, 5]
x40_x00 = memd(ptr_x10++#2<<3) //[0, 5]load pts 0, 4
x41_x01 = memd(ptr_x11++#2<<3) //[0, 5]
} {
s04_s00.w += vrmpy(w00.b, x40_x00.ub) //[0, 6]acc 0,4,1,5 out 0
s14_s10.w += vrmpy(w01.b, x41_x01.ub) //[0, 6]
p2 = cmp.gt(col_count,#1); // do we need col 1?
col_count = add(col_count, #-8) //decrement width count by 8
}
/* ------------------------------------------------------------------------ */
{ s00.w = vadd(s00.w, s10.w) //combine reducnent accs
s01.w = vadd(s01.w, s11.w) //combine reducnent accs
s02.w = vadd(s02.w, s12.w) //combine reducnent accs
s03.w = vadd(s03.w, s13.w) //combine reducnent accs
} {
s04.w = vadd(s04.w, s14.w) //combine reducnent accs
s05.w = vadd(s05.w, s15.w) //combine reducnent accs
s06.w = vadd(s06.w, s16.w) //combine reducnent accs
s07.w = vadd(s07.w, s17.w) //combine reducnent accs
} {
mine.w = vmin(mine.w, s00.w) //min accumulation
maxe.w = vmax(maxe.w, s00.w) //max accumulation
s0_sh.w = vasl(s00.w, zshift) //o
stmp = s00; // keep for replacing s01 etc
// adjust = memw(sp+#23<<2)
} {
if(p2) stmp = s01; // col 1 if needed
p2 = cmp.gt(col_count,#-6); // do we need col 2?
p0 = cmp.gt(col_count, #-4) // => 4..7 need store
} {
maxe.w = vmax(maxe.w, stmp.w) //max accumulation col 1
mine.w = vmin(mine.w, stmp.w) //min accumulation col 1
if( p2) stmp = s02; // col 2 if needed
p2 = cmp.gt(col_count,#-5); // do we need col 3?
// ptr_x0 = sub(ptr_x0, adjust) //-=filt_height if stride_height > filt_height
} {
s1_sh.w = vasl(s01.w, zshift) //o
s00.w = vmpye(s0_sh.w, vrecip.uh) //
maxe.w = vmax(maxe.w, stmp.w) //max accumulation col 2
} {
mine.w = vmin(mine.w, stmp.w) //min accumulation col 2
if( p2) stmp = s03; // col 3 if needed
s00.w += vmpyo(s0_sh.w, vrecip.h):SSR //
ptr_x0 += mpyi(stride, #16) //stride*2*4 advance buffer by 8 outputs
} {
mine.w = vmin(mine.w, stmp.w) //min accumulation col 3
maxe.w = vmax(maxe.w, stmp.w) //max accumulation col 3
p2 = cmp.gt(col_count,#-3); // do we need col 5?
} {
s0_sh.w = vasl(s02.w, zshift) //o
s01.w = vmpye(s1_sh.w, vrecip.uh) //
if( p0) stmp = s04; // col 4 if needed
} {
s01.w += vmpyo(s1_sh.w, vrecip.h):SSR //
mine.w = vmin(mine.w, stmp.w) //min accumulation col 4
maxe.w = vmax(maxe.w, stmp.w) //max accumulation col 4
} {
if( p2) stmp = s05; // col 5 if needed
p2 = cmp.gt(col_count,#-2) // do we need col 6?
s1_sh.w = vasl(s03.w, zshift) //o
s02.w = vmpye(s0_sh.w, vrecip.uh) //
} {
d010.h = vpack(s01.w, s00.w):sat //pack high 16bits of accs
s02.w += vmpyo(s0_sh.w, vrecip.h):SSR //
} {
mine.w = vmin(mine.w, stmp.w) //min accumulation, col 5
maxe.w = vmax(maxe.w, stmp.w) //max accumulation, col 5
p1 = cmp.gt(col_count, #0) // not last iteration
if( p2) stmp = s06; // col 6 if needed
} {
p2 = cmp.gt(col_count,#-1); // do we need col 7?
s0_sh.w = vasl(s04.w, zshift) //o
mine.w = vmin(mine.w, stmp.w) //min accumulation col 6
s03.w = vmpye(s1_sh.w, vrecip.uh) //
} {
s03.w += vmpyo(s1_sh.w, vrecip.h):SSR //
maxe.w = vmax(maxe.w, stmp.w) //max accumulation col 6
if( p2) stmp = s07; // col 7 if needed
} {
zshift_tmp = mux(p1,#0,zshift) // 0; but 'zshift' on last iter
s1_sh.w = vasl(s05.w, zshift) //o
s04.w = vmpye(s0_sh.w, vrecip.uh) //
mine.w = vmin(mine.w, stmp.w) //min accumulation, col 7
} {
d032.h = vpack(s03.w, s02.w):sat //pack high 16bits of accs
s04.w += vmpyo(s0_sh.w, vrecip.h):SSR //
maxe.w = vmax(maxe.w, stmp.w) //max accumulation. col 7
} {
s0_sh.w = vasl(s06.w, zshift) //o
s05.w = vmpye(s1_sh.w, vrecip.uh) //
fetch_ptr_base = add(ptr_x0, in_width_32) //fetch is next row ahead
} {
d03210.ub = vpack(d032.h, d010.h):sat //shift 16bits by zshift
vmem(ptr_z0++#1):nt = d03210.new //store 0-3 even row
s05.w += vmpyo(s1_sh.w, vrecip.h):SSR //
maxe.w = vasl(maxe.w,zshift_tmp) // maxe <<= zshift (last loop only)
} {
s1_sh.w = vasl(s07.w, zshift) //o
s06.w = vmpye(s0_sh.w, vrecip.uh) //
s10 = #0
} {
d054.h = vpack(s05.w, s04.w):sat //pack high 16bits of accs
s06.w += vmpyo(s0_sh.w, vrecip.h):SSR //
p2 = cmp.gt(fetch_ptr_base, cbuf_eob) //if prefetch >= circ buffer wrap around
wsum0 = vmem(ptr_filtsum+#0) //set 1st weight offset
} {
if(p2)fetch_ptr_base=sub(fetch_ptr_base,cbuf_size)//wrap fetch ptr around independently
s07.w = vmpye(s1_sh.w, vrecip.uh) //
s11 = #0
s12 = #0
} {
s07.w += vmpyo(s1_sh.w, vrecip.h):SSR //
s04_s00 = vcombine(wsum0,wsum0) //init sum0 and 4
loop1(.L_filt_height, filt_height) //setup vertical filte rloop
//lc1 = filt_height
} {
s05_s01 = vcombine(wsum0,wsum0) //init sum1 and 5
s13 = #0
s14 = #0
//loop0(.L_filt_width, filt_width) //setup inner filter loop
} {
d076.h = vpack(s07.w, s06.w):sat //pack high 16bits of accs
s06_s02 = vcombine(wsum0,wsum0) //init sum2 and 6
s15 = #0
//ptr_equalize = memw(sp+#43<<2) //
} {
s07_s03 = vcombine(wsum0,wsum0) //init sum3 and 7
ptr_w0 = memw(sp+#13<<2) //access ptr weight
s16 = #0
if(!p1) wscale = vrecip //
} {
s17 = #0
d07654.ub = vpack(d076.h, d054.h):sat //shift 16bits by zshift
if(p0) vmem(ptr_z0++#1):nt = d07654.new //store 4-7 even row
if(p1) jump .L_width //next 2 rows 8 points per row
}//endloop width
/* --------------------------------------------------------------------------- */
#if 0
//used only for activation 1st strategy
{ rpad_lpad = memw(sp+#39<<2) //retieve left pad
stride_w = lsr(stride, #5) //pick out horiz stride
buf_fill = memw(sp+#22<<2) //number of lines to write into buf
width = add(next_in_width, #3) //[S, 2]number of 128b blks
} {
width = sub(width.L, rpad_lpad.L) //next_in_width-lpad+3
cm4 = #-4 //[S, 1]shuffle ints
rpad = lsr(rpad_lpad, #18) //
ptr_x0 = memw(sp+#12<<2) //read from input activations
} {
width = lsr(width, #2)
ptr_z1 = memw(sp+#21<<2) //write pointer to circ buffer write
p0 = cmp.eq(stride_w, #1) //[S, 2]is stride = 1? (or 2)
next_in_width32 = asl(next_in_width, #5) //
} {
rpad = add(rpad, width)
lpad = asl(rpad_lpad, #5) //[S, 0]alignment
loop1(.L_rows, buf_fill) //[S, 2]outer loop
}
/* --------------------------------------------------------------------------- */
.balign 32
.L_rows:
{ p3 =sp1loop0(.L_cols, rpad) //[P, 0]inner loop width
width_cnt = add(width, #-1) //[P, 0]
x3x2x1x0 = vmem(ptr_x0+#0) //[P, 0]load 1st 128
x7x6x5x4 = vin_zero //[P, 0]
} {
ptr_x1 = add(ptr_x0, #128) //[P, 1]
p1 = cmp.gt(width_cnt, #0) //[P, 1]
width_cnt = add(width_cnt, #-1) //[P, 1]
} {
if(p1) x7x6x5x4.tmp = vmem(ptr_x1++#1):nt //[P, 2]stride = 1 data
y3y2y1y0 = valign(x7x6x5x4,x3x2x1x0, lpad) //[P, 2]aligned data next 128
x3x2x1x0 = x7x6x5x4 //[P, 2]update pipe
x7x6x5x4 = vin_zero //[P, 2]
} {
if(p0) ybyay9y8 = y3y2y1y0 //[P, 3]update 1 stage pipe y0 = y2
p1 = cmp.gt(width_cnt, #0) //[P, 3]
width_cnt = add(width_cnt, #-1) //[P, 3]
if(p0) jump .L_cols //[P, 3]skip 1 load
} {
if(p1) x7x6x5x4.tmp = vmem(ptr_x1++#1):nt //[P, 4]stride = 2 data
y7y6y5y4 = valign(x7x6x5x4,x3x2x1x0, lpad) //[P, 4]aligned data next 128
x3x2x1x0 = x7x6x5x4 //[P, 4]update pipe
x7x6x5x4 = vin_zero //[P, 4]
} {
p1 = cmp.gt(width_cnt, #0) //[P, 5]
width_cnt = add(width_cnt, #-1) //[P, 5]
}
.L_cols:
{ if( p0) y3y2y1y0 = ybyay9y8 //[0, 0]update 1 stage pipe y0 = y2
if(p1) x7x6x5x4.tmp = vmem(ptr_x1++#1):nt //[0, 0]
ybyay9y8 = valign(x7x6x5x4,x3x2x1x0, lpad) //[0, 0]aligned data next 128
x3x2x1x0 = x7x6x5x4 //[0, 0]update pipe
} {
if(p3) vmem(ptr_z1++#1) = z73z62 //[0, 1]empty pipe
p1 = cmp.gt(width_cnt, #0) //[0, 1]
width_cnt = add(width_cnt, #-1) //[0, 1]
x7x6x5x4 = vin_zero //[0, 1]
} {
z73z62_z51z40= vshuff(ybyay9y8,y3y2y1y0,cm4) //[0, 2]shuffle up for new vrmpy
vmem(ptr_z1++#1) = z51z40.new //[0, 2]empty pipe
if(!p0) y3y2y1y0 = y7y6y5y4 //[0, 2]update 2 stage shift reg
if(!p0) y7y6y5y4 = ybyay9y8 //[0, 2]y0 = y1 || y2 = y2
}:endloop0
{ vmem(ptr_z1++#1) = z73z62 //[E, 0]empty pipe
ptr_x0 = addasl(ptr_x0, next_in_width, #5) //[E, 0]update input next row
}
/* --------------------------------------------------------------------------- */
{ p2 = cmp.gt(ptr_z1, cbuf_eob) //if circ buf write and end of buf
if(p2.new) ptr_z1 = sub(ptr_z1, cbuf_size) //then subtract buf size take to 1st row
}:endloop1
{
p0 = cmp.eq(out_height, #0) //are vertical lines done?
l2fctrl1 = next_in_width32
l2fctrl0 = combine(next_in_width32.L, buf_fill.L)
if(p0.new) jump:nt .L_domax //avoid last l2 fetch
} {
l2fetch(ptr_x0, l2fctrl)
} {
memw(sp+#12<<2) = ptr_x0 //save last read from input activations
memw(sp+#21<<2) = ptr_z1 //last write for circ buffer
if(!p0) jump:t .L_height //then go again
}
#else
// maxe has already had <<= zshift; mine still needs it.
{ maxo.w = vmpye(maxe.w, wscale.uh) //[1, 0]equalize max's
mine.w = vasl( mine.w, zshift)
//memw(sp+#43<<2) = ptr_equalize //
recip_level = memw(sp+#37<<2) //255/max
p0 = cmp.eq(out_height, #0) //are vertical lines done?
} {
col_count = #0x80000001 // init for maxe
mino.w = vmpye(mine.w, wscale.uh) //[1, 1]equalize min's
if(!p0)vrecip = vmem(recip_level++#1) //used to compress to 8bits 255/max
} {
ptr_z0 = abs(col_count) // init for mine
memw(sp+#37<<2) = recip_level //255/max
maxo.w+= vmpyo(maxe.w, wscale.h):SSR //[1, 2]equalize max's
} {
gmax.w = vmax(gmax.w, maxo.w)
mino.w+= vmpyo(mine.w,wscale.h):SSR //[1, 3]equalize min's
ptr_filtsum = add(ptr_filtsum, #128) //
//mine = #0 //
} {
gmin.w = vmin(gmin.w, mino.w)
if(!p0) jump:nt .L_height //then go again
maxe = vsplat(col_count) //
mine = vsplat(ptr_z0) //
}
#endif
/* ------------------------------------------------------------------------ */
#if 0
.L_domax:
{ ptr_max = memw(sp+#36<<2) //get max/min ptr
cm4 = #-4 //define int based deal
} {
loop0(.L_peak, #4) //set up vec reduce
maxo_maxe = vdeal(maxe, maxe, cm4) //deal out odd and even
}
.L_peak:
{ maxe.w = vmax(maxe.w, maxo.w) //reduce
mino_mine = vdeal(mine, mine, cm4) //split out and and even min
} {
mine.w = vmin(mine.w, mino.w) //reduce mins by 2
} {
maxo_maxe = vdeal(maxe, maxe, cm4) //split out odd and even max
}:endloop0
{ maxe.w = vmax(maxo.w, maxe.w) //reduce max
vmem(ptr_max+#0) = maxe.new //store max
mino_mine = vdeal(mine, mine, cm4) //split out mins
} {
mine.w = vmin(mino.w, mine.w) //reduce mins to final 1
vmem(ptr_max+#1) = mine.new //store min
}
/* ------------------------------------------------------------------------ */
{ r17:16 = memd(sp+#0) //restore stack
r19:18 = memd(sp+#2<<2) //18,19
} {
#else
{ vmem(ptr_max+#0) = gmax //store max
r17:16 = memd(sp+#0) //restore stack
} {
vmem(ptr_max+#1) = gmin //store min
r19:18 = memd(sp+#2<<2) //18,19
} {
#endif
r21:20 = memd(sp+#4<<2) //20,21
r23:22 = memd(sp+#6<<2) //22,23
} {
r25:24 = memd(sp+#8<<2) //24,25
r27:26 = memd(sp+#10<<2) //26,27
} {
dealloc_return //
}
/* ------------------------------------------------------------------------ */
.L_end:
/* ======================================================================== */
.size gvconv2dbbb_circ_d32_v65_asm, .L_end-gvconv2dbbb_circ_d32_v65_asm
|
XiaoMi/nnlib | 18,328 | hexagon/asm_src/histogram_h.S | /*
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
Description
Build histogram of 8-bit inputs
void histogram_flat_asm(
uint16_t * histo, // output: nbatches*256 histogram. Vec aligned.
uint8_t const * data, // input: data[0..depth-1] is for first histo. Any alignment.
int depth, // depth of input (must be 1..65535 -- see below)
int nbatches, // # of batches to process; >= 1.
int batch_stride ); // offset from batch to batch (when nbatches>1) Any alignment.
void histogram_d32_asm(
uint16_t * histo, // output: (nbatches*4)*256 histogram. Vec aligned.
uint8_t const * data, // input: data for first histo. must be 128 aligned
int depth, // depth of input (must be 1..65535 -- see below)
int nbatches, // # of batches to process; >= 1. Each batch is 4 width units.
int d32_stride ); // offset from each d32 to next, when depth > 32. Multiple of 128.
Overflow may occur if depth > 65535. Since the results are accumulated in sections, and the sections are summed
using saturated adds, the results will be saturated (rather than overflowed) if each section has <= 65535 samples;
for 'flat' mode this is guaranteed if depth <= 524,160; for 'd32' it is guaranteed when depth <= 131040.
Reference (does not model overflow):
void histogram_flat_asm(uint16_t * histo, uint8_t const * data, int depth, int nbatches, int batch_stride )
{
memset( histo, 0, nbatches * 256 * sizeof(uint16_t));
for( int i =0; i < nbatches; i ++ ){
for( int k = 0; k < depth; k++ ) histo[i*256 + data[i*batch_stride + k]] ++;
}
}
void histogram_d32_asm(uint16_t * histo, uint8_t const * data, int depth, int nbatches, int d32_stride )
{
memset( histo, 0, nbatches*4 * 256 * sizeof(uint16_t));
for( int i =0; i < nbatches*4; i ++ ){
uint8_t const *rpd = &data[i*32];
for( int k = 0; k < depth; k++){
uint8_t val = rpd[(k&31) + (k>>5)*d32_stride ];
histo[i*256 + val ] ++;
}
}
}
*/
// ---------------------------------------------------------------------------
// Register aliases shared by histogram_flat_asm and histogram_d32_asm.
// r0..r4 carry the five call arguments (Hexagon calling convention);
// the remaining aliases are caller-saved scratch registers.
// ---------------------------------------------------------------------------
#define histo_ptr r0 //ptr to output histogram(s), vector aligned
#define in_ptr0 r1 //ptr to input data (batch base pointer)
#define depth r2 // number of input bytes per histogram (arg 2)
#define nbatches r3 // number of batches/histograms to build (arg 3)
#define batch_stride r4 // byte stride batch-to-batch (d32_stride for _d32; arg 4)
#define rconst r5
#define srcalign r6
#define in_ptr r8 // inner loop load pointer
#define endpoint r9 // srcalign + depth
#define inner_loopcount r10
#define histo_ptr_tmp r11
#define rtmp r7
#define pf_lo r12
#define pf_hi r13
#define pf_hi_lo r13:12
//
// M0 contains the vector-to-vector stride (#128 for _flat, and d32_stride for _d32.
// P3 is false for 'flat', true for 'd32'
//
/* ------------------------------------------------------------------------------------------ */
//============================================================================
// void histogram_flat_asm( uint16_t *histo, uint8_t const *data, int depth,
//                          int nbatches, int batch_stride )
// Builds nbatches 256-bin uint16 histograms of 8-bit data using the HVX
// vhist instruction: the 32 accumulator registers v0..v31 are cleared, each
// 128-byte input vector is folded in with vhist, and the per-lane partial
// counts are then reduced by a log2 tree of vshuff / vadd(.uh):sat into
// 4 output vectors (256 x uint16) per batch.  Partial first/last vectors
// are masked with vsetq-derived predicates (q0 head mask, q2 tail mask) so
// unaligned 'data' and arbitrary 'depth' are handled.  Saturating adds mean
// an overflowing bin clamps at 0xffff rather than wrapping (see file header
// for the depth limits that guarantee no saturation).
// histogram_d32_asm (below) shares .L_histloop/.L_last_histo with this
// routine: p3 selects the d32 repacking path (.L_pack_for_d32), and M0
// holds the vector-to-vector load stride for the shared inner loop.
// Clobbers: r5-r13, p0-p3, q0-q2, v0-v31, m0, loop regs.  No stack use.
//============================================================================
.text
.global histogram_flat_asm
.balign 32
.type histogram_flat_asm @function
histogram_flat_asm:
/* ------------------------------------------------------------------------------------------ */
{
#if defined(HIST_WITH_L2PREF)
pf_lo = satuh(depth)
rconst = #1
in_ptr = in_ptr0;
} {
pf_lo = combine(pf_lo.l,rconst.l) // (depth: 1 )
v31:30.w = vsub(v31:30.w,v31:30.w)
pf_hi = batch_stride
} {
l2fetch( in_ptr0, pf_hi_lo )
#else
in_ptr = in_ptr0;
v31:30.w = vsub(v31:30.w,v31:30.w)
#endif
rconst = #128
} {
q1 = vcmp.eq(v0.h,v0.h) // q1 = all 1's always
p3 = cmp.gt(r0,r0) // p3 = false for 'flat' case
v29:28 = vcombine(v30,v30)
} {
loop1( .L_flatloop,nbatches)
v27:26 = vcombine(v30,v30)
v25:24 = vcombine(v30,v30)
srcalign = and(in_ptr,#127) // get src alignment
} {
v23:22 = vcombine(v30,v30)
v21:20 = vcombine(v30,v30)
endpoint = add(srcalign,depth); // src align + length
} {
v19:18 = vcombine(v30,v30)
v17:16 = vcombine(v30,v30)
} {
m0 = rconst // make this #128
v15:14 = vcombine(v30,v30)
v13:12 = vcombine(v30,v30)
}
// Outer loop for the 'flat' version starts here (one iteration per batch).
.L_flatloop:
// check pointer align etc.
// inner_loop_count = ((srcalign + depth + 127)>>7) - 2
// ((srcalign + depth - 129)>>7
{
inner_loopcount= add(endpoint,#-129)
q0 = vsetq(in_ptr)
in_ptr0 = add(in_ptr0, batch_stride ); // next batch ptr
v5:4 = vcombine(v30,v30)
} {
inner_loopcount= asr(inner_loopcount,#7)
v3:2 = vcombine(v30,v30)
v1:0 = vcombine(v30,v30)
#if __HEXAGON_ARCH__ >= 62
} {
loop0( .L_histloop, inner_loopcount)
p1 = cmp.gt( inner_loopcount, #-1) // go to special case code if inner_loopcount < 0
q2 = vsetq2(endpoint) // get end mask
srcalign = and(in_ptr0,#127); // srcalign for next iter
} {
q0 = not(q0)
v11:10 = vcombine(v30,v30);
}
#else
// pre-V62: no vsetq2, so build the tail mask from vsetq and invert it
// when 'endpoint' is an exact multiple of 128 (mask would be empty).
rconst = and( endpoint,#127)
} {
loop0( .L_histloop, inner_loopcount)
p0 = cmp.eq(rconst,#0)
q2 = vsetq(endpoint)
if(!p0.new) jump:t .L_zeroq2
} {
q2 = not(q2)
}
.L_zeroq2:
{
p1 = cmp.gt( inner_loopcount, #-1) // go to special case code if inner_loopcount < 0
srcalign = and(in_ptr0,#127); // srcalign for next iter
q0 = not(q0)
v11:10 = vcombine(v30,v30);
}
#endif
{
v7:6 = vcombine(v30,v30)
v9:8 = vcombine(v30,v30)
} {
p2 = cmp.gt(inner_loopcount,#0)
endpoint = add(srcalign,depth); // end point for next iter.
if(!p1) jump:nt .L_for_onevec
} {
v0.tmp = vmem(in_ptr++M0) // first (maybe partial) vector
vhist(q0);
if (!p2) jump:nt .L_last_histo
}
// Steady-state inner loop: accumulate one full vector per packet
// (q1 = all lanes enabled).  Also reached from histogram_d32_asm.
.balign 32
.L_histloop:
{
v0.tmp = vmem(in_ptr++M0)
vhist(q1)
}:endloop0
// Final (possibly partial) vector, masked by q2; then reduce accumulators.
.balign 32
.L_last_histo:
{
v0.tmp = vmem(in_ptr+#0)
vhist(q2)
p0 = cmp.gt(nbatches,#1)
} {
rconst = #16
in_ptr = in_ptr0;
#if defined(HIST_WITH_L2PREF)
if( !p0 ) jump:nt .L_no_prefetch // skip prefetch if last batch iteration.
}
l2fetch( in_ptr0, pf_hi_lo );
.L_no_prefetch:
#else
}
#endif
// histo is done. Do the shuffles and adds, and in the process
// clear regs v12..v31 to be ready for the next loop.
{ // 16 shuffles, 16 adds... pack v0..v31 to v0..15
v1:0 = vshuff( v1,v0, rconst)
nbatches = add(nbatches,#-1)
} {
v0.uh = vadd(v0.uh,v1.uh):sat
v3:2 = vshuff( v3,v2, rconst)
} {
v1.uh = vadd(v2.uh,v3.uh):sat
v3:2 = vshuff( v5,v4, rconst)
} {
v2.uh = vadd(v2.uh,v3.uh):sat
v5:4 = vshuff( v7,v6, rconst)
} {
v3.uh = vadd(v4.uh,v5.uh):sat
v5:4 = vshuff( v9,v8, rconst)
} {
v4.uh = vadd(v4.uh,v5.uh):sat
v7:6 = vshuff( v11,v10, rconst)
} {
v5.uh = vadd(v6.uh,v7.uh):sat
v7:6 = vshuff( v13,v12, rconst)
} {
v6.uh = vadd(v6.uh,v7.uh):sat
v9:8 = vshuff( v15,v14, rconst)
} {
v7.uh = vadd(v8.uh,v9.uh):sat
v9:8 = vshuff( v17,v16, rconst)
} {
v8.uh = vadd(v8.uh,v9.uh):sat
v11:10 = vshuff( v19,v18, rconst)
v18 = vxor(v18,v18) // start clearing regs v18..25
} {
v9.uh = vadd(v10.uh,v11.uh):sat
v19 = v18
v11:10 = vshuff( v21,v20, rconst)
} {
v10.uh = vadd(v10.uh,v11.uh):sat
v13:12 = vshuff( v23,v22, rconst)
v20 = v18
} {
v11.uh = vadd(v12.uh,v13.uh):sat
v13:12 = vshuff( v25,v24, rconst)
v21 = v18
} {
v12.uh = vadd(v12.uh,v13.uh):sat
v15:14 = vshuff( v27,v26, rconst)
v22 = v18
} {
v13.uh = vadd(v14.uh,v15.uh):sat
v15:14 = vshuff( v29,v28, rconst)
v23 = v18
} {
v14.uh = vadd(v14.uh,v15.uh):sat
v17:16 = vshuff( v31,v30, rconst)
rconst = #32
v24 = v18
} {
v15.uh = vadd(v16.uh,v17.uh) :sat
v25 = v18
// for d32 mode, this is all the adds we need, but we still
// need to shuffle things to get the right ordering.
//////////////////////////////////////////////////
// 8 shuffles, 8 adds... pack v0..v15 to v0..v7
// continue clearing v26..v31 and v12..v17
v1:0 = vshuff( v1,v0, rconst)
if( p3 ) jump .L_pack_for_d32
} {
v0.uh = vadd(v0.uh,v1.uh):sat
v3:2 = vshuff( v3,v2, rconst)
v26 = v18
} {
v1.uh = vadd(v2.uh,v3.uh):sat
v3:2 = vshuff( v5,v4, rconst)
v27 = v18
} {
v2.uh = vadd(v2.uh,v3.uh):sat
v5:4 = vshuff( v7,v6, rconst)
v28 = v18
} {
v3.uh = vadd(v4.uh,v5.uh):sat
v5:4 = vshuff( v9,v8, rconst)
v29 = v18
} {
v4.uh = vadd(v4.uh,v5.uh):sat
v7:6 = vshuff( v11,v10, rconst)
v30 = v18
} {
v5.uh = vadd(v6.uh,v7.uh):sat
v7:6 = vshuff( v13,v12, rconst)
v31 = v18
} {
v6.uh = vadd(v6.uh,v7.uh):sat
v9:8 = vshuff( v15,v14, rconst)
v17 = v18
rconst = #64
} {
v7.uh = vadd(v8.uh,v9.uh):sat
//////////////////////////////////////////////////
// 4 shuffles, 4 adds... pack v0..v7 to v0..v3
v1:0 = vshuff( v1,v0, rconst)
v16 = v18
} {
v0.uh = vadd(v0.uh,v1.uh):sat
v3:2 = vshuff( v3,v2, rconst)
v15 = v18
vmem(histo_ptr++#1)= v0.new
} {
v1.uh = vadd(v2.uh,v3.uh):sat
v3:2 = vshuff( v5,v4, rconst)
v14 = v18
vmem(histo_ptr++#1)= v1.new
} {
v2.uh = vadd(v2.uh,v3.uh):sat
v5:4 = vshuff( v7,v6, rconst)
v13 = v18
vmem(histo_ptr++#1)= v2.new
} {
v3.uh = vadd(v4.uh,v5.uh):sat
vmem(histo_ptr++#1)= v3.new
v12 = v18
}:endloop1
{
jumpr r31 // return from 'flat' case
}
///// for 'flat' case when everything is in one vector
.L_for_onevec:
{
q2 = and(q2,q0); // get single-word mask
jump .L_last_histo
}
///////////////////////////////////
// continue packing for the d32 case
///////////////////////////////////
.L_pack_for_d32:
// rconst = 32, we have histograms in v0..v15, each reg contains 4x16 sums belonging
// to the 4 output histos. So we need to do a 16x16 transpose to the four results.
// v1:v0 have already been transposed in-place; and v18..v25 are cleared.
// start by transposing all the other pairs in-place, while clearing v26..v31
// Store comments use [histo][vec] indexing: output histo 0..3, vector 0..3.
{
v15:14 = vshuff( v15,v14, rconst)
v27:26 = vcombine(v18,v18)
//p0 = cmp.gt(nbatches,#0) // any batches left after this? (already set)
} {
v13:12 = vshuff( v13,v12, rconst)
v29:28 = vcombine(v18,v18)
in_ptr0 = add( in_ptr0 ,#128) // move to next vector...
} {
v11:10 = vshuff( v11,v10, rconst)
v31:30 = vcombine(v18,v18) // v18..v31 are cleared now
} {
v9:8 = vshuff( v9,v8, rconst)
histo_ptr_tmp = add(histo_ptr,#(4*128)) // ->[1][0] to address first 8 vecs
} {
v7:6 = vshuff( v7,v6, rconst)
histo_ptr = add(histo_ptr_tmp,#(8*128)) // ->[3][0] to address second 8 vecs
} {
loop0( .L_histloop, inner_loopcount) // restore inner loop count
v5:4 = vshuff( v5,v4, rconst)
} {
v3:2 = vshuff( v3,v2, rconst) // now we have 2x32 in each reg.
rconst = #64
} {
v17:16 = vshuff( v15,v13, rconst); // outputs [1][3] and [3][3]
vmem(histo_ptr+#3) = v17.new // [3][3]
} {
v15:14 = vshuff( v14,v12, rconst); // outputs [0][3] and [2][3]
vmem(histo_ptr+#-1) = v15.new // [2][3]
} {
v17:16 = vcombine(v31,v31)
vmem(histo_ptr_tmp+#3) = v16 // [1][3]
} {
v15:14 = vcombine(v31,v31)
vmem(histo_ptr_tmp+#-1) = v14 // [0][3]
} {
v13:12 = vshuff( v11,v9,rconst) // [1][2] and [3][2]
vmem(histo_ptr+#2) = v13.new // [3][2]
} {
v11:10 = vshuff( v10,v8, rconst); // [0][2] and [2][2]
vmem(histo_ptr+#-2) = v11.new // [2][2]
} {
v13:12 = vcombine(v31,v31)
vmem(histo_ptr_tmp+#2) = v12 // [1][2]
} {
v11:10 = vcombine(v31,v31)
vmem(histo_ptr_tmp+#-2) = v10 // [0][2]
} {
v9:8 = vshuff( v7,v5,rconst) // [1][1] and [3][1]
vmem(histo_ptr+#1) = v9.new // [3][1]
} {
v7:6 = vshuff( v6,v4, rconst); // [0][1] and [2][1]
vmem(histo_ptr+#-3) = v7.new // [2][1]
} {
v9:8 = vcombine(v31,v31)
vmem(histo_ptr_tmp+#1) = v8 // [1][1]
} {
v7:6 = vcombine(v31,v31)
vmem(histo_ptr_tmp+#-3) = v6 // [0][1]
} {
v5:4 = vshuff( v3,v1,rconst) // [1][0] and [3][0]
vmem(histo_ptr+#0) = v5.new // [3][0]
} {
v3:2 = vshuff( v2,v0, rconst); // [0][0] and [2][0]
vmem(histo_ptr+#-4) = v3.new // [2][0]
} {
v5:4 = vcombine(v31,v31)
vmem(histo_ptr_tmp+#0) = v4 // [1][0]
histo_ptr = add(histo_ptr, #(4*128))
} {
v3:2 = vcombine(v31,v31)
vmem(histo_ptr_tmp+#-4) = v2 // [0][0]
if(!p0) jumpr r31 // done if no more batches
} {
v1:0 = vcombine(v31,v31)
if(p2) jump:nt .L_last_histo; // just one d32 slice
jump .L_histloop;
}
.L_end:
.size histogram_flat_asm, .L_end-histogram_flat_asm
/////////////////////////////////////////////////////////////////
// Entry point for histogram_d32_asm
//
// void histogram_d32_asm( uint16_t *histo, uint8_t const *data, int depth,
//                         int nbatches, int d32_stride )
// d32-layout variant: each batch covers 4 width units of 32 depth bytes, so
// 4 histograms are produced per batch (nbatches*4 total).  This prologue only
// sets up the loop state -- p3=true and M0=d32_stride select the d32 behavior
// -- then falls into .L_histloop / .L_last_histo inside histogram_flat_asm
// above, which accumulates with vhist and repacks via .L_pack_for_d32.
// q2 masks the final partial d32 slice: each 32-byte quadrant of the vector
// enables lanes 0..(depth mod 32)-1 (or all 32 when depth is a multiple).
// rtmp loads the constant vector const_Count32 = [0..31,0..31,0..31,0..31],
// defined elsewhere in this file (not visible in this chunk).
// Clobbers: same register set as histogram_flat_asm.
/////////////////////////////////////////////////////////////////
.text
.global histogram_d32_asm
.balign 32
.type histogram_d32_asm @function
histogram_d32_asm:
{
rconst = #128
inner_loopcount = add(depth,#31); // starting (depth+31)>>5
#if defined(HIST_WITH_L2PREF)
pf_hi = batch_stride // this is d32 stride
#endif
depth = and(depth,#31)
} {
p3 = cmp.eq(depth,#0) // is a multiple of 32?
inner_loopcount = lsr(inner_loopcount,#5) // # of d32 slices
v31:30.w = vsub(v31:30.w,v31:30.w)
} {
#if defined(HIST_WITH_L2PREF)
pf_lo = combine(rconst.l,inner_loopcount.l) // (128: nd32 )
#endif
inner_loopcount = add(inner_loopcount,#-1) // now it's the proper loopcount
rtmp = add(PC,##const_Count32@PCREL)
} {
if(p3) depth = #32
in_ptr = in_ptr0;
#if defined(HIST_WITH_L2PREF)
l2fetch( in_ptr0, pf_hi_lo )
#endif
} {
depth = vsplatb(depth)
v29:28 = vcombine(v30,v30)
} {
v0 = vmem(rtmp+#0) // get vector [0,1, .. 31, 0,1... 31, 0...31, 0...31 ]
v1 = vsplat(depth)
v27:26 = vcombine(v30,v30)
} {
p2 = !cmp.gt( inner_loopcount, #0)
q2 = vcmp.gt(v1.ub, v0.ub) // last-slice conditional mask (1..32 true in each quadrant)
q1 = vcmp.eq(v0.h,v0.h) // q1 = all 1's always
} {
v25:24 = vcombine(v30,v30)
p3 = cmp.eq(r0,r0) // p3 = true for 'd32' case
m0 = batch_stride
} {
loop0( .L_histloop, inner_loopcount)
v23:22 = vcombine(v30,v30)
v21:20 = vcombine(v30,v30)
} {
v19:18 = vcombine(v30,v30)
v17:16 = vcombine(v30,v30)
} {
v15:14 = vcombine(v30,v30)
v13:12 = vcombine(v30,v30)
} {
v11:10 = vcombine(v30,v30)
v9:8 = vcombine(v30,v30)
} {
v7:6 = vcombine(v30,v30)
v5:4 = vcombine(v30,v30)
in_ptr0 = add(in_ptr0, #128) // address for next outer loop
} {
v3:2 = vcombine(v30,v30)
v1:0 = vcombine(v30,v30)
if(p2) jump:nt .L_last_histo; // just one d32 slice
jump .L_histloop;
}
.L_endd32:
.size histogram_d32_asm, .L_endd32-histogram_d32_asm
|
XiaoMi/nnlib | 14,548 | hexagon/asm_src/gvmmacbbw_h.S | /*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*======================================================================*/
/* FUNCTIONS : gvmmacbbw_asm */
/* */
/* DESCRIPTION */
/* Perform gemm matrix multiply accumulate, */
/* result left at 32bits */
/* */
/* ARCHITECTURE : QDSP6V6 + HVX */
/*======================================================================*/
/* REVISION HISTORY: */
/* ================= */
/* */
/* Author Date Comments */
/* ------------------------------------------------------------- */
/* DJH 03/07/16 created */
/* DJH 05/10/16 added post add for x and y offset*/
/* DJH 07/10/16 rewrote pre-transpose */
/* DJH 09/16/16 fix over prefetch by 16 now 8 */
/*======================================================================*/
/* CYCLE-COUNT: */
/* -> w*D*N/256+11*N/4+24 */
/* */
/* MEMORY */
/* CODESIZE = 1056 bytes */
/* STACK = 48 bytes */
/* ASSUMPTIONS */
/* y and z are 128 byte aligned */
/* x is 8byte aligned */
/* N%4=0 K%16=0 M%32=0 */
/* C MODEL */
/*======================================================================*/
#if 0
void gvmmpybbw_cn(uint8 * a, uint8 * b, int * c, int N, int M, int K) {
int i, j, k;
int32 sum;
uint8 a_val, b_val;
for (j=0; j < M; j++) {
for (i=0; i < N; i++) {
sum = 0;
for (k=0; k < K; k++) {
a_val = a[i*K+k];
b_val = b[k*M+j];
sum += a_val * b_val ;
}
c[i*M+j] = sum;
}
}
return;
}
#endif
/*=============================================================================*/
.text
.file "gvmmacbbw_h.S"
.global gvmmacbbw_asm
.balign 32
.type gvmmacbbw_asm, @function
gvmmacbbw_asm:
/*=============================================================================*/
#define ptr_x r0 //data
#define ptr_yi r1 //weights
#define ptr_z r2 //results
#define out_width r3 //out_width
#define skip_back r4 //(out_width4 - outwidth-1)*skip back*stride*depth
#define m r5 //is stride of weights matrix *32 always 32 wide
#define stride r6 //stride*depth
#define filt_width r7 //depth*filt_width
#define out_height r8 //number of vertical lines to perform
#define PREFETCH 64 //hwo far ahead to fetch data of ptrs
/*=============================================================================*/
#define ki r9 //
#define ptr_x0 r21
#define ptr_x1 r20
#define ptr_x2 r22
#define ptr_x3 r11
#define ptr_y r10 //
#define out_width4 r24
#define col_count r23
//01234567
#define x07x04x03x00 r13:12 //11-----1
#define x07x04 r13 //11-----1
#define x03x00 r12 //1------1
#define x0fx0cx0bx08 r15:14 //1111---1
#define x0fx0c r15 //1111---1
#define x0bx08 r14 //111----1
#define x17x14x13x10 r19:18 //11------
#define x17x14 r19 //11------
#define x13x10 r18 //1-------
#define x1fx1cx1bx18 r17:16 //1111----
#define x1fx1c r17 //1111----
#define x1bx18 r16 //111-----
#define x27x24x23x20 r13:12 //---111--
#define x27x24 r13 //---111--
#define x23x20 r12 //---11---
#define x2fx2cx2bx28 r19:18 //---1111-
#define x2fx2c r19 //---11111
#define x2bx28 r18 //---1111-
#define x37x34x33x30 r15:14 //----11--
#define x37x34 r15 //----11--
#define x33x30 r14 //----1---
#define x3fx3cx3bx38 r17:16 //----1111
#define x3fx3c r17 //----1111
#define x3bx38 r16 //----111-
/*=============================================================================*/
#define z0 v0 //
#define z1 v1 //
#define z1z0 v1:0 //
#define z2 v2 //
#define z3 v3 //
#define z3z2 v3:2 //
#define y0 v8 //
#define y1 v9 //
#define y2 v10 //
#define y3 v11 //
#define vzero v12 //
#define WO 1
/*=============================================================================*/
{
stride = memw(sp+#0<<2) //extract stride*depth
filt_width = memw(sp+#1<<2) //extract filt_width*depth
m = asl(m, #2) //ints
} {
out_height = memw(sp+#2<<2) //number of output lines
allocframe(#64) //
} {
M0 = m //
memd(sp+#0) = r17:16 //
memd(sp+#8) = r19:18 //
} {
ki = lsr(filt_width, #4) //k / 16
m = mpyi(m, #-3)
memd(sp+#16) = r21:20 //
memd(sp+#24) = r23:22 //
} {
memd(sp+#32) = r25:24 //
memd(sp+#40) = r27:26 //
vzero = #0 //
out_width4 = add(out_width, #3)
} {
M1 = m //
out_width4 = lsr(out_width4, #2)
ki = add(ki, #-1) //
}
/*============================================================================*/
.balign 32
.L_height:
{
loop1(.L_width, out_width4) //[ , P]for(i=0; i < n; i+=4){
col_count = out_width
out_height = add(out_height, #-1)
ptr_y = ptr_yi //[ , P]
}
.balign 32
.L_width:
{
#if WO
y0 = vmem(ptr_y++#2) //[0, 0]32x4
#endif
dcfetch(ptr_x+#PREFETCH) //[0, 0]prefetch next line
} {
#if WO
y1 = vmem(ptr_y+#-1) //[0, 1]32x4
#endif
dcfetch(ptr_x1+#PREFETCH) //[0, 1]prefetch next line
ptr_x0 = ptr_x
ptr_x1 = add(ptr_x, stride) // x1 = x0 + depth*stride
} {
x0fx0cx0bx08 = memd(ptr_x0+#8) //[0, 2]
z0 = vmem(ptr_z++M0)
#if WO
#endif
} {
z1 = vmem(ptr_z++M0)
#if WO
#endif
x07x04x03x00 = memd(ptr_x0++#16) //[0, 2]
ptr_x2 = add(ptr_x1, stride) // x2 = x1 + depth*stride
ptr_x3 = addasl(ptr_x1, stride,#1)// x3 = x2 + depth*stride
} {
z2 = vmem(ptr_z++M0)
#if WO
#endif
x1fx1cx1bx18 = memd(ptr_x1+#8) //[0, 3]
} {
z3 = vmem(ptr_z++M1)
#if WO
#endif
x17x14x13x10 = memd(ptr_x1++#16) //[0, 3]
loop0(.L_filt_width, ki) //[P, 9]ki is
ptr_x = addasl(ptr_x, stride, #2) //ptr_x += 4*stride
}
.balign 32
.L_filt_width:
{
dcfetch(ptr_x2+#PREFETCH) //[0, 4]prefetch next line
z0.uw += vrmpy(y0.ub, x03x00.ub) //[0, 4]
z1.uw += vrmpy(y0.ub, x13x10.ub) //[0, 4]
#if WO
y2 = vmem(ptr_y++#2) //[0, 4]32x4
#endif
} {
dcfetch(ptr_x3+#PREFETCH) //[0, 5]prefetch next line
z0.uw += vrmpy(y1.ub, x07x04.ub) //[0, 5]
z1.uw += vrmpy(y1.ub, x17x14.ub) //[0, 5]
#if WO
y3 = vmem(ptr_y+#-1) //[0, 5]32x4
#endif
} {
z0.uw += vrmpy(y2.ub, x0bx08.ub) //[0, 6]
z1.uw += vrmpy(y2.ub, x1bx18.ub) //[0, 6]
x2fx2cx2bx28 = memd(ptr_x2+#8) //[0, 6]
x27x24x23x20 = memd(ptr_x2++#16) //[0, 6]
#if WO
#endif
} {
z0.uw += vrmpy(y3.ub, x0fx0c.ub) //[0, 7]
z1.uw += vrmpy(y3.ub, x1fx1c.ub) //[0, 7]
x3fx3cx3bx38 = memd(ptr_x3+#8) //[0, 7]
x37x34x33x30 = memd(ptr_x3++#16) //[0, 7]
#if WO
#endif
} {
z2.uw += vrmpy(y0.ub, x23x20.ub) //[0, 8]
z3.uw += vrmpy(y0.ub, x33x30.ub) //[0, 8]
#if WO
y0 = vmem(ptr_y++#2) //[1, 0]32x4
#endif
dcfetch(ptr_x0+#PREFETCH) //[1, 0]prefetch next line
} {
z2.uw += vrmpy(y1.ub, x27x24.ub) //[0, 9]
z3.uw += vrmpy(y1.ub, x37x34.ub) //[0, 9]
#if WO
y1 = vmem(ptr_y+#-1) //[1, 1]32x4
#endif
dcfetch(ptr_x1+#PREFETCH) //[1, 1]prefetch next line
} {
z2.uw += vrmpy(y2.ub, x2bx28.ub) //[0,10]
z3.uw += vrmpy(y2.ub, x3bx38.ub) //[0,10]
x0fx0cx0bx08 = memd(ptr_x0+#8) //[1, 2]
x07x04x03x00 = memd(ptr_x0++#16) //[1, 2]
#if WO
#endif
} {
z2.uw += vrmpy(y3.ub, x2fx2c.ub) //[0,11]
z3.uw += vrmpy(y3.ub, x3fx3c.ub) //[0,11]
x1fx1cx1bx18 = memd(ptr_x1+#8) //[1, 3]
x17x14x13x10 = memd(ptr_x1++#16) //[1, 3]
#if WO
#endif
}:endloop0
{
dcfetch(ptr_x2+#PREFETCH) //[1, 4]prefetch next line
z0.uw += vrmpy(y0.ub, x03x00.ub) //[1, 4]
z1.uw += vrmpy(y0.ub, x13x10.ub) //[1, 4]
#if WO
y2 = vmem(ptr_y++#2) //[1, 4]32x4
#endif
} {
dcfetch(ptr_x3+#PREFETCH) //[1, 5]prefetch next line
z0.uw += vrmpy(y1.ub, x07x04.ub) //[1, 5]
z1.uw += vrmpy(y1.ub, x17x14.ub) //[1, 5]
#if WO
y3 = vmem(ptr_y+#-1) //[1, 5]32x4
#endif
} {
z0.uw += vrmpy(y2.ub, x0bx08.ub) //[1, 6]
z1.uw += vrmpy(y2.ub, x1bx18.ub) //[1, 6]
x2fx2cx2bx28 = memd(ptr_x2+#8) //[1, 6]
x27x24x23x20 = memd(ptr_x2++#16) //[1, 6]
#if WO
#endif
} {
z0.uw += vrmpy(y3.ub, x0fx0c.ub) //[1, 7]
z1.uw += vrmpy(y3.ub, x1fx1c.ub) //[1, 7]
x3fx3cx3bx38 = memd(ptr_x3+#8) //[1, 7]
x37x34x33x30 = memd(ptr_x3++#16) //[1, 7]
} {
vmem(ptr_z++M0) = z0 //[E, ]
#if WO
#endif
z2.uw += vrmpy(y0.ub, x23x20.ub) //[1, 8]
z3.uw += vrmpy(y0.ub, x33x30.ub) //[1, 8]
p0 = cmp.gt(col_count, #1)
} {
if(p0)vmem(ptr_z++M0) = z1 //[E, ]
#if WO
#endif
z2.uw += vrmpy(y1.ub, x27x24.ub) //[1, 9]
z3.uw += vrmpy(y1.ub, x37x34.ub) //[1, 9]
p0 = cmp.gt(col_count, #2)
} {
z2.uw += vrmpy(y2.ub, x2bx28.ub) //[1,10]
z3.uw += vrmpy(y2.ub, x3bx38.ub) //[1,10]
} {
z2.uw += vrmpy(y3.ub, x2fx2c.ub) //[1,11]
z3.uw += vrmpy(y3.ub, x3fx3c.ub) //[1,11]
if(p0)vmem(ptr_z++M0) = z2.new //[E, ]
#if WO
#endif
p0 = cmp.gt(col_count, #3) //
} {
if(p0)vmem(ptr_z++M0) = z3 //[E, ]
#if WO
#endif
col_count = add(col_count, #-4) //
ptr_y = ptr_yi //[ , P]
}:endloop1
{
ptr_x = add(ptr_x, skip_back) //[E, ]
p1 = cmp.eq(out_height, #0)
if(!p1.new) jump:t .L_height
}
/*=============================================================================*/
{ r17:16 = memd(sp+#0) //restore stack
r19:18 = memd(sp+#8) //Q
} {
r21:20 = memd(sp+#16) //Q
r23:22 = memd(sp+#24) //Q
} {
r25:24 = memd(sp+#32) //Q
r27:26 = memd(sp+#40) //Q
} {
dealloc_return //Q
}
.L_end:
/*=============================================================================*/
.size gvmmacbbw_asm, .L_end-gvmmacbbw_asm
/* ===== file boundary: hexagon/asm_src/repstreamN_h.S (XiaoMi/nnlib) ===== */
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
CODESIZE 160 bytes
STACK 0 bytes
Description
Stride 1 or 2 Shuffle the stream with itself to feed the new vrmpy ops
and while we are at it, do the alignment to save ops
void repstream2_asm(uint8_t *x, uint8_t *y,
int w, int d, int fill_height, int rpad_lpad, int stride_w,
uint8_t *,
int buf_height, int in_offset);
*/
/* --------------------------------------------------------------------------- */
/* --------------------------------------------------------------------------- */
/* repstreamN_asm -- align and self-shuffle an input stream into a circular */
/* buffer, interleaving two offset copies of the stream (vshuff of the two */
/* valign'd streams) so the data is laid out for the paired vrmpy ops. */
/* Left/right padding positions are filled with the splatted activation zero. */
/* */
/* Arguments (Hexagon ABI r0-r5; remaining args on the caller's stack -- */
/* after allocframe(#56) the first stack arg is at sp+#16<<2): */
/* r0 ptr_x0 raw input rows */
/* r1 ptr_z1 output circular buffer (aligned/shuffled data) */
/* r2 next_in_width padded input width */
/* r3 in_depth input depth */
/* r4 buf_fill number of lines to fill (register later reused */
/* as stride_width) */
/* r5 rpad_lpad packed pads: right pad in the high field (>>18 */
/* here), left pad in the low halfword */
/* sp+#16<<2 stride_width, sp+#17<<2 cbuf base, sp+#18<<2 buf_height, */
/* sp+#19<<2 in_offset (activation zero), sp+#20<<2 num accumulators */
/* Clobbers: r6-r15 freely; r16-r19 saved on the frame; v0-v8 per the */
/* defines below; loop0/loop1; p1-p3. */
/* --------------------------------------------------------------------------- */
.text
.global repstreamN_asm
.balign 32
.type repstreamN_asm, @function
repstreamN_asm:
/* --------------------------------------------------------------------------- */
#define ptr_x0 r0 //input data raw
#define ptr_z1 r1 //output cbuf pointer aligned/shuffled data
#define next_in_width r2 //width of padded input
#define in_depth r3 //input depth
#define buf_fill r4 //number of lines to fill
#define rpad_lpad r5 //right and left pad on input requirement packed
#define stride_width r4 //stride_width any value
#define cbuf_base r14 //base of the circular buffer
#define buf_height r15 //number of total logical lines
#define inzero r12 //activation zero value
#define num_acc r18 //number of accumulators to interleave for must be even
/* --------------------------------------------------------------------------- */
#define cbuf_eob r16 //end of circ buf
#define cbuf_size r17 //size of circular buffer
#define width r10 //width in 128byte block
#define width_cnt r13 //width left in 128byte block
#define rpad r11 //right pad used to minimize stray maxes
#define lpad r6 //left pad that gets removed
#define cm4 r7 //shuffle ints
#define ptr_x1 r8 //temp input ptr
#define ptr_x2 r9 //width of circ buffer 64*(next_in_width+3-lpad+rpad)&(-4)
#define end_stream r19 //number of 128b blocks for 2nd stream to advance
/* --------------------------------------------------------------------------- */
#define x3x2x1x0 v0 //input data
#define x7x6x5x4 v1 //next input data
#define y3y2y1y0 v4 //aligned input data
#define y7y6y5y4 v5 //delayed aligned input data
#define u3u2u1u0 v8 //delayed by 2 aligned data
#define v3v2v1v0 v6 //delayed by 2 aligned data
#define y3x3y2x2 v3 //shuffled delayed input
#define y1x1y0x0 v2 //shuffled delayed input
#define y3x3y2x2_y1x1y0x0 v3:2 //shuffled delayed input
#define vin_zero v7 //splatted activation zero, used for pad fill
#define PV(VSRC) .word (0x1DFFE020+VSRC) //debug vec reg
#define PS(SSRC) .word (0x1DFFE100+SSRC) //debug vec reg
/* --------------------------------------------------------------------------- */
{ allocframe(#56) //first caller stack arg at sp+#((56+8)/4)=16 ints
width = add(next_in_width, #3) //[S, 0]round to nearest 4
} {
width = sub(width.L, rpad_lpad.L) //[S, 1]next_in_width-lpad+3
memd(sp+#0<<2) = r17:16 //[S, 1]save
inzero = memw(sp+#19<<2) //[S, 8]get zero of activations
} {
rpad = lsr(rpad_lpad, #18) //[S, 2]next_in_width-lpad+3+rpad
width = lsr(width, #2) //[S, 2]number of 128byte blks to fetch
buf_height = memw(sp+#18<<2) //[S, 2]height of buffer num 32depth
} {
memd(sp+#2<<2) = r19:18 //[S, 3]save
cbuf_size = mpyi(buf_height, in_depth) //[S, 3]total size of buffer
rpad = add(rpad, width) //[S, 3]total width of active buffer
inzero = vsplatb(inzero) //[S, 3]
} {
buf_fill = mpyi(buf_fill, in_depth) //[S, 4]
cbuf_size = mpyi(cbuf_size, rpad) //[S, 4]
lpad = zxth(rpad_lpad) //[S, 4]alignment % 128
} {
buf_fill = lsr(buf_fill, #5) //[S, 5]num in_depth*height/32
lpad = asl(lpad, #5) //[S, 5]alignment*32
num_acc = memw(sp+#20<<2) //[S, 5]how many accumulators must we parallelize
} {
num_acc = asl(num_acc, #4) //[S, 6]if 6 then shift 2 stream by 96bytes
loop1(.L_rows, buf_fill) //[S, 6]loop over num lines
stride_width = memw(sp+#16<<2) //[S, 6] stride_width
} {
stride_width = mpyi(stride_width, num_acc) //[S, 7]get start of buffer to interleave
lpad = and(lpad, #127) //[S, 7]round padding to 128
cbuf_base = memw(sp+#17<<2) //[S, 7]get buffer base
cbuf_size = asl(cbuf_size, #3) //[S, 7]complete buffer size
} {
stride_width = add(stride_width, lpad) //[S, 8]compute offset to upper accumulators
cbuf_eob = add(cbuf_base, cbuf_size) //[S, 8]figure end of buffer
} {
vin_zero = vsplat(inzero) //[S, 9]splat activation zero for pad
cm4 = #-4 //[S, 9]shuffle ints
cbuf_eob = add(cbuf_eob, #-4) //[S, 9]deal with >= case
end_stream = lsr(stride_width, #7) //[S, 9]when 2nd stream hits end of buffer
}
/* --------------------------------------------------------------------------- */
.balign 32
.L_rows:
{ p3 =sp1loop0(.L_cols, rpad) //[P, 0]inner loop width
width_cnt = add(width, #-1) //[P, 0]
} {
ptr_x2 = add(ptr_x0, stride_width) //[P, 1]2nd (offset) copy of the stream
x3x2x1x0 = vmem(ptr_x0+#0) //[0, 0]load 1st 128
ptr_x1 = add(ptr_x0, #128) //[P, 1]
} {
y3y2y1y0 = vmem(ptr_x2++#1) //[P, 2]load 1st 128
p1 = cmp.gt(width_cnt, #0) //[0, 1]
p2 = cmp.gt(width_cnt, end_stream) //[0, 1]
x7x6x5x4 = vin_zero //[0, 1]
}
.L_cols:
{ width_cnt = add(width_cnt, #-1) //[0, 2]
if(p3) vmem(ptr_z1++#1) = y3x3y2x2 //[0, 2]empty pipe
y7y6y5y4 = vin_zero //[0, 2]
} {
if(p1) x7x6x5x4.tmp = vmem(ptr_x1++#1):nt //[0, 3]past end: keep zero fill (p1 false)
u3u2u1u0 = valign(x7x6x5x4,x3x2x1x0, lpad) //[0, 3]aligned data next 128
x3x2x1x0 = x7x6x5x4 //[0, 3]update pipe
} {
if(p2) y7y6y5y4.tmp = vmem(ptr_x2++#1):nt //[0, 4]
v3v2v1v0 = valign(y7y6y5y4,y3y2y1y0, stride_width) //[0, 4]aligned data next 128
y3y2y1y0 = y7y6y5y4 //[0, 4]update pipe
x7x6x5x4 = vin_zero //[0, 4]
} {
y3x3y2x2_y1x1y0x0 = vshuff(v3v2v1v0,u3u2u1u0,cm4) //[0, 5]shuffle up for new vrmpy
vmem(ptr_z1++#1) = y1x1y0x0.new //[0, 5]empty pipe
p1 = cmp.gt(width_cnt, #0) //[0, 5]
p2 = cmp.gt(width_cnt, end_stream) //[0, 5]
}:endloop0
/* --------------------------------------------------------------------------- */
{ vmem(ptr_z1++#1) = y3x3y2x2 //[0, 1]empty pipe
ptr_x0 = addasl(ptr_x0, next_in_width, #5) //[E, 0]update input next row
} {
p2 = cmp.gt(ptr_z1, cbuf_eob) //[E, 1]if circ buf write and end of buf
if(p2.new) ptr_z1 = sub(ptr_z1, cbuf_size) //[E, 1]then subtract buf size take to 1st row
}:endloop1
{
r17:16 = memd(sp+#0<<2) //[T, 0]
r19:18 = memd(sp+#2<<2) //[T, 0]
} {
dealloc_return //[T, 1]return to caller
}
/* --------------------------------------------------------------------------- */
.L_end:
.size repstreamN_asm, .L_end-repstreamN_asm
/* ===== file boundary: hexagon/asm_src/dwconv2dbbb_d32_v60_h.S (XiaoMi/nnlib) ===== */
/*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
* FUNCTIONS : dwconv2dbbb_v60_asm
*
* DESCRIPTION
* Perform 2d convolution using elements along depth, do only simple
* convolution.
* Sums are scaled and saturated to 8bits. Max and Min accumulations are kept.
*
* ARCHITECTURE : QDSP6V60 + HVX
*
* REVISION HISTORY:
* =================
*
* Author Date Comments
* -------------------------------------------------------------
* DJH 07/ 6/17 created
*
* MEMORY
* CODESIZE = 640 bytes + 512 bytes of control tables
* STACK = 80 bytes
* ASSUMPTIONS
* width multiple of 4 depth multiple of 32 aligned to 128bytes
* MODEL
#if 0
* void dwconv2d_cn(
* uint8_t * in_buf, int in_width, int in_height, int depth,
* int stride_width, int stride_height,
* int in_offset, int8_t * filt, int filt_width, int filt_height,
* int * out_buf, int out_width, int out_height, int adj_x, int adj_y)
* {
* int out_y, in_y_base, out_x, in_x_base;
* int out_z, filt_z, filt_y, filt_x, in_element, filt_element, sum;
* int * outstripe;
* uint8_t * instripe;
* int8_t * filtstripe;
*
* for (out_y = 0; out_y < out_height; out_y++) {
* in_y_base = out_y * stride_height - adj_y;
* for (out_x = 0; out_x < out_width; out_x++) {
* in_x_base = out_x * stride_width - adj_x;
* outstripe = out_buf+(depth*(out_x+ out_width*out_y));
* for (out_z = 0; out_z < depth; out_z++) {
* sum = 0;
* for (filt_y = 0; filt_y < filt_height; filt_y++) {
* if ((in_y_base + filt_y) >= in_height) continue;
* if ((in_y_base + filt_y) < 0) continue;
* for (filt_x = 0; filt_x < filt_width; filt_x++) {
* if ((in_x_base + filt_x) >= in_width) continue;
* if ((in_x_base + filt_x) < 0) continue;
*
* filtstripe = filt+(depth*(filt_x+ filt_width*filt_y));
* filt_element = filtstripe[out_z];
*
* instripe = in_buf+depth*(in_x_base+filt_x+in_width*(in_y_base+filt_y));
* in_element = instripe[out_z] - in_offset;
*
* sum += in_element*filt_element;
* }
* }
* outstripe[out_z] = sum;
* }
* }
* }
* return;
* }
#endif
*/
/* =========================================================================== */
/* =========================================================================== */
/* dwconv2dbbb_v60_asm -- depthwise 3-wide convolution on d32-layout data, */
/* horizontal stride 1 (vertical stride taken from the stack arg), byte in / */
/* byte out. Accumulators are scaled by vrecip and zshift, packed to ubytes */
/* with saturation, and running per-lane max/min are written to ptr_max. */
/* */
/* Arguments (Hexagon ABI r0-r5; stack args at sp+#20<<2 onward, since */
/* allocframe(#72) puts the first caller stack word at (72+8)/4 = 20 ints): */
/* r0 ptr_xi input activations r1 ptr_wi weights */
/* r2 ptr_zi output bytes r3 next_in_width_depth */
/* r4 next_out_width_depth r5 next_in_width_32 */
/* sp+#20<<2 next_out_width_32 sp+#21<<2 in_depth (mult of 32) */
/* sp+#22<<2 out_width sp+#23<<2 out_height */
/* sp+#24<<2 filt_height sp+#25<<2 ptr_max (max/min out) */
/* sp+#26<<2 recip_level sp+#27<<2 filt_sum (gemsumb) */
/* sp+#28<<2 stride_vert sp+#29<<2 zshift */
/* sp+#30<<2 perm_ctrl (2 vectors of vdelta/vrdelta controls) */
/* Clobbers: r6-r15 freely; r16-r27 saved on the 72-byte frame; the HVX regs */
/* named in the defines below; loop0/loop1; p0. */
/* =========================================================================== */
.text
.file "dwconv2dbbb_d32_v60_h.S"
.global dwconv2dbbb_v60_asm
.balign 32
.type dwconv2dbbb_v60_asm, @function
dwconv2dbbb_v60_asm:
/* =========================================================================== */
//h stride assumed 1 vstride 1 or 2 filt width assumed 3 - hstride 2 requires new function
#define ptr_xi r0 //data
#define ptr_wi r1 //weights
#define ptr_zi r2 //results
#define next_in_width_depth r3 //width*depth*(stride_horz==1)
#define next_out_width_depth r4 //next output line amount in bytes
#define next_in_width_32 r5 //width*32*(stride_horz==1)
#define next_out_width_32 r16 //0next output line amount in bytes
#define in_depth r17 //1 total in depth split into rows of depth 32
#define out_width r18 //2is amount of work to be done
#define out_height r19 //3 number of vertical lines to perform
#define filt_height r20 //4 filt_height lines per filter
#define ptr_max r21 //5 maximum and minimum buffer
#define recip_level r22 //6 255 / (MAX - MIN) - used to scale to bytes
#define filt_sumi r23 //7 gemsumb
#define stride_vert r24 //8 vertical stride is an option to save ops
#define zshift r6 //9 spare input
#define perm_ctrl r25 //10 ptr to the fancy data shuffling controls
//-----------------------------------------------------------------
#define s8 r7 //const = 8
#define c4 r6 //deal words
#define out_in_wide_deep_128 r9:8 //
#define out_wide_deep_128 r9 //advance ptr 128 along and pack to current line start
#define in_wide_deep_high_128 r8 //width*depth*filt_height - 128
#define depth r10 //current depth used
#define ptr_x0 r12 //
#define ptr_z0 r13 //
#define ptr_z0_ptr_x0 r13:12 //
#define ptr_x1 r11 //
#define ptr_z1 r14 //
#define ptr_w r15 //
#define filt_sum r22 //
#define col_count r26 //
#define PV(VSRC) .word (0x1DFFE020+VSRC) //debug vec reg
//-----------------------------------------------------------------
#define vrecip v0 //
#define woffset v1 //
#define s00 v2 //
#define s01 v3 //
#define s01s00 v3:2 //
#define s10 v4 //
#define s11 v5 //
#define s11s10 v5:4 //
#define d1_d0 v11:10 //
#define d3_d2 v13:12 //
#define d0 v10 //
#define d1 v11 //
#define d2 v12 //
#define d3 v13 //
#define d1d0 v8 //
#define d3d2 v16 //
#define d3_d0 v17 //
#define perm2 v6 //
#define perm3 v7 //
#define y0 v21 //
#define y1 v24 //
#define x0 v29 //
#define x1 v9 //
#define z3210 v26 //
#define z5432 v28 //
#define z5476 v27 //
#define w_210 v22 //
#define u_210 v2 //
#define w210_ v23 //
#define maxo_maxe v31:30 //
#define mino_mine v15:14 //
#define maxe v30 //
#define mine v14 //
#define maxo v31 //
#define mino v15 //
#define SSR <<1:rnd:sat:shift //simplify mpy instruction
/*=============================================================================*/
{ allocframe(#72) //0th entry on stack (72+8)/4=20
maxe = #0 //
s8 = #8 //shift by 8
} {
memd(sp+#16) = r21:20 //
memd(sp+#24) = r23:22 //
r23 = ##0x7fffffff //max pos
} {
memd(sp+#0) = r17:16 //
memd(sp+#8) = r19:18 //
mine = vsplat(r23) //init min to INT_MAX
} {
memd(sp+#32) = r25:24 //
memd(sp+#40) = r27:26 //
} {
perm_ctrl = memw(sp+#30<<2) //
next_out_width_32 = memw(sp+#20<<2) //
maxe.w = vsub(maxe.w, mine.w) //init max to -INT_MAX
} {
in_depth = memw(sp+#21<<2) //
out_width = memw(sp+#22<<2) //
} {
out_height = memw(sp+#23<<2) //
recip_level = memw(sp+#26<<2) //
} {
vrecip = vsplat(recip_level) //
perm2 = vmem(perm_ctrl+#0) //
ptr_max = memw(sp+#25<<2) //
} {
filt_sumi = memw(sp+#27<<2) //
perm3 = vmem(perm_ctrl+#1) //
out_wide_deep_128=add(next_out_width_depth,#-128)//
in_wide_deep_high_128=add(next_in_width_depth,#-128)//
} {
filt_height = memw(sp+#24<<2) //
in_depth = lsr(in_depth, #5) //1/32
col_count = out_width //
} {
filt_height = add(filt_height, #-1) //correct for vertical loop
ptr_z0 = ptr_zi //
ptr_x0 = ptr_xi //
}
/* --------------------------------------------------------------------------- */
.balign 32
.L_height:
.L_width:
{ loop1(.L_depth, in_depth) //number of 32 depths
woffset.cur = vmem(filt_sumi+#0) //
ptr_z1 = ptr_z0 //
s01s00 = vcombine(woffset,woffset) //filter offset * xoffset and bias
} {
loop0(.L_vloop, filt_height) //can have a filter of Nx3 stride=1
ptr_x1 = ptr_x0 //
x0.tmp = vmem(ptr_x0+#0) //[0,0]
y0.b = vshuff(x0.b) //[0,0]
} {
x1.tmp = vmem(ptr_x1+#1) //[0,1]
y1 = vrdelta(x1, perm2) //[0,1]
ptr_x1 =add(ptr_x1, next_in_width_depth) //[0,1]move to next pt in same depth position
} {
z3210.b = vshuff(y0.b) //[0,2]x3210
w_210 = vmem(ptr_wi+#0) //[0,2]
s11s10 = vcombine(woffset,woffset) //
filt_sum = add(filt_sumi, #128) //
} {
z5476 = vdelta(y1, perm3) //[0,3]x7654
u_210.tmp = vmem(ptr_wi+#0) //[0,3]
w210_.w = vasl(u_210.w, s8) //[0,3]
ptr_w = add(ptr_wi, #128) //restart filter stream
}
/* --------------------------------------------------------------------------- */
.balign 32
.L_depth:
.L_vloop:
{ s00.w += vrmpy(z3210.ub, w_210.b) //[0,4]filter even output
z5432.h = vshuffo(z5476.h, z3210.h) //[0,4]
x0.tmp = vmem(ptr_x1+#0) //[1,0]
y0.b = vshuff(x0.b) //[1,0]
} {
s01.w += vrmpy(z3210.ub, w210_.b) //[0,5]z3210 filter odd output
x1.tmp = vmem(ptr_x1+#1) //[1,1]
y1 = vrdelta(x1, perm2) //[1,1]
ptr_x1 =add(ptr_x1, next_in_width_depth) //[1,1]move to next pt in same depth position
} {
s10.w += vrmpy(z5432.ub, w_210.b) //[0,6]z5432
z3210.b = vshuff(y0.b) //[1,2]x3210
w_210 = vmem(ptr_w+#0) //[1,2]
} {
s11.w += vrmpy(z5432.ub, w210_.b) //[0,7]z5432
z5476 = vdelta(y1, perm3) //[1,3]x7654
u_210.tmp = vmem(ptr_w++#1) //[1,3]
w210_.w = vasl(u_210.w, s8) //[1,3]
}:endloop0 //max accumulator=9*255=8f7=12bits-2^24
/* --------------------------------------------------------------------------- */
{ s00.w += vrmpy(z3210.ub, w_210.b) //[1,4]z3210
z5432.h = vshuffo(z5476.h, z3210.h) //[1,4]
ptr_x0 = add(ptr_x0, next_in_width_32) //update input ptr to next depth position
loop0(.L_vloop, filt_height) //can have a filter of Nx3 stride = 1
} {
s01.w += vrmpy(z3210.ub, w210_.b) //[1,5]z3210
zshift = memw(r29+#29<<2) //r29 = sp: reload zshift stack arg (r6 was reused)
maxe.w = vmax(maxe.w, s00.w) //find max
mine.w = vmin(mine.w, s00.w) //find min
} {
s10.w += vrmpy(z5432.ub, w_210.b) //[1,6]z5432
mine.w = vmin(mine.w, s01.w) //find min
s00.w = vasl(s00.w,zshift) //pre-scale accumulator before requant
ptr_z0 = add(ptr_z0, next_out_width_32) //update output ptr to next depth
} {
maxe.w = vmax(maxe.w, s01.w) //find max
} {
s11.w += vrmpy(z5432.ub, w210_.b) //[1,7]z5432
s01.w = vasl(s01.w,zshift)
maxe.w = vmax(maxe.w, s10.w) //find max
ptr_x1 = ptr_x0 //
} {
d0.w = vmpye(s00.w, vrecip.uh) //multiply by 1/max
s10.w = vasl(s10.w,zshift)
mine.w = vmin(mine.w, s10.w) //find min
} {
s11.w = vasl(s11.w,zshift)
d1.w = vmpye(s01.w, vrecip.uh) //multiply by 1/max
x0.tmp = vmem(ptr_x1+#0) //[0,0]read first 4 depths
y0.b = vshuff(x0.b) //[0,0]1st part of shuffle 4
} {
d0.w += vmpyo(s00.w, vrecip.h):SSR //multiply by 1/max
maxe.w = vmax(maxe.w, s11.w) //find max
x1.tmp = vmem(ptr_x1+#1) //[0,1]load 2nd 4 depths
y1 = vrdelta(x1, perm2) //[0,1]1st part of shuffle 4
} {
d1.w += vmpyo(s01.w, vrecip.h):SSR //multiply by 1/max
} {
d2.w = vmpye(s10.w, vrecip.uh) //multiply by 1/max
} {
woffset = vmem(filt_sum++#1) //read in sum of taps
ptr_x1 =add(ptr_x1, next_in_width_depth) //[0,1]update ptr to next logical line
z5476 = vdelta(y1, perm3) //[0,3]x7654
d3.w = vmpye(s11.w, vrecip.uh) //multiply by 1/max
} {
d2.w += vmpyo(s10.w, vrecip.h):SSR //multiply by 1/max
d1d0.h = vpacke(d1.w, d0.w) //take upper 16bits of rnded acc
} {
d3.w += vmpyo(s11.w, vrecip.h):SSR //multiply by 1/max
} {
mine.w = vmin(mine.w, s11.w) //find min of acc
w_210 = vmem(ptr_w+#0) //[0,2]
s00 = woffset //init accumulators
s01 = woffset //init accumulators
} {
d3d2.h = vpacke(d3.w, d2.w) //take upper 16bits of rnded acc
s10 = woffset //init accumulators
stride_vert = memw(sp+#28<<2) //
c4 = #-4 //deal words
} {
s11 = woffset //init accumulators
u_210.tmp = vmem(ptr_w++#1) //[0,3]
w210_.w = vasl(u_210.w, s8) //[0,3]adjust taps to odd locations
z3210.b = vshuff(y0.b) //[0,2]x3210
} {
d3_d0.ub = vpack(d3d2.h, d1d0.h):sat //deal into sequence
vmem(ptr_z1+#0) = d3_d0.new //store quantized bytes
ptr_z1 = ptr_z0 //next output depth
}:endloop1 //end depth
/* --------------------------------------------------------------------------- */
{ ptr_z0_ptr_x0 = vsubw(ptr_z0_ptr_x0, out_in_wide_deep_128) //next inputs/outputs
col_count = add(col_count, #-4) //dec width count
p0 = cmp.eq(col_count, #4) //next line
if(!p0.new) jump:t .L_width //
}
/* --------------------------------------------------------------------------- */
{ ptr_zi=add(ptr_zi,next_out_width_depth) //increment output ptr
ptr_xi+=mpyi(stride_vert,next_in_width_depth)//increment input ptr
col_count = out_width //reset row count
out_height = add(out_height, #-1) //
} {
ptr_z0 = ptr_zi //update to next output
ptr_x0 = ptr_xi //update to next input
p0 = cmp.eq(out_height, #0) //
if(!p0.new) jump:t .L_height //next line
}
/* --------------------------------------------------------------------------- */
{ loop0(.L_peak, #4) //4 deal/reduce passes fold max/min across lanes
maxo_maxe = vdeal(maxe, maxe, c4) //deal out odd and even
r17:16 = memd(sp+#0) //restore stack
r19:18 = memd(sp+#8) //restore
}
.L_peak:
{ maxe.w = vmax(maxe.w, maxo.w) //reduce
mino_mine = vdeal(mine, mine, c4) //
} {
mine.w = vmin(mine.w, mino.w) //
} {
maxo_maxe = vdeal(maxe, maxe, c4) //
}:endloop0
{ maxe.w = vmax(maxe.w, maxo.w) //
vmem(ptr_max+#0) = maxe.new //store max
mino_mine = vdeal(mine, mine, c4) //
r25:24 = memd(sp+#32) //restore
} {
mine.w = vmin(mine.w, mino.w) //
vmem(ptr_max+#1) = mine.new //store min
r27:26 = memd(sp+#40) //restore
}
/* --------------------------------------------------------------------------- */
{
r21:20 = memd(sp+#16) //restore
r23:22 = memd(sp+#24) //restore
} {
dealloc_return //return
}
.L_end:
.size dwconv2dbbb_v60_asm, .L_end-dwconv2dbbb_v60_asm
/* ===== file boundary: hexagon/asm_src/vmemcpy_weights_h.S (XiaoMi/nnlib) ===== */
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
*/
/*==============================================================================*/
/* void *vmemcpy_weights_asm(uint8_t *dst, const uint8_t *src, int length)      */
/* HVX vector memcpy used for copying weight arrays.                            */
/* In:   r0 = dst, r1 = src, r2 = length in bytes                               */
/* Out:  r0 = dst + length on the general path (fast aligned path returns the   */
/*       post-incremented dst, which equals dst + length since length is a      */
/*       multiple of 128 there)                                                 */
/* Fast path: if dst, src and length are ALL multiples of 128, a simple         */
/* aligned vector copy loop runs. Otherwise data is realigned with valign and   */
/* the first/last partial vectors are written with masked (predicated) stores.  */
/* Clobbers: r3-r11, v0-v4, q0-q1, loop registers. No stack usage.              */
/*==============================================================================*/
.global vmemcpy_weights_asm
.type vmemcpy_weights_asm, @function
.balign 32
vmemcpy_weights_asm:
/* ============================================================================ */
#define dst r0 //destination ptr (arg 0)
#define src r1 //source ptr (arg 1)
#define length r2 //number of bytes to copy (arg 2)
/* ============================================================================ */
#define aligned r4 //OR of dst|src|length alignment bits
#define srcalign r4 //src offset within a 128B vector (reuses r4)
#define dstalign r5 //dst offset within a 128B vector
#define mid r6 //valign shift amount (src/dst skew)
#define end r7 //address past last byte, then its alignment
#define sel0 r8 //lane-select constant for predicate slot 0
#define kernel r3 //number of full 128B vectors in main loop
#define sel1 r9 //lane-select constant for predicate slot 1
#define dsto r10 //updated destination ptr (return value)
#define c127 r11 //alignment mask constant 127
#define x0 v0 //previous input vector
#define x1 v1 //current input vector
#define y0 v2 //realigned output vector
#define vpredp v3 //prolog predicates saved in a vreg
#define vprede v4 //epilog predicates saved in a vreg
#define qprolog q0 //vector predicate masking the first store
#define qepilog q1 //vector predicate masking the last store
/* ============================================================================ */
{
    c127 = #127
    aligned = or(dst,src)              //collect alignment bits of both pointers
    kernel = lsr(length, #7)           //number of whole 128B vectors
    qprolog =vsetq(dst)                //qprolog vec predicate __|--- (lanes below dst&127 off)
} {
    sel0 = ##0x01010101                //lane select: slot of plain qprolog
    aligned = or(aligned,length)       //fold in length alignment bits
    loop0(.L_fast_copy, kernel)        //preset fast-path loop count
} {
    end = add(length, dst)             //address one past last byte of block
    p0 = bitsclr(aligned, c127)        //dst, src and length all 128B aligned?
    if(p0.new) jump:t .L_fast_copy     //yes: take the simple aligned loop
} {
    dstalign = and(dst, #127)          //alignment of dst within a vector
    sel1 = add(sel0, sel0)             //lane select: slot of modified predicates
    qepilog = vsetq(end)               //epilog vec predicate (lanes up to end&127 on)
    dsto = add(dst, length)            //updated destination ptr (returned in r0)
} {
    srcalign = and(src, #127)          //alignment of src within a vector
    end = and(end, #127)               //alignment of the last byte
    vpredp = vand(qprolog, sel1)       //save prolog predicate into vreg (slot 1)
    vprede = vand(qepilog, sel1)       //save epilog predicate into vreg (slot 1)
} {
    mid = sub(srcalign, dstalign)      //valign shift: move src data up or down
    dstalign = add(dstalign, length)   //dst offset + length = total span in bytes
    kernel = sub(length, end)          //bytes handled by the main loop0
    qprolog = or(qprolog, !qepilog)    //modified prolog for the no-kernel case
} {
    vpredp|= vand(qprolog, sel0)       //also save modified prolog (slot 0)
    p2 = cmp.gt(dstalign, #127)        //span > 127: don't use the modified prolog
    if(!p2.new) sel1 = sel0            //single-vector case: choose the modified one
    kernel = add(kernel, #127)         //round kernel up to nearest 128
} {
    x0 = vmem(src+#0)                  //load first block of input data
    kernel= lsr(kernel, #7)            //kernel in units of 128B vectors
    qprolog = vand(vpredp, sel1)       //select which qprolog variant to use
    p1 = cmp.gt(mid, #-1)              //mid >= 0: data shifts down
} {
    qepilog = vand(vprede, sel1)       //choose the matching qepilog
    if(p1) src = add(src, #128)        //shift-down case: force a reload below
    loop0(.L_blocks, kernel)           //start main realigning loop
}
/* ============================================================================ */
.balign 32
.L_blocks:
{   x1.tmp = vmem(src++#1):nt          //load next block (non-temporal)
    y0 = valign(x1, x0, mid)           //realign pair using the offset mid
    x0 = x1                            //carry current block into next iteration
} {
    if(!qprolog) vmem(dst++#1):nt = y0 //prolog store folded into the main loop
    qprolog = and(qprolog, !qprolog)   //clear predicate: later stores hit all lanes
}:endloop0
/* ============================================================================ */
{   x1.tmp = vmem(src+#0):nt           //load final block (or reload) of data
    y0 = valign(x1, x0, mid)           //realign for the final output vector
} {
    if(qepilog) vmem(dst+#0):nt = y0   //masked store of the epilog bytes
    r0 = dsto                          //return updated pointer dst + length
}{
    jumpr r31                          //return to caller
}
.balign 32
.L_fast_copy:
{
    x1.tmp = vmem(src++#1):nt          //everything 128B aligned: load next block
    vmem(dst++#1):nt = x1              //store it straight out
}:endloop0
{
    jumpr r31                          //return to caller (r0 = post-incremented dst)
}
.L_end:
/*==============================================================================*/
.size vmemcpy_weights_asm, .L_end-vmemcpy_weights_asm
|
XiaoMi/nnlib | 8,132 | hexagon/asm_src/copyNto4_h.S | /*
* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#if 0
/* Reference C model for copyNto4_asm (disabled; kept as documentation). */
void copyNto4(uint8_t *out4, uint8_t *in3, int n, int in_offset, int in_depth)
{
    int i;
    for(i=0; i < n; i++) {
        if(in_depth == 1) {
            out4[4*i+0] = in3[1*i+0];
            out4[4*i+1] = in_offset;
            out4[4*i+2] = in_offset;
            out4[4*i+3] = in_offset;
        } else if(in_depth == 2) {
            out4[4*i+0] = in3[2*i+0];
            out4[4*i+1] = in3[2*i+1];
            out4[4*i+2] = in_offset;
            out4[4*i+3] = in_offset;
        } else if(in_depth == 3) {
            out4[4*i+0] = in3[3*i+0];
            out4[4*i+1] = in3[3*i+1];
            out4[4*i+2] = in3[3*i+2];
            out4[4*i+3] = in_offset;
        } else { //in_depth == 4
            out4[4*i+0] = in3[4*i+0];
            out4[4*i+1] = in3[4*i+1];
            out4[4*i+2] = in3[4*i+2];
            out4[4*i+3] = in3[4*i+3];
        }
    }
    return;
}
#endif
/*======================================================================*/
/*======================================================================*/
/* void copyNto4_asm(uint8_t *out4, uint8_t *inN, int elemns,           */
/*                   int in_offset, int in_depth, void *cntrl_tab)      */
/* Expand depth-N (N = 1..4) packed activation bytes to depth 4,        */
/* padding each element's missing bytes with in_offset (the input       */
/* zero point). See the #if 0 C model above for the exact semantics.    */
/* In:  r0 = out4 (128B aligned), r1 = inN (may be unaligned),          */
/*      r2 = elemns (loop count; each iteration writes one full 128B    */
/*           output vector, i.e. 32 depth-4 elements), r3 = in_offset,  */
/*      r4 = in_depth (1..4), r5 = cntrl_tab (vdelta control for the    */
/*           3-to-4 expansion)                                          */
/* Clobbers: r6-r7, r10, M0, v0-v7, q0, p0-p2, loop registers.          */
/*======================================================================*/
.text
.global copyNto4_asm
.balign 32
.type copyNto4_asm, @function
copyNto4_asm:
/* ------------------------------------------------------------------- */
#define out4 r0 //128-byte aligned output ptr
#define inN r1 //non-aligned input data
#define elemns r2 //number of 128B output vectors to produce
#define in_offset r3 //input "zero" (quantization zero point)
#define in_depth r4 //input depth 1-4
#define cntrl_tab r5 //vdelta control table for 3 to 4 expansion
/* ------------------------------------------------------------------- */
#define sizeb r6 //vshuff granularity: bytes (-1)
#define sizeh r7 //vshuff granularity: halfwords (-2)
/* ------------------------------------------------------------------- */
#define vpred v0 //vector predicate pattern container
#define vperm34 v1 //permute (vdelta) 3-to-4 control
#define vin_offset v2 //in_offset splatted across the vector
#define b95b00 v3 //bytes 0-95 (depth-3 input view)
#define b63b00 v3 //bytes 0-63 (depth-2 input view)
#define b31b00 v3 //bytes 0-31 (depth-1 input view)
#define b255b128_b127b00 v5:4 //output bytes plus upper garbage
#define b255b128 v5 //garbage half
#define b127b00 v4 //output data
#define h127h64_h63h00 v7:6 //output halfwords plus garbage
#define h127h64 v7 //garbage half
#define h63h00 v6 //bytes shuffled up to halfwords
#define PV(VSRC) .word (0x1DFFE020+VSRC) //debug: dump vector reg
#define PS(SSRC) .word (0x1DFFE100+SSRC) //debug: dump scalar reg
/* ------------------------------------------------------------------- */
{ r6 = ##0x01000000 //marks the 4th byte of each element for the offset mux
  r10 = asl(in_depth, #5) //input bytes per iter: depth 1->32,2->64,3->96,4->128
} {
  M0 = r10 //input pointer increment
  vperm34 = vmem(cntrl_tab+#0) //vdelta control for 3-to-4 expansion
  vpred = vsplat(r6) //predicate pattern: one marked byte per 4-byte element
} {
  in_offset = vsplatb(in_offset) //replicate zero point into all 4 bytes of a word
  loop1(.L_switch, #2) //outer loop whose target is the common return
  sizeh = #-2 //shuffle in halfword (2-byte) units
} {
  vin_offset = vsplat(in_offset) //full vector of zero-point bytes
  sizeb = #-1 //shuffle in byte units
  p0 = cmp.eq(in_depth, #1) //is depth 1?
  if(p0.new) jump:nt .L_depth1 //
} {
  r10= ##0x01010101 //lane select used to build the merge predicate
  p1 = cmp.eq(in_depth, #2) //is depth 2?
  if(p1.new) jump:nt .L_depth2 //
} {
  q0 = vand(vpred, r10) //merge predicate: every 4th byte (depth-3 padding)
  p2 = cmp.eq(in_depth, #3) //is depth 3?
  if(p2.new) jump:t .L_depth3 //
}
/* ------------------------------------------------------------------- */
.balign 32
.L_depth4:
{ loop0(.L_loop44, elemns) //depth 4: pure copy, default fall-through case
}
/* ------------------------------------------------------------------- */
.L_loop44:
{ b127b00 = vmemu(inN++M0) //unaligned load, advance input by 128
} {
  vmem(out4++#1) = b127b00 //store 128 output bytes unchanged
}:endloop0:endloop1
/* ------------------------------------------------------------------- */
.balign 32
.L_depth3:
{ b95b00 = vmemu(inN++M0) //prime: 96 input bytes, advance by 96
  loop0(.L_loop34, elemns) //depth-3 loop
}
/* ------------------------------------------------------------------- */
.L_loop34:
{ b127b00 = vdelta(b95b00, vperm34) //spread 3-byte groups, leaving a 4th slot
} {
  b127b00 = vmux(q0,vin_offset,b127b00) //merge in_offset into every 4th byte
  b95b00 = vmemu(inN++M0) //next 96 input bytes
} {
  vmem(out4++#1) = b127b00 //store 128 output bytes
}:endloop0:endloop1
/* ------------------------------------------------------------------- */
.balign 32
.L_depth2:
{ b63b00 = vmemu(inN++M0) //prime: 64 input bytes, advance by 64
  loop0(.L_loop24, elemns) //depth-2 loop
}
/* ------------------------------------------------------------------- */
.L_loop24:
{ h127h64_h63h00 = vshuff(vin_offset,b63b00,sizeh) //interleave data pairs with offset pairs
} {
  b63b00 = vmemu(inN++M0) //next 64 input bytes
} {
  vmem(out4++#1) = h63h00 //store 128 output bytes
}:endloop0:endloop1
/* ------------------------------------------------------------------- */
.balign 32
.L_depth1:
{ b31b00 = vmemu(inN++M0) //prime: 32 input bytes, advance by 32
  loop0(.L_loop14, elemns) //depth-1 loop
}
/* ------------------------------------------------------------------- */
.L_loop14:
{ h127h64_h63h00 = vshuff(vin_offset,b31b00,sizeb) //interleave bytes: P d P d ...
} {
  b31b00 = vmemu(inN++M0) //next 32 input bytes
} {
  b255b128_b127b00=vshuff(vin_offset,h63h00,sizeh) //interleave halfwords of in_offset
  vmem(out4++#1) = b127b00.new //store 128 output bytes
}:endloop0:endloop1
/* ------------------------------------------------------------------- */
.L_switch:
{ jumpr r31
}
.L_end:
.size copyNto4_asm, .L_end-copyNto4_asm
|
XiaoMi/nnlib | 23,039 | hexagon/asm_src/gvconv2dbbb_d32_h_v66.S | /*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*======================================================================*/
/* FUNCTIONS : gvconv2dbbb_v66_asm */
/* */
/* DESCRIPTION */
/* Perform 2d convolution from input depth to output; */
/* max, min computed and output scaled to 8bits */
/* */
/* ARCHITECTURE : QDSP6V66 + HVX */
/*======================================================================*/
/* REVISION HISTORY: */
/* ================= */
/* */
/* Author Date Comments */
/* ------------------------------------------------------------- */
/* DJH 04/21/17 created */
/* DJH 05/12/17 update api precomputed filt_offset */
/* DJH 05/16/17 Hoisted loop0 around to prolog and */
/* epilog of loop1 */
/*======================================================================*/
#if 0
#endif
/*=============================================================================*/
/*=============================================================================*/
/* gvconv2dbbb_v66_asm — 2-d convolution over d32-format byte activations      */
/* with byte weights, 32-bit accumulation (V66 Z-buffer vrmpyz), running       */
/* max/min tracking, and requantization back to unsigned bytes.                */
/* Register args: r0 = ptr_xi (input, 128B aligned), r1 = ptr_wi (weights,     */
/* 128B aligned), r2 = ptr_zi (output, 128B aligned), r3 = in_width            */
/* (pad_l+in_width+pad_r, multiple of 4), r4 = out_next_row (bytes to next     */
/* output row), r5 = out_width (padded).                                       */
/* Stack args: see the numbered comments on the #defines below (indices 0-12   */
/* are argument slots above the caller's frame).                               */
/* Computes 4 output columns per inner pass; out_align realigns stores.        */
/*=============================================================================*/
.text
.file "gvconv2dbbb_h.S"
.global gvconv2dbbb_v66_asm
.balign 32
.type gvconv2dbbb_v66_asm, @function
gvconv2dbbb_v66_asm:
/*=============================================================================*/
/*=============================================================================*/
#define ptr_xi r0 //data aligned 128
#define ptr_wi r1 //weights aligned 128
#define ptr_zi r2 //results aligned 128
#define in_width r3 //(pad_l+in_width+pad_r) => 4 %4
#define out_next_row r4 //value in bytes to get to next full out row
#define out_width r5 //out_width_pad
#define stride_h_w r26 //0 stride_height|stride_width
#define in_depth r27 //1 %32
#define in_depth_stride_h_w r27:26
#define filt_width r8 //2 >= 1
#define filt_height r9 //3 >= 1 filt_height lines per filter
#define filt_height_width r9:8
#define out_height r10 //4 >= 1 number of vertical lines to perform
#define ptr_filtsum r11 //5 aligned 128
#define ptr_max r12 //6 aligned 128
#define recip_level r13 //7 recip is 31bit unsigned 0x7f800000000LL / max
#define recip_level_ptr_max r13:12//
#define out_align r6 //8 0, 32, 64, 96
#define left_skip_tmp_out_align r7:6
#define left_skip_tmp r7 //9
#define out_next_d32 r14 //10
#define nslice r21 //11
#define recip_shamt r27 //12
#define ptr_w_next r17 //saved weight ptr for the next depth slice
#define AEQ0 r27 //store-enable pipeline input (out_align == 0)
#define STQ r12 //store-enable pipeline stage
#define ptr_x1 r7 //
#define skip_col r13 //
#define lmask r21 //
/*=============================================================================*/
#define stride_h r26 //0 stride_height|stride_width
#define in_next_rows r15 //in_width * stride_h * in_depth for next output
#define ptr_x0 r16 //
#define stride_w r18 //stride width
#define next_outputs r19 //jump to input ptr for next set of outputs
#define ptr_w r20 //
#define in_width_32 r22 //
#define ptr_x2 r23 //
#define ptr_z r24 //
#define col_count r25 //
#define PRED3_0 C4 //used to broadside-load all four predicates at once
#define STACK 72 //amount of stack to reserve
/*=============================================================================*/
#define PV32(VSRC) .word (0x1DFFE020+VSRC)
#define s0 v0 //accumulator stream 0
#define s1 v1 //accumulator stream 1
#define s1s0 v1:0 //
#define s2 v2 //accumulator stream 2
#define s3 v3 //accumulator stream 3
#define s3s2 v3:2 //
#define s3s2s1s0 v3:0 //
#define w0 v19 //current weight vector
#define x0 v4 //
#define x1 v5 //
#define x2 v6 //
#define x3 v7 //
#define x3210 v6 //packed 8-bit results for 4 columns
#define x3_prev v16 //previous packed value (for store realignment)
#define xout v17 //realigned output vector
#define y0 v8 //
#define y1 v9 //
#define y2 v10 //
#define y3 v11 //
#define wsum v14 //initialized to in_offset*wsum + bias offset
#define maxe v12 //running max
#define mine v18 //running min
#define biasvec v16 //
#define recipvec v15 //
#define recip_sh_vec v13 //
#define RSS <<1:rnd:sat:shift //unverbose the instruction
/*=============================================================================*/
{   filt_height_width = memd(sp+#8)            //args 2,3: filt_width / filt_height
    recip_level_ptr_max = memd(sp+#24)         //args 6,7: ptr_max / recip_level
    ptr_xi = and(ptr_xi, #-2)                  //guarantee lsb is 0
    sp = add(sp,#-STACK)                       //reserve local frame
} {
    memd(sp+#40) = r27:26                      //save callee-saved r26,r27
    in_depth_stride_h_w = memd(sp+#(STACK+0))  //args 0,1: strides / in_depth
} {
    memd(sp+#32) = r25:24                      //save callee-saved r24,r25
    col_count = out_width                      //out_width
    filt_width += add(filt_width, #-1)         //x2 to account for loop of 16 bytes, -1 for epilog
    maxe = vmem(ptr_max+#0)                    //load running max
} {
    memd(sp+#24) = r23:22                      //save callee-saved r22,r23
    memd(sp+#16) = r21:20                      //save callee-saved r20,r21
    filt_height = mpy(filt_height.L,in_depth.L)//filt_height*in_depth
    in_next_rows= mpy(stride_h_w.H, in_depth.L)//stride_h*in_depth
} {
    memd(sp+#8) = r19:18                       //save callee-saved r18,r19
    memd(sp+#0) = r17:16                       //save callee-saved r16,r17
    filt_height = lsr(filt_height, #5)         //filt_height * in_depth / 32
    in_width_32 = asl(in_width, #5)            //32 * in_width = one d32 line
} {
    ptr_filtsum = memw(sp+#(STACK+20))         //arg 5: precomputed weight-sum ptr
    loop1(.L_filt_height, filt_height)         //[P,0]for(filt_y=0;filt_y<height*in_depth/32;filt_y+=1){
    stride_w = zxth(stride_h_w)                //extract stride width
    r21 = #32
} {
    recipvec = vmem(recip_level++#1)           //per-slice reciprocal scale
} {
    next_outputs=mpyi(filt_height,in_width)    //filt_height*in_width*in_depth
    loop0(.L_filt_width, filt_width)           //[P, 0]ki is k1/32 - 0
    left_skip_tmp_out_align = memd(sp+#(STACK+32))// NOTE clobbers ptr_x1, but not in use yet
    mine = vmem(ptr_max+#1)                    //load running min
} {
    memw(sp+#(STACK+28)) = recip_level         //save recip_level ptr for next slice
    left_skip_tmp = and(left_skip_tmp, #0x7f)  //extract lower left-skip value
    skip_col = extract(left_skip_tmp, #2, #16) //extract skip-col bits
} {
    ptr_xi = add(ptr_xi,left_skip_tmp)         //apply left skip to input ptr
    next_outputs -= asl(stride_w,#2)           //back up 4*stride for the next group
    r21 -= lsr(out_align,#2)                   //1/4 for bytes: 0->32, 32->24, 64->16, 96->8
    recip_shamt = memw(sp+#(STACK+48))         //arg 12: pre-shift amount
} {
    STQ = !cmp.eq(r0, r0)                      //force store predicate off (false)
    r21 = and(r21, #0x1f)                      //
    r7 = #-1                                   //
} {
    recip_sh_vec = vsplat(recip_shamt)         //broadcast pre-shift
    r7 = asl(r7, r21)                          //create left mask
    r21 = #0x1f                                //
} {
    memw(sp+#60) = r7                          //save left mask
    r21 &= asl(skip_col, #3)                   //
    r7 = #-1                                   //
} {
    r7 = lsr(r7, r21)                          //create right mask
} {
    memw(sp+#64) = r7                          //save right mask
} {
    memw(sp+#48) = ptr_xi                      //save input ptr for next depth slice
    next_outputs = asl(next_outputs, #5)       //(filt_height*in_width*in_depth/32-4*stride)*32
    in_next_rows =mpyi(in_width,in_next_rows)  //total vertical stride bytes
    AEQ0 = cmp.eq(out_align, #0)               //if no realignment needed, enable store
}
/*=============================================================================*/
.balign 64
.L_depth:
{   ptr_w = ptr_wi                             //[P,0]reset filter pointer
    out_height = memw(sp+#(STACK+16))          //arg 4: number of output lines
    ptr_x0 = ptr_xi                            //reset input pointer
    lmask = memw(sp+#60)                       //left mask for first column group
} {
    memw(sp+#56) = ptr_zi                      //save output base for this slice
    ptr_z = add(ptr_zi, #0)                    //working output ptr
    wsum = vmem(ptr_filtsum++#1)               //bias + in_offset*weight-sum vector
    ptr_x2 = and(ptr_xi, #-128)                //[Pheight]make loads 128B aligned (clear bits 0-6)
} {
    ptr_x1 = add(ptr_x0, #4)                   //[Pheight]setup initial pointer
    z = vmem(ptr_x2+#0)                        //[Pheight]load bytes 0-127 into Z buffer
    s3s2 = vcombine(wsum, wsum)                //[P, 0]initialize accumulators 2,3
    s1s0 = vcombine(wsum, wsum)                //[P, 0]initialize accumulators 0,1
}
/*=============================================================================*/
.L_height:
.L_width:
.L_filt_height:
{   ptr_x0 = add(ptr_x0, in_width_32)          //[E, 0]move to next line; ptr_y keeps going
    z = vmem(ptr_x2+#1)                        //load bytes 128-255 into Z buffer
    ptr_x1 += mpyi(stride_w, #24*4)            //4*stride_w*24 = 96*stride_w; can be adjusted 0,+32
    nop
}
.balign 64
/*=============================================================================*/
.L_filt_width:
{   w0.tmp = vmem(ptr_w++#1)                   //[1, 6]load weights
    s3s2s1s0.w += vrmpyz(w0.b, ptr_x1.ub++)    //[1, 6]mac across 4 streams with same weights
} {
    w0.tmp = vmem(ptr_w++#1)                   //[1, 6]load weights
    s3s2s1s0.w += vrmpyz(w0.b, ptr_x1.ub++)    //[1, 6]mac across 4 streams with same weights
} {
    w0.tmp = vmem(ptr_w++#1)                   //[1, 6]load weights
    s3s2s1s0.w += vrmpyz(w0.b, ptr_x1.ub++)    //[1, 6]mac across 4 streams with same weights
} {
    w0.tmp = vmem(ptr_w++#1)                   //[1, 7]load weights
    s3s2s1s0.w += vrmpyz(w0.b, ptr_x1.ub++)    //[1, 7]mac across 4 streams with same weights
    z = vmem(ptr_x1+#0)                        //load next stride=1 128 or stride=2 64 bytes
}:endloop0
/*=============================================================================*/
{   w0.tmp = vmem(ptr_w++#1)                   //[1, 6]load weights
    s3s2s1s0.w += vrmpyz(w0.b, ptr_x1.ub++)    //[1, 6]mac across 4 streams with same weights
    loop0(.L_filt_width, filt_width)           //[P, 0]re-arm inner loop
} {
    w0.tmp = vmem(ptr_w++#1)                   //[1, 6]load weights
    s3s2s1s0.w += vrmpyz(w0.b, ptr_x1.ub++)    //[1, 6]mac across 4 streams with same weights
} {
    ptr_x2 = and(ptr_x0, #-128)                //make loads 128B aligned (clear bits 0-6)
    w0.tmp = vmem(ptr_w++#1)                   //[1, 6]load weights
    s3s2s1s0.w += vrmpyz(w0.b, ptr_x1.ub++)    //[1, 6]mac across 4 streams with same weights
} {
    w0.tmp = vmem(ptr_w++#1)                   //[1, 7]load weights
    s3s2s1s0.w += vrmpyz(w0.b, ptr_x1.ub )     //[1, 7]final mac: no pointer post-increment
    z = vmem(ptr_x2+#0)                        //load bytes 0-127 into Z buffer
    ptr_x1 = add(ptr_x0, #4)                   //reset ptr for next row of filter taps
}:endloop1
/*=============================================================================*/
    s0.w = vasl(s0.w, recip_sh_vec.w)          //shift left before shifting right by 31
    s1.w = vasl(s1.w, recip_sh_vec.w)          //shift left before shifting right by 31
    s2.w = vasl(s2.w, recip_sh_vec.w)          //shift left before shifting right by 31
    s3.w = vasl(s3.w, recip_sh_vec.w)          //shift left before shifting right by 31
{
    PRED3_0 = lmask                            //broadside-load p0-p3 with mask bits
    lmask = #-1                                //default: all lanes enabled next time
    x1.h = vpack(y1.w, y0.w):sat               //[E1, 0]pack low 16 bits together
} {
    ptr_x0 = sub(ptr_x0, next_outputs)         //[E0, 1]reset data ptr to next 4 columns
    y0.w = vmpye(s0.w, recipvec.uh)            //[E0, 1](s0 * recip + rnd)>>31, even half
    x3.h = vpack(y3.w, y2.w):sat               //[E1, 1]pack low 16 bits together
    col_count=add(col_count,#-4)               //count -= 4; ptr_z += 128
} {
    y0.w+= vmpyo(s0.w, recipvec.h):RSS         //[E0, 2]((s0<<rsh) * recip + rnd)>>31, odd half
    ptr_x2 = and(ptr_x0, #-128)                //make loads 128B aligned (clear bits 0-6)
} {
    loop1(.L_filt_height, filt_height)         //[E0, 0]re-arm filter-height loop
    if(!p0) y0 = maxe                          //[E0, ]masked-off lanes don't affect max
} {
    maxe.w = vmax(maxe.w, y0.w)                //[E0, 0]update running max with y0
    if(!p0) y0 = mine                          //[E0, ]masked-off lanes don't affect min
} {
    mine.w = vmin(mine.w, y0.w)                //[E0, 0]update running min with y0
} {
    y1.w = vmpye(s1.w, recipvec.uh)            //[E0, 3](s1 * recip + rnd)>>31, even half
    x3210.ub = vpack(x3.h, x1.h):sat           //[E1, 3]sat8: <0, >255 clamp, pack low 8 bits
    ptr_x1 = add(ptr_x0, #4)                   //setup initial pointer
} {
    y1.w+= vmpyo(s1.w, recipvec.h):RSS         //[E0, 4](s1 * recip + rnd)>>31, odd half
} {
    if(!p1) y1 = maxe                          //[E0, ]masked-off lanes don't affect max
} {
    maxe.w = vmax(maxe.w, y1.w)                //[E0, 3]update running max with y1
    if(!p1) y1 = mine                          //[E0, ]masked-off lanes don't affect min
} {
    mine.w = vmin(mine.w, y1.w)                //[E0, 4]update running min with y1
    p1 = STQ                                   //[E0, ]store-enable from the pipeline
} {
    y2.w = vmpye(s2.w, recipvec.uh)            //[E0, 5](s2 * recip + rnd)>>31, even half
    s1s0 = vcombine(wsum, wsum)                //[E0, 5]re-initialize accumulators 0,1
    ptr_w_next = ptr_w                         //[E0, ]remember weight ptr for next slice
    ptr_w = ptr_wi                             //[E0, 5]reset filter pointer
} {
    y2.w+= vmpyo(s2.w, recipvec.h):RSS         //[E0, 6](s2 * recip + rnd)>>31, odd half
    xout = vlalign(x3210,x3_prev,out_align)    //[E1, 6]realign packed output
    if(p1)vmem(ptr_z++#1):nt = xout.new        //[E1, 6]store 128 output bytes if enabled
} {
    if(!p2) y2 = maxe                          //[E0, ]masked-off lanes don't affect max
    p0 = cmp.eq(col_count, #4)                 //[E0, 4]last column group coming up?
} {
    if(p0) lmask = memw(sp+#64)                //[E0, ]switch in right mask on last iteration
    maxe.w = vmax(maxe.w, y2.w)                //[E0, 4]update running max with y2
    if(!p2) y2 = mine                          //masked-off lanes don't affect min
} {
    mine.w = vmin(mine.w, y2.w)                //[E0, 5]update running min with y2
} {
    y3.w = vmpye(s3.w, recipvec.uh)            //[E0, 7](s3 * recip + rnd)>>31, even half
    x3_prev = x3210                            //[E1, 7]save data for next output alignment
    STQ = AEQ0                                 //[E1, 7]advance store-enable pipeline
    AEQ0 = cmp.eq(r0, r0)                      //[E1, 7]set pipeline input to true
} {
    y3.w+= vmpyo(s3.w, recipvec.h):RSS         //[E0, 8](s3 * recip + rnd)>>31, odd half
    s3s2 = vcombine(wsum, wsum)                //[E0, 8]re-initialize accumulators 2,3
    z = vmem(ptr_x2+#0)                        //[P, 0]preload bytes 0-127 for next filter row
} {
    if(!p3) y3 = maxe                          //[E0, ]masked-off lanes don't affect max
    p0 = cmp.eq(col_count, #0)                 //[E0, 4]row finished?
} {
    maxe.w = vmax(maxe.w, y3.w)                //[E0, 2]update running max with y3
    if(!p3) y3 = mine                          //[E0, ]masked-off lanes don't affect min
} {
    mine.w = vmin(mine.w, y3.w)                //[E0, 2]update running min with y3
    if(!p0) jump:t .L_width                    //[E1, 8]next 4 output columns
}//cols per line kernel loop width
/*=============================================================================*/
{   x1.h = vpack(y1.w, y0.w):sat               //[E1, 0]pack low 16 bits (row epilog)
    out_height = add(out_height, #-1)          //one output row done
    STQ = !cmp.eq(r0, r0)                      //[Pheight]force store predicate off
    ptr_xi= add(ptr_xi,in_next_rows)           //ptr_x += in_width*stride_h*in_depth
} {
    x3.h = vpack(y3.w, y2.w):sat               //[E1, 1]pack low 16 bits, sat8 clamp later
    p1 = !cmp.eq(out_height, #0)               //more rows to compute?
    col_count = out_width                      //[Pheight]reset column count
} {
    ptr_x0 = ptr_xi                            //prolog of next row: reset input ptr
    AEQ0 = cmp.eq(out_align, #0)               //[Pheight]if no realignment, enable store
    ptr_x2 = and(ptr_xi, #-128)                //[Pheight]make loads 128B aligned
} {
    x3210.ub = vpack(x3.h, x1.h):sat           //[E1, 3]sat8: <0, >255 clamp, pack low 8 bits
} {
    ptr_zi = add(ptr_zi, out_next_row)         //next output line for this depth segment
    ptr_x1 = add(ptr_x0, #4)                   //[Pheight]setup initial pointer
    if (p1) z = vmem(ptr_x2+#0)                //[Pheight]load bytes 0-127
} {
    xout = vlalign(x3210, x3_prev, out_align)  //[E1, 6]realign last packed output
    vmem(ptr_z+#0):nt = xout.new               //[E1, 6]store final vector of the row
    ptr_z = add(ptr_zi, #0)                    //working output ptr for next row
    if (p1) jump:t .L_height                   //next output row
}//end lines per block//last cols per line
/*=============================================================================*/
    nslice = memw(sp+#(STACK+44))              //arg 11: remaining depth slices
{
    nslice = add(nslice,#-1)                   //one slice done
} {
    memw(sp+#(STACK+44)) = nslice              //store back remaining count
} {
    ptr_zi = memw(sp+#56)                      //restore output base of this slice
    p1 = cmp.gt(nslice,#0)                     //more slices?
    recip_level = memw(sp+#(STACK+28))         //restore recip_level ptr
} {
    out_next_d32 = memw(sp+#(STACK+40))        //arg 10: step to next d32 output slab
    ptr_wi = ptr_w_next                        //advance weight base for next slice
    ptr_xi = memw(sp+#48)                      //restore ptr_xi
} {
    if(p1) recipvec = vmem(recip_level++#1)    //next slice's reciprocal scale
} {
    memw(sp+#(STACK+28)) = recip_level         //save recip_level ptr again
    ptr_zi = add(ptr_zi,out_next_d32)          //advance output to next d32 slab
    if p1 jump .L_depth                        //process next depth slice
}
/*=============================================================================*/
    ptr_max = memw(sp+#(STACK+24))             //arg 6: max/min result pointer
{   vmem(ptr_max+#0) = maxe                    //[E, 0]write back running max
    r17:16 = memd(sp+#0)                       //restore r16,r17 from stack
} {
    vmem(ptr_max+#1) = mine                    //[E, 0]write back running min
    r19:18 = memd(sp+#8)                       //restore r18,r19
} {
    r21:20 = memd(sp+#16)                      //restore r20,r21
    r23:22 = memd(sp+#24)                      //restore r22,r23
} {
    r25:24 = memd(sp+#32)                      //restore r24,r25
    r27:26 = memd(sp+#40)                      //restore r26,r27
    sp = add(sp,#STACK)                        //release local frame
    jumpr r31                                  //return
}
.L_end:
/*=============================================================================*/
.size gvconv2dbbb_v66_asm, .L_end-gvconv2dbbb_v66_asm
/*=============================================================================*/
|
XiaoMi/nnlib | 20,547 | hexagon/asm_src/gvconv2dbbb_h_v66.S | /*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*======================================================================*/
/* FUNCTIONS : gvmmpybbw_asm */
/* */
/* DESCRIPTION */
/* Perform gvm vector matrix multiply, result is */
/* saturated to 8bits */
/* */
/* ARCHITECTURE : QDSP6V66 + HVX */
/*======================================================================*/
/* REVISION HISTORY: */
/* ================= */
/* */
/* Author Date Comments */
/* ------------------------------------------------------------- */
/* DJH 03/07/16 created */
/* DJH 05/10/16 added post add for x and y offset*/
/* DJH 07/10/16 rewrote pre-transpose */
/* DJH 09/16/16 fix over prefetch by 16 now 8 */
/* DJH 01/30/16 installed splatter Z buffer a */
/*======================================================================*/
/* CYCLE-COUNT: */
/* -> K*N/512+17*N/4+36 */
/* */
/* MEMORY */
/* CODESIZE = 976 bytes */
/* STACK = 64 bytes */
/* ASSUMPTIONS */
/* y and z are 128 byte aligned */
/* x is 8byte aligned */
/* N%4=0 K%16=0 M%32=0 */
/* C MODEL */
/*======================================================================*/
#if 0
void gvmmpybbw_cn(uint8 * a, uint8 * b, int * c, int N, int M, int K) {
int i, j, k;
int32 sum;
uint8 a_val, b_val;
for (j=0; j < M; j++) {
for (i=0; i < N; i++) {
sum = 0;
for (k=0; k < K; k++) {
a_val = a[i*K+k];
b_val = b[k*M+j];
sum += a_val * b_val ;
}
c[i*M+j] = sum;
}
}
return;
}
#endif
/*=============================================================================*/
.text
.file "gvconv2dbbb_h.S"
.global gvconv2dbbb_v66_asm
.balign 32
.type gvconv2dbbb_v66_asm, @function
gvconv2dbbb_v66_asm:
/*=============================================================================*/
#define ptr_x r0 //data
#define ptr_yi r1 //weights
#define ptr_z r2 //results
#define in_width r3 //(pad_x+in_width) * depth
#define out_width r4 //out_width
#define m r5 //is stride of the output matrix always mult of 32
#define stride_depth r6 //0 stride|depth between computations
#define filt_width r7 //1 depth*filt_width
#define filt_height r8 //2 filt_hieght lines per filter
#define out_height r9 //3 number of vertical lines to perform
#define ptr_datasum r10 //4
#define ptr_weightsum r11 //5
#define ptr_max r12 //6
#define ptr_biasbuf r14 //7 sat8 ((0x8000 + (x + biass)*recip_level)>>16)
#define recip_level r15 //8
/*=============================================================================*/
#define PV32(VSRC) .word (0x1DFFE020 + VSRC)
#define PZ(ZSRC) .word (0x1DFFE1E0 + ZSRC)
#define PS(SSRC) .word (0x1DFFE100 + SSRC)
#define PD(SSRC) .word (0x1DFFE120 + SSRC)
#define sel r8
#define len r9
#define in_width_stride r13 //in_width * stride for next output
#define ptr_x0 r11
#define stride4 r13 //
#define stride r25
#define next_outputs r23 //jump to input ptr for next set of outputs
#define ptr_y r24 //
#define col_count r22
#define xsuma r0 //kernel sum * filt_offset computed externally
#define xsumb r21 //kernel sum * filt_offset computed externally
#define round_amt r6 //amount to add to bias buf odffset computation
#define one r17
#define c4 r26
#define row0 r14
#define row1 r15
#define row2 r16
#define row3 r17
#define mpy_cntrl r18
#define mpy_cntrl2 r27
#define mpy_cntrl3 r20
/*=============================================================================*/
#define s0 v0 //
#define s1 v1 //
#define s1s0 v1:0 //
#define s2 v2 //
#define s3 v3 //
#define s3s2 v3:2 //
#define s3s2s1s0 v1:0 //
#define x0 v4 //
#define x1 v5 //
#define x2 v6 //
#define x3 v7 //
#define y0 v8 //
#define y1 v9 //
#define y2 v10 //
#define y3 v11 //
#define vwsum v15 //
#define maxomaxe v13:12 //
#define maxe v12 //
#define maxo v13 //
#define vc8000 v14 //
#define biasvec v18 //
#define recipvec v16 //
#define rndvec v17 //
#define vpreds v19 //
/*=============================================================================*/
{
sel = ##0x01010101 // entry 0
len = #32 //
} {
q0 = vsetq(len); // 1000
len = #64 //
round_amt = ##0x00008000 //
} {
vpreds = vand(q0, sel) //
q2 = vsetq(len); // 1100
len = #96 //
rndvec = vsplat(round_amt) //
} {
q1 = and(q2, !q0) // 0100
q3 = vsetq(len) // 1110
sel = add(sel, sel) //02020202
} {
vpreds|= vand(q1, sel) //
q2 = and(q3, !q2) // 0010
q3 = not(q3) // 0001
sel = add(sel, sel) //04040404
} {
vpreds|= vand(q2, sel) //
sel = add(sel, sel) //08080808
} {
vpreds|= vand(q3, sel) // entry 3 10101010 selects all zero
stride_depth = memw(sp+#0<<2) //extract stride*depth
filt_width = memw(sp+#1<<2) //extract filt_width*depth
} {
filt_height = memw(sp+#2<<2) //extract filt_height
out_height = memw(sp+#3<<2) //number of output lines
p0 = cmp.eq(filt_width, #1)
} {
ptr_datasum = memw(sp+#4<<2) //data sum ptr
ptr_weightsum = memw(sp+#5<<2) //ptr pre computed weight sum
filt_width = mpy(filt_width.L, stride_depth.L)
} {
ptr_max = memw(sp+#6<<2) //ptr pre computed max value in output
ptr_biasbuf = memw(sp+#7<<2) //read in the ptr to the bias buffer value
} {
biasvec = vmem(ptr_biasbuf+#0) //
recip_level = memw(sp+#8<<2) //
p3 = cmp.gt(filt_width, #192)
} {
recipvec = vsplat(recip_level) //
allocframe(#72) //
} {
memd(sp+#32) = r25:24 //
memd(sp+#0) = r17:16 //
stride = lsr(stride_depth, #16) //
} {
memd(sp+#16) = r21:20 //
memd(sp+#24) = r23:22 //
stride_depth = mpy(stride_depth.H, stride_depth.L)
} {
M0 = stride_depth //
memd(sp+#8) = r19:18 //
memd(sp+#40) = r27:26 //
} {
memw(sp+#48) = ptr_x //
memw(sp+#52) = ptr_yi //
} {
vwsum = vmem(ptr_weightsum+#0) //
r16 = ##0x80000001 //max negative
c4 = #0x4
} {
next_outputs = mpyi(filt_height, in_width)
vc8000 = vsplat(r16) //
memw(sp+#56) = out_width //
} {
stride4= asl(stride_depth, #2) //4-2*stride to corrct for outper pipeline
M0 = stride_depth
} {
M1 = m //outdepth
next_outputs = sub(next_outputs, stride4)
filt_width = lsr(filt_width, #5) //filt_width / 32
} {
maxe= vmem(ptr_max+#0)
in_width_stride = mpyi(in_width, stride) //
filt_width = add(filt_width, #-1)
}
/*===========o=================================================================*/
.balign 32
.L_height:
{
ptr_x0 = memw(sp+#48) //ptr_x
out_height = add(out_height, #-1) //
} {
col_count = memw(sp+#56) //out_width
memw(sp+#48) += in_width_stride //ptr_x=add(ptr_x,in_width) //ptr_x+=in_width
}
.balign 32
.L_width:
{
ptr_y = memw(sp+#52) //ptr_y = ptr_yi initialize filter pointer
loop1(.L_filt_height, filt_height) //[P, 0]for(filt_y=0; filt_y < n; filt_y+=1){
ptr_x = ptr_x0
} {
mpy_cntrl = combine(stride_depth.L, ptr_x.L)
row0 = add(ptr_x, #32)
z1:0 = vmem(ptr_x++M0)
} {
row1 = add(ptr_x, #32)
z3:2 = vmem(ptr_x++M0)
s1s0 = vcombine(vwsum, vwsum) //[P, 0]
p0 = cmp.eq(filt_width, #0)
} {
row2 = add(ptr_x, #32)
z5:4 = vmem(ptr_x++M0)
s3s2 = vcombine(vwsum, vwsum) //[P, 0]
mpy_cntrl = vsubh(mpy_cntrl, c4)
}
.balign 32
.L_filt_height:
{
loop0(.L_filt_width, filt_width) //[P, 0]ki is k1/16 - 1
row3 = add(ptr_x, #32)
z7:6 = vmem(ptr_x+#0)
if(p0) jump .L_s32
}
.balign 32
.L_filt_width:
{ y0.tmp = vmem(ptr_y++#2) //[1, 0]32x4
s3s2s1s0.uw += vrmpy_tmp(y0.ub, ++mpy_cntrl.ub)
} {
y1.tmp = vmem(ptr_y+#-1) //[1, 1]32x4
s3s2s1s0.uw += vrmpy_tmp(y1.ub, ++mpy_cntrl.ub)
} {
y2.tmp = vmem(ptr_y++#2) //[1, 4]32x4
s3s2s1s0.uw += vrmpy_tmp(y2.ub, ++mpy_cntrl.ub)
} {
y3.tmp = vmem(ptr_y+#-1) //[1, 4]32x4
s3s2s1s0.uw += vrmpy_tmp(y3.ub, ++mpy_cntrl.ub)
} {
y0.tmp = vmem(ptr_y++#2) //[1, 0]32x4
s3s2s1s0.uw += vrmpy_tmp(y0.ub, ++mpy_cntrl.ub)
z1:0 = vmem_fifo(row0++#32)
} {
y1.tmp = vmem(ptr_y+#-1) //[1, 1]32x4
s3s2s1s0.uw += vrmpy_tmp(y1.ub, ++mpy_cntrl.ub)
z3:2 = vmem_fifo(row1++#32)
} {
y2.tmp = vmem(ptr_y++#2) //[1, 4]32x4
s3s2s1s0.uw += vrmpy_tmp(y2.ub, ++mpy_cntrl.ub)
z5:4 = vmem_fifo(row2++#32)
} {
y3.tmp = vmem(ptr_y+#-1) //[1, 5]32x4
s3s2s1s0.uw += vrmpy_tmp(y3.ub, ++mpy_cntrl.ub)
z7:6 = vmem_fifo(row3++#32)
}:endloop0
.balign 32
.L_s32:
{ y0.tmp = vmem(ptr_y++#2) //[1, 0]32x4
s3s2s1s0.uw += vrmpy_tmp(y0.ub, ++mpy_cntrl.ub)
ptr_x0 = add(ptr_x0, in_width) //[E, 0]move to next line ptr_y keeps going
} {
y1.tmp = vmem(ptr_y+#-1) //[1, 1]32x4
s3s2s1s0.uw += vrmpy_tmp(y1.ub, ++mpy_cntrl.ub)
ptr_x = ptr_x0
} {
y2.tmp = vmem(ptr_y++#2) //[1, 4]32x4
s3s2s1s0.uw += vrmpy_tmp(y2.ub, ++mpy_cntrl.ub)
} {
y3.tmp = vmem(ptr_y+#-1) //[1, 4]32x4
s3s2s1s0.uw += vrmpy_tmp(y3.ub, ++mpy_cntrl.ub)
mpy_cntrl2 = combine(stride_depth.L, ptr_x0.L)
} {
y0.tmp = vmem(ptr_y++#2) //[1, 0]32x4
s3s2s1s0.uw += vrmpy_tmp(y0.ub, ++mpy_cntrl.ub)
} {
y1.tmp = vmem(ptr_y+#-1) //[1, 1]32x4
s3s2s1s0.uw += vrmpy_tmp(y1.ub, ++mpy_cntrl.ub)
mpy_cntrl3 = add(mpy_cntrl, #12)
row0 = add(ptr_x0, #32)
} {
y2.tmp = vmem(ptr_y++#2) //[1, 4]32x4
s3s2s1s0.uw += vrmpy_tmp(y2.ub, ++mpy_cntrl.ub)
} {
y3.tmp = vmem(ptr_y+#-1) //[1, 5]32x4
s3s2s1s0.uw += vrmpy_tmp(y3.ub, mpy_cntrl3.ub)
mpy_cntrl = vsubh(mpy_cntrl2, c4)
z1:0 = vmem(ptr_x++M0)
} {
z3:2 = vmem(ptr_x++M0)
row1 = add(ptr_x, #32)
} {
row2 = add(ptr_x, #32)
z5:4 = vmem(ptr_x++M0)
}:endloop1
{
ptr_x0 = sub(ptr_x0, next_outputs) //reset data ptr to next set of 4
xsuma = memw(ptr_datasum++#1<<2) //#0
} {
x0 = vsplat(xsuma) //#0
p0 = cmp.gt(col_count, #1) //#1 are there at least 2 levt?
if(p0.new) xsumb = memw(ptr_datasum++#1<<2) //#1
} {
s0.w = vadd(s0.w, x0.w) //#0 add data sum
x1 = vsplat(xsumb) //#1
y1 = rndvec //#1 out1 = 0x8000
} {
maxe.w = vmax(maxe.w, s0.w) //#0 see if z0 is max
s0.w = vadd(s0.w, biasvec.w) //#0 add data sum
p1 = cmp.gt(col_count, #2) //#2
if(p1.new) xsuma = memw(ptr_datasum++#1<<2) //#2
} {
s1.w = vadd(s1.w, x1.w) //#1
x2 = vsplat(xsuma) //#2
p2 = cmp.gt(col_count, #3) //#3
if(p2.new) xsumb = memw(ptr_datasum++#1<<2)//#3
} {
if(!p0) s1 = vc8000 //#1
x1.w = vadd(s1.w, biasvec.w) //#1 add data sum
x3 = vsplat(xsumb) //#3
y0 = rndvec //#0 out0 = 0x8000
} {
y0.w += vmpyie(s0.w, recipvec.uh) //#0
s2.w = vadd(s2.w, x2.w) //#2
} {
y1.w += vmpyie(x1.w, recipvec.uh) //#1
x2.w = vadd(s2.w, biasvec.w) //#2 add data sum
} {
y0.h = vpacko(y0.w, y0.w) //#0 >>16
if(!p1) s2 = vc8000 //#2
s3.w = vadd(s3.w, x3.w) //#3
} {
y1.h = vpacko(y1.w, y1.w) //#1 >>16
if(!p2) s3 = vc8000 //#3
x3.w = vadd(s3.w, biasvec.w) //#3 add data sum
y2 = rndvec //#2 out2 = 0x8000
} {
y0.ub = vpack(y0.h, y0.h):sat //#0 sat8 <0, >255
y2.w += vmpyie(x2.w, recipvec.uh) //#2
y3 = rndvec //#3 out3 = 0x8000
} {
y1.ub = vpack(y1.h, y1.h):sat //#1 sat8 <0, >255
y3.w += vmpyie(x3.w, recipvec.uh) //#3
} {
maxe.w = vmax(maxe.w, s1.w) //#1
y2.h = vpacko(y2.w, y2.w) //#2 >>16
} {
vmem32(ptr_z+#0)= y0 //#0 [E, ]store first 32bytes
ptr_z = add(ptr_z, m) //#0
maxe.w = vmax(maxe.w, s2.w) //#2
y3.h = vpacko(y3.w, y3.w) //#3 >>16
} {
if(p0) vmem32(ptr_z+#0) = y1 //#1 [E, ]store 2nd 32bytes
if(p0) ptr_z = add(ptr_z, m) //#1
y2.ub = vpack(y2.h, y2.h):sat //#2 sat8 <0, >255
maxe.w = vmax(maxe.w, s3.w) //#3
} {
if(p1) vmem32(ptr_z+#0) = y2 //#2 [E, ]store 2nd 32bytes
if(p1) ptr_z = add(ptr_z, m) //#2
y3.ub = vpack(y3.h, y3.h):sat //#3 sat8 <0, >255
col_count = add(col_count, #-4) //
} {
if(p2) vmem32(ptr_z+#0) = y3 //#3 [E, ]store 2nd 32bytes
if(p2) ptr_z = add(ptr_z, m) //#3
p3 = cmp.gt(col_count, #0) //
if(p3.new) jump:t .L_width //
}//end cols per line
{
p1 = cmp.eq(out_height, #0) //
if(!p1.new) jump:t .L_height //
}//end lines per block
{
loop0(.L_peak, #5) //[P, 0]
r6 = #4 //
}
.L_peak:
{
maxomaxe=vshuff(maxe,maxe,r6) //[0, 0]
} {
maxe.w = vmax(maxo.w, maxe.w) //[0, 1]
r6 = add(r6, r6) //[0, 1]
}:endloop0
{ vmem(ptr_max+#0) = maxe //[E, 0]
}
/*=============================================================================*/
{ r17:16 = memd(sp+#0) //restore stack
r19:18 = memd(sp+#8) //Q
} {
r21:20 = memd(sp+#16) //Q
r23:22 = memd(sp+#24) //Q
} {
r25:24 = memd(sp+#32) //Q
r27:26 = memd(sp+#40) //Q
} {
dealloc_return //Q
}
.L_end:
/*=============================================================================*/
/* Fix: the .size operand previously read gvconv2dbbb_v66_asm, a symbol from a
 * different convolution variant (the v66 "bbb" kernel) that is not defined in
 * this file. The function implemented here is gvconv2db2b2b2u_d32_asm (see the
 * FUNCTIONS line of the file header), so the ELF size entry must be attached
 * to that symbol; otherwise this file emits a dangling undefined reference and
 * leaves the real function with no size in the symbol table.               */
.size gvconv2db2b2b2u_d32_asm, .L_end-gvconv2db2b2b2u_d32_asm
/*=============================================================================*/
|
XiaoMi/nnlib | 5,873 | hexagon/asm_src/relu_kernel_h.S | /*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
*/
/*======================================================================*/
/* FUNCTIONS : relu_kernel */
/* */
/* DESCRIPTION */
/* */
/* ARCHITECTURE : QDSP6V6 + HVX */
/*======================================================================*/
/* REVISION HISTORY: */
/* ================= */
/* */
/* Author Date Comments */
/* ------------------------------------------------------------- */
/* KG 06/26/17 created */
/*======================================================================*/
/* CYCLE-COUNT: */
/* */
/* -> */
/* */
/* MEMORY */
/* CODESIZE = bytes */
/* ASSUMPTIONS */
/* C MODEL */
/*======================================================================*/
#define LOG2VLEN 7
#define VLEN (1<<LOG2VLEN)
#define in_data r0
#define out_data r1
#define bytes r2
#define quantized_zero r3
#define quantized_max r4
#define bytes1 r8
#define sQuantizedZero v0
#define sQuantizedMax v1
#define sIn v2
#define sOut v3
#define sOut2 v4
.text
.globl relu_kernel
.falign
.type relu_kernel,@function
/*----------------------------------------------------------------------------
 * void relu_kernel(uint8_t *in_data, uint8_t *out_data, int bytes,
 *                  int quantized_zero)
 * In:   r0 = in_data        input buffer (vmem() used -> assumed 128-byte
 *                           aligned; TODO confirm against callers)
 *       r1 = out_data       output buffer (same alignment assumption)
 *       r2 = bytes          byte count; rounded UP to whole HVX vectors, so
 *                           up to VLEN-1 bytes past 'bytes' are read/written
 *                           - buffers must be padded accordingly
 *       r3 = quantized_zero quantized code of real 0.0
 * Out:  out_data[i] = max(in_data[i], quantized_zero)   (u8 quantized ReLU)
 * Clobbers: p0, v0, v2, v3; no callee-saved registers touched, no frame.
 *--------------------------------------------------------------------------*/
relu_kernel:
{
quantized_zero = vsplatb(quantized_zero) // replicate the u8 zero-code to all 4 bytes of r3
bytes = add(bytes, #VLEN-1) // round up to a vector multiple
p0 = cmp.gt(bytes, #0) // NOTE: packet semantics - compares the PRE-packet value of 'bytes'
if (!p0.new) jumpr:nt r31 // early return for bytes <= 0
} {
bytes = lsr(bytes, #LOG2VLEN) // bytes := ceil(original_bytes / VLEN) = vector count
sQuantizedZero = vsplat(quantized_zero) // broadcast zero-code to all HVX lanes
} {
loop0(.relu_kernel_lp0, bytes) // hardware loop over vector count
}
.falign
.relu_kernel_lp0:
/* one vector per iteration: load (.cur = use this packet's load), clamp, store (.new) */
{
sIn.cur = vmem(in_data++#1) //
sOut.ub = vmax(sIn.ub,sQuantizedZero.ub) // ReLU: clamp below at quantized zero
vmem(out_data++#1) = sOut.new //
}:endloop0
{
jumpr r31 //
}
.relu_kernel_end:
.size relu_kernel, .relu_kernel_end-relu_kernel
.globl reluX_kernel
.falign
.type reluX_kernel,@function
/*----------------------------------------------------------------------------
 * void reluX_kernel(uint8_t *in_data, uint8_t *out_data, int bytes,
 *                   int quantized_zero, int quantized_max)
 * In:   r0 = in_data        input buffer (vmem() -> assumed 128-byte aligned)
 *       r1 = out_data       output buffer (same assumption)
 *       r2 = bytes          byte count; rounded up to whole vectors
 *       r3 = quantized_zero quantized code of 0.0 (lower clamp)
 *       r4 = quantized_max  quantized code of X   (upper clamp)
 * Out:  out_data[i] = min(max(in_data[i], quantized_zero), quantized_max)
 * Note: software-pipelined - the pre-loop packet loads vector 0 and computes
 *       its vmax; each loop iteration then loads vector k while finishing
 *       (vmin) and storing vector k-1.  Consequently the kernel reads ONE
 *       extra full vector beyond the rounded-up length; input buffers must
 *       tolerate that over-read.
 * Clobbers: p0, v0-v4; no callee-saved registers touched, no frame.
 *--------------------------------------------------------------------------*/
reluX_kernel:
{
quantized_zero = vsplatb(quantized_zero) // replicate zero-code into all 4 bytes
bytes1 = add(bytes, #VLEN-1) // round up (original 'bytes' kept intact for the compare)
p0 = cmp.gt(bytes, #0) //
if (!p0.new) jumpr:nt r31 // early return for bytes <= 0
} {
bytes1 = lsr(bytes1, #LOG2VLEN) // vector count = ceil(bytes / VLEN)
quantized_max = vsplatb(quantized_max) // replicate max-code into all 4 bytes
} {
sQuantizedZero = vsplat(quantized_zero) // broadcast to all lanes
sQuantizedMax = vsplat(quantized_max) // broadcast to all lanes
} {
loop0(.reluX_kernel_lp0, bytes1) //
sIn.cur = vmem(in_data++#1) // pipeline prologue: load vector 0
sOut.ub = vmax(sIn.ub,sQuantizedZero.ub) // and apply the lower clamp
}
.falign
.reluX_kernel_lp0:
/* vmin reads the PRE-packet sOut (previous iteration's vmax result),
 * so each store completes the element loaded one iteration earlier. */
{
sIn.cur = vmem(in_data++#1) //
sOut.ub = vmax(sIn.ub,sQuantizedZero.ub) //
sOut2.ub = vmin(sOut.ub,sQuantizedMax.ub) //[2]
vmem(out_data++#1) = sOut2.new //[2]
}:endloop0
{
jumpr r31 //
}
.reluX_kernel_end:
.size reluX_kernel, .reluX_kernel_end-reluX_kernel
|
XiaoMi/nnlib | 16,158 | hexagon/asm_src/im2col7732_h.S | /*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
*/
#define ptrIn r0
#define ptrOut r1
#define xoffset r2
#define ptrDelta r3
#define startorow r4
#define numorows r5
#define endrows r23
#define selp r6 //used to select the predicates
#define sel0 r7
#define sel1 r8
#define sel2 r9
#define sel3 r10
#define sel4 r11
#define sel5 r12
#define sel6 r13
#define sel7 r14
#define ptrRow0 r15
#define ptrRow1 r16
#define rot0 r17
#define optr r18
#define cm21 r19
#define cm0 r20
#define row_cnt r21
#define line_cnt r22
#define offset r15
#define iwidth r16
#define vpred0 v0
#define in0 v1
#define in1 v2
#define min0 v3
#define min1 v4
#define out01 v5
#define out23 v8
#define out45 v11
#define out67 v14
#define vxoffset v15
#define vpred1 v16
#define vpred2 v17
#define vpred3 v18
#define vpred4 v19
#define vpred5 v20
#define vpred6 v21
#define vpred7 v22
#define vqp q1
#define vq0 q0
#define vq1 q3
#define vq2 q3
#define vq3 q3
#define vq4 q3
#define vq5 q0
#define vq6 q2
#define vq7 q1
#define vq8 q2
/* ========================================================================= */
/*----------------------------------------------------------------------------
 * void im2col7732_asm(const uint8_t *ptrIn, uint8_t *ptrOut, int xoffset,
 *                     const void *ptrDelta, int startorow, int numorows)
 * In:   r0 = ptrIn     input image, 3-channel interleaved; row pitch is
 *                      672 bytes (= 224*3, see iwidth below), so this is
 *                      hard-coded for a 224x224x3 image and a 7x7 filter
 *                      (hence the name 7-7-3-2: 7x7 filter, depth 3, stride 2
 *                      - presumably; TODO confirm against the C caller)
 *       r1 = ptrOut    im2col output; advanced by 17920 (=112*160) per
 *                      output line, i.e. 112 patches x 160-byte padded patch
 *       r2 = xoffset   pad byte value (quantized zero), splatted and muxed
 *                      in wherever the predicate masks select padding
 *       r3 = ptrDelta  8 precomputed HVX predicate-mask vectors (vpred0..7)
 *                      driving the in0/in1 interleave and the scatter stores
 *       r4 = startorow first output row to produce
 *       r5 = numorows  number of output rows to produce
 * The body is organised as four near-identical passes per output line, one
 * per filter-row pair (rows 0-1, 2-3, 4-5, row 6 + pad), each a 7x4 pair of
 * nested hardware loops that rotate two unaligned input vectors (vmemu) and
 * conditionally store the muxed results through M0/M1 auto-increments.
 * Uses vmemu for loads (no input alignment requirement) but vmem for stores
 * (ptrOut must be 128-byte aligned).  Saves/restores r17-r27 on a frame.
 *--------------------------------------------------------------------------*/
.global im2col7732_asm
.type im2col7732_asm, @function
.balign 32
im2col7732_asm:
/* ============================================================================ */
{
allocframe(#64)
xoffset = vsplatb(xoffset) // replicate pad byte into all 4 bytes
} {
memd(sp+#0) = r17:16 // save callee-saved pairs
memd(sp+#8) = r19:18
sel0 = #160
} {
memd(sp+#16)= r21:20
memd(sp+#24)= r23:22
} {
memd(sp+#32)= r25:24
memd(sp+#40)= r27:26
M1 = sel0 // M1 = 160: store stride between patch slots
sel0 = #32
} {
vpred0 = vmem(ptrDelta+#0) // load the 8 precomputed predicate masks
M0 = sel0 // M0 = 32
offset = #-2
} {
vxoffset = vsplat(xoffset) // pad value in every lane
vpred1 = vmem(ptrDelta+#1)
offset += add(startorow, startorow) // offset = 2*startorow - 2 (stride-2 row index)
} {
sel0 = ##0x01010101 // lane-select bit 0, one per 32-bit lane
vpred2 = vmem(ptrDelta+#2)
iwidth = #672 // input row pitch = 224 * 3 bytes
} {
sel1 = add(sel0, sel0) // 0x02020202
sel2 = asl(sel0, #2) // 0x04040404
sel3 = asl(sel0, #3) // 0x08080808
vpred3 = vmem(ptrDelta+#3)
} {
sel4 = add(sel3, sel3) // 0x10101010
sel5 = asl(sel3, #2) // 0x20202020
sel6 = asl(sel3, #3) // 0x40404040
vpred4 = vmem(ptrDelta+#4)
} {
vpred5 = vmem(ptrDelta+#5)
sel7 = asl(sel4, #3) // 0x80808080
p0 = and(p0, !p0) // clear p0 (epilog flag off)
endrows = add(startorow, numorows)
} {
vpred6 = vmem(ptrDelta+#6)
vqp = vand(vpred1, sel1) //extract the pre mask
p2 = cmp.eq(startorow, #0) // p2 = prolog (first row needs top padding)
offset = mpyi(offset, iwidth) // byte offset of starting input row
} {
p1 = and(p1, !p1) // clear p1 (off epilog)
vpred7 = vmem(ptrDelta+#7)
line_cnt = startorow
ptrIn = add(ptrIn, offset)
}
/* ============================================================================ */
.L_line_loop:
/* ============================================================================ *
 0th and 1st row of filter
* ============================================================================ */
{
cm21 = #-21 // rotate amount: 7 taps * 3 channels
ptrRow0 = ptrIn
optr = add(ptrOut,#0)
selp = mux(p2,sel7,sel1) //select the prolog padding
} {
row_cnt = #0;
ptrRow0 = add(ptrRow0, #-6) //retard by 6
ptrRow1 = add(ptrRow0, #666) //3*224-6
loop1(.L_outer0, #7)
}
.balign 32
.L_outer0:
{
in0 = vmemu(ptrRow0) // unaligned load of filter row 0 data
ptrRow0 = add(ptrRow0, #96)
vqp = vand(vpred1, selp)
} {
in0 = vmux(vqp, vxoffset, in0) // substitute pad value where masked
in1 = vmemu(ptrRow1)
p3 = cmp.eq(row_cnt, #5) //ultimate stage
} {
ptrRow1 = add(ptrRow1, #96)
in1 = vmux(vqp, vxoffset, in1)
rot0 = #0
selp = mux(p3,sel2, sel3) //from there always choose the data
} {
selp = mux(p2, sel7, selp) //prolog: force pad-offset mask, else normal
row_cnt = add(row_cnt, #1)
in1 = vror(in1, cm21)
} {
vq0 = vand(vpred0, sel0) //[0, 0]
in1 = vror(in1, rot0) //[0, 1]
rot0 = #-26 //[0, 1]
} {
loop0(.L_loop00, #4) //create 16 x 42 values
out01 = vmux(vq0, in0, in1) //[0, 2]
vq1 = vand(vpred0, sel1) //[0, 2]
in0 = vror(in0, rot0) //[0, 2]
}
.balign 32
.L_loop00:
/* pipelined interleave: rotate in0/in1 by 26 each stage, mux them per
 * precomputed predicates, and scatter-store the four patch pairs. */
{
if(vq1) vmem(optr++M1) = out01 //advance 0
in1 = vror(in1, rot0) //[0, 3]
vq2 = vand(vpred0, sel2) //[0, 3]
} {
out23 = vmux(vq2, in0, in1) //[0, 4]
vq3 = vand(vpred0, sel3) //[0, 4]
in0 = vror(in0, rot0) //[0, 4]
} {
if(vq3) vmem(optr++M1) = out23 //advance 160
in1 = vror(in1, rot0) //[0, 5]
vq4 = vand(vpred0, sel4) //[0, 5]
} {
out45 = vmux(vq4, in0, in1) //[0, 6]
vq5 = vand(vpred0, sel5) //[0, 6]
in0 = vror(in0, rot0) //[0, 6]
} {
if(vq5) vmem(optr++M1) = out45 //advance 320
in1 = vror(in1, rot0) //[0, 7]
vq6 = vand(vpred0, sel6) //[0, 7]
} {
out67 = vmux(vq6, in0, in1) //[0, 8]
vq8 = vand(vpred1, sel0) //[0, 8]
in0 = vror(in0, rot0) //[1, 0]
vq0 = vand(vpred0, sel0) //[1, 0]
} {
if(vq8) vmem(optr+#1) = out67 //[0, 9]
vq7 = vand(vpred0, sel7) //[1, 9]
in1 = vror(in1, rot0) //[1, 1]
rot0 = #-26 //[1, 1]
} {
if(vq7) vmem(optr++M1) = out67 //advance 480
out01 = vmux(vq0, in0, in1) //[1, 2]
vq1 = vand(vpred0, sel1) //[1, 2]
in0 = vror(in0, rot0) //[1, 2]
}:endloop0:endloop1
/* ============================================================================ *
 2nd and 3rd row of filter
* ============================================================================ */
{
ptrRow0 = add(ptrIn, #1344) //+0  (2 input rows down: 2*672)
optr = add(ptrOut,#0)
selp = sel1
row_cnt = #0;
} {
cm21 = #-21 //-63 //3*21
ptrRow0 = add(ptrRow0, #-6) //-6
ptrRow1 = add(ptrRow0, #666) //3*224-6
loop1(.L_outer1, #7)
}
.balign 32
.L_outer1:
{
in0 = vmemu(ptrRow0)
ptrRow0 = add(ptrRow0, #96)
vqp = vand(vpred1, selp)
} {
in0 = vmux(vqp, vxoffset, in0)
in1 = vmemu(ptrRow1)
ptrRow1 = add(ptrRow1, #96)
} {
p3 = cmp.eq(row_cnt, #5)
in1 = vmux(vqp, vxoffset, in1)
} {
selp = mux(p3,sel2,sel3) //from there always choose the data
in1 = vror(in1, cm21)
rot0 = #-42 // pre-rotate: filter rows 2/3 start 2*21 bytes in
} {
row_cnt = add(row_cnt, #1);
in0 = vror(in0, rot0)
vq0 = vand(vpred2, sel0)
loop0(.L_loop10, #4) //create 16 x 42 values
}
.balign 32
.L_loop10:
{
in1 = vror(in1, rot0)
rot0 = #-26
} {
out01 = vmux(vq0, in0, in1)
vq1 = vand(vpred2, sel1)
in0 = vror(in0, rot0)
} {
if(vq1) vmem(optr++M1) = out01 //advance 0
in1 = vror(in1, rot0)
vq2 = vand(vpred2, sel2)
} {
out23 = vmux(vq2, in0, in1)
vq3 = vand(vpred2, sel3)
in0= vror(in0, rot0)
} {
if(vq3) vmem(optr++M1) = out23 //advance 160
in1 = vror(in1, rot0)
vq4 = vand(vpred2, sel4)
} {
out45 = vmux(vq4, in0, in1)
in0= vror(in0, rot0)
vq6 = vand(vpred2, sel6)
} {
if(vq6) vmem(optr+#1) = out45 //
vq5 = vand(vpred2, sel5)
in1 = vror(in1, rot0)
vq7 = vand(vpred2, sel7)
} {
if(vq5) vmem(optr) = out45 //advance 320
out67 = vmux(vq7, in0, in1)
vq8 = vand(vpred3, sel0)
optr = add(optr, #288)
} {
if(vq8) vmem(optr++M0) = out67 //advance 480
in0 = vror(in0, rot0) //[1, 0]
vq0 = vand(vpred2, sel0) //[1, 0]
}:endloop0:endloop1
/* ============================================================================ *
 4th and 5th row of filter
* ============================================================================ */
{
cm21 = #-21 //105 //5*21
ptrRow0 = add(ptrIn, #2688) //3*224 * 4
optr = add(ptrOut,#0)
row_cnt = #0
} {
selp = mux(p1,sel7,sel1) //pad the last row
ptrRow0 = add(ptrRow0, #-6) //retard by 6
ptrRow1 = add(ptrRow0, #666) //3*224-6
loop1(.L_outer2, #7)
}
.balign 32
.L_outer2:
{
in0 = vmemu(ptrRow0)
ptrRow0 = add(ptrRow0, #96)
vqp = vand(vpred1, selp)
} {
in1 = vmemu(ptrRow1)
ptrRow1 = add(ptrRow1, #96)
in0 = vmux(vqp, vxoffset, in0)
} {
p3 = cmp.eq(row_cnt, #5)
in1 = vmux(vqp, vxoffset, in1)
row_cnt = add(row_cnt, #1)
} {
in1 = vror(in1, cm21)
rot0 = #-84 // pre-rotate: filter rows 4/5 start 4*21 bytes in
} {
selp = mux(p3,sel2,sel3) //from there always choose the data
in0 = vror(in0, rot0)
vq0 = vand(vpred4, sel0)
} {
selp = mux(p1, sel7, selp) // bottom-edge epilog: force pad mask
in1 = vror(in1, rot0)
rot0 = #-26
loop0(.L_loop20, #4) //create 16 x 42 values
}
.balign 32
.L_loop20:
{
out01 = vmux(vq0, in0, in1)
vq1 = vand(vpred4, sel1)
in0 = vror(in0, rot0)
} {
if(vq1) vmem(optr++M1) = out01 //advance 0
in1 = vror(in1, rot0)
vq2 = vand(vpred4, sel2)
} {
out23 = vmux(vq2, in0, in1)
vq4 = vand(vpred4, sel4)
in0= vror(in0, rot0)
vq6 = vand(vpred4, sel6)
} {
if(vq4) vmem(optr+#1) = out23 //advance 160
vq3 = vand(vpred4, sel3)
in1 = vror(in1, rot0)
vq5 = vand(vpred4, sel5)
} {
if(vq3) vmem(optr) = out23 //advance 160
optr = add(optr, #288)
out45 = vmux(vq5, in0, in1)
in0= vror(in0, rot0)
} {
in1 = vror(in1, rot0)
vq7 = vand(vpred4, sel7)
if(vq6) vmem(optr++M1) = out45 //advance 320
} {
out67 = vmux(vq7, in0, in1)
vq8 = vand(vpred5, sel0)
in0 = vror(in0, rot0) //[1, 0]
vq0 = vand(vpred4, sel0) //[1, 0]
} {
if(vq8) vmem(optr++M0) = out67 //advance 480
in1 = vror(in1, rot0) //[1, 1]
}:endloop0:endloop1
/* ============================================================================ *
 6th row of filter and 13 pixel pad to make 160 pixel block
* ============================================================================ */
{
ptrRow0 = add(ptrIn, #4032) //6*3*224
optr = add(ptrOut,#0)
row_cnt = #0
} {
selp = mux(p0, sel7, sel1) // last-2-lines epilog: pad row 6 entirely
ptrRow0 = add(ptrRow0, #-6) //element 4
loop1(.L_outer3, #7)
}
.balign 32
.L_outer3:
{
in0 = vmemu(ptrRow0)
ptrRow0 = add(ptrRow0, #96)
vqp = vand(vpred1, selp)
} {
in0 = vmux(vqp, vxoffset, in0)
in1 = vxoffset // second source is pure padding here
p3 = cmp.eq(row_cnt, #5)
rot0 = #2 //-126 6x21
} {
selp = mux(p3, sel2, sel3)
loop0(.L_loop30, #4) //create 16 x 42 values
in0 = vror(in0, rot0) //[0, 0]
vq0 = vand(vpred6, sel0) //[0, 0]
} {
rot0 = #-26 //[0, 0]
} {
row_cnt = add(row_cnt, #1)
selp = mux(p0, sel7, selp)
out01 = vmux(vq0, in0, in1) //[0, 1]
vq2 = vand(vpred6, sel2) //[0, 1]
}
.balign 32
.L_loop30:
{
if(vq2) vmem(optr+#1) = out01 //advance 160
vq1 = vand(vpred6, sel1)
in0 = vror(in0, rot0)
} {
if(vq1) vmem(optr++M1) = out01 //advance 0
vq3 = vand(vpred6, sel3)
} {
optr = add(optr, #128)
out23 = vmux(vq3, in0, in1)
in0= vror(in0, rot0)
vq4 = vand(vpred6, sel4)
} {
if(vq4) vmem(optr++M1) = out23 //advance 160
vq5 = vand(vpred6, sel5)
} {
out45 = vmux(vq5, in0, in1)
in0= vror(in0, rot0)
vq6 = vand(vpred6, sel6)
vq7 = vand(vpred6, sel7)
} {
if(vq6) vmem(optr++M1) = out45 //advance 320
} {
out67 = vmux(vq7, in0, in1)
vq8 = vand(vpred7, sel0)
in0 = vror(in0, rot0) //[1, 0]
vq0 = vand(vpred6, sel0) //[1, 0]
} {
if(vq8) vmem(optr++M0) = out67 //advance 480
out01 = vmux(vq0, in0, in1) //[1, 1]
vq2 = vand(vpred6, sel2) //[1, 1]
}:endloop0:endloop1
/* ============================================================================ */
/* per-output-line bookkeeping: advance pointers, update edge predicates */
{
line_cnt = add(line_cnt, #1)
} {
p0 = cmp.eq(line_cnt, endrows)
ptrOut = add(ptrOut,#17920) //add 112*160
p2 = and(p2, !p2) // clear prolog flag after the first line
} {
p1 = cmp.gt(line_cnt, #110) //is this the last lines
ptrIn = add(ptrIn, #1344) //add 224*3 (stride 2 -> two input rows)
if(!p0) jump:t .L_line_loop
p0 = cmp.gt(line_cnt, #109) //is this the last 2 line
}
/* ============================================================================ */
.L_quit:
{
r17:16 = memd(sp+#0) // restore callee-saved registers
r19:18 = memd(sp+#8)
} {
r21:20 = memd(sp+#16)
r23:22 = memd(sp+#24)
} {
r25:24 = memd(sp+#32)
r27:26 = memd(sp+#40)
}
dealloc_return
/* ============================================================================ */
.L_end:
/* Fix: .size must reference the symbol this file actually defines
 * (im2col7732_asm, see the .global/.type above); the previous operand named
 * im2col3332_asm, which belongs to a different im2col variant in another
 * source file, producing a spurious undefined-symbol reference and leaving
 * im2col7732_asm without a size entry in the ELF symbol table. */
.size im2col7732_asm, .L_end-im2col7732_asm
|
XiaoMi/nnlib | 14,589 | hexagon/asm_src/fullyconnected.S |
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
.file "fullyconnected.S"
/*======================================================================*/
/* FUNCTIONS : fully_connected_asm */
/* */
/* DESCRIPTION */
/* Perform matrix vector multiply, result 8bits */
/* */
/* ARCHITECTURE : QDSP6V6 + HVX */
/*======================================================================*/
/* REVISION HISTORY: */
/* ================= */
/* */
/* Author Date Comments */
/* ------------------------------------------------------------- */
/* MZ 09/07/17 created */
/*======================================================================*/
/* CYCLE-COUNT: */
/* -> */
/* */
/* MEMORY */
/* CODESIZE = bytes */
/* STACK = bytes */
/* ASSUMPTIONS */
/* y and z are 128 byte aligned */
/* x is16byte aligned */
/* C MODEL */
/*======================================================================*/
/*=============================================================================*/
#define ptr_d r0 //data
#define ptr_w r1 //weights must be pre processwed and transposed
#define ptr_o r2
#define batch r3
#define in_depth r4
#define out_depth r5
#define ptr_m r6
#define recip r7
#define recip_ptr_m r7:6
#define ptr_suma r8
#define ptr_bias r9
#define ptrs_bias_suma r9:8
/*=============================================================================*/
#define ptr_wk r7
#define k r7
#define l2feparam_w_l r10
#define stride r11
#define l2feparam_w r11:10
#define lc1 r12
#define out_d r13
#define d3210 r14
#define d7654 r15
#define d7654_d3210 r15:14
#define count r16
#define lc0 r17
#define lc0_count r17:16
#define c7fffffff r18
#define sumaval r18
#define ptr_w_fe r19
#define l2f_w_win0 r20
#define l2f_w_win1 r21
#define indepthd8mod r21
#define indepthd8 r28
/*=============================================================================*/
#define ssum0_0 v0
#define ssum1_0 v1
#define ssum2_0 v2
#define ssum3_0 v3
#define sbias0 v4
#define sbias1 v4
#define sbias2 v4
#define sbias3 v4
#define sw0 v4
#define sw1 v4
#define sw2 v4
#define sw3 v4
#define ssumaval v5
#define srecip V6
#define smin v8
#define smax v9
#define smin_t v10
#define smax_t v11
#define ssumr0_0 v10
#define ssumr1_0 v11
#define ssumr2_0 v12
#define ssumr3_0 v13
#define ssumrh10_0 v14
#define ssumrh32_0 v15
#define sout0 v14
/*=============================================================================*/
.text
/*----------------------------------------------------------------------------
 * void fully_connected_asm(const uint8_t *ptr_d, const uint8_t *ptr_w,
 *                          uint8_t *ptr_o, int batch, int in_depth,
 *                          int out_depth, [stack] int32 *ptr_m, int recip,
 *                          [stack] const int32 *ptr_suma,
 *                          const int32 *ptr_bias)
 * Quantized matrix*vector: for each batch, for each block of 128 output
 * channels (4 HVX vectors of 32 x int32 accumulators), accumulate
 * vrmpy(weights, data) over in_depth, seeded with bias[o] + suma[b].
 * The 32-bit sums are scaled by 'recip' (vmpye/vmpyo fixed-point multiply
 * with round/sat), packed to u8 and streamed to ptr_o.  Running per-lane
 * max/min of the raw sums are tree-reduced at the end and stored to
 * vmem(ptr_m+0) = max vector, vmem(ptr_m+1) = min vector.
 * Assumptions (from the file header): ptr_w/ptr_o 128-byte aligned, weights
 * pre-transposed; in_depth a multiple of 8 (processed 8 bytes per inner
 * iteration); out_depth a multiple of 128 - TODO confirm against callers.
 * l2fetch prefetches the next weight block while the inner loop runs.
 *--------------------------------------------------------------------------*/
.global fully_connected_asm
.balign 32
.type fully_connected_asm, @function
fully_connected_asm:
{
recip_ptr_m = memd(sp+#0) // stack args read with PRE-packet sp
ptrs_bias_suma = memd(sp+#8) //
sp = add(sp,#-3*8) // manual frame for 3 callee-saved pairs
indepthd8 = lsr(in_depth,#3) // inner-loop trip unit: 8 input bytes
}{
memd(sp+#0) = r17:16 //
memd(sp+#8) = r19:18 //
stride = asl(out_depth,#2) // weight row stride in bytes
}{
memd(sp+#16) = r21:20 //
indepthd8mod = and(indepthd8,#15) // remainder of 16-iteration blocks
c7fffffff = ##0x7fffffff //
}{
m0 = stride //
p0 = cmp.eq(indepthd8mod,#0) //
lc1 = lsr(indepthd8,#4) //
}{
l2f_w_win0 = ##0x02000010 // for fetch a block of width=512,height=16
if p0 indepthd8mod = #16 //
smin = vsplat(c7fffffff) // running min starts at INT32_MAX
}{
if !p0 lc1 = add(lc1,#1) //ceil((in_depth/8)/16)
l2f_w_win1=combine(l2f_w_win0.h,indepthd8mod.l)// last (partial) block height
smax = vnot(smin) // running max starts at INT32_MIN
srecip = vsplat(recip) //
}{
p0 = cmp.eq(lc1,#1) //
if p0.new l2f_w_win0 = l2f_w_win1 // single-block case: partial window only
}
/*============================================================================*/
.fully_connected_batches_loop: // TODO: optimize when batch>1
{
out_d = out_depth //
sumaval = memw(ptr_suma++#4) // per-batch input-sum correction term
l2feparam_w_l = l2f_w_win0 //
}{
ssumaval = vsplat(sumaval) //
p3 = !cmp.gt(out_d,#0) // p3 gates the pipelined store below
}
.balign 32
.fully_connected_outdepth_loop:
/* one pass = 128 output channels; accumulators seeded with bias + suma */
{
loop1(.fully_connected_indepth_loop,lc1) //
ptr_wk = add(ptr_w,#4*128) // weight base for the NEXT 128-out block
sbias0.tmp = vmem(ptr_bias++#1) //
ssum0_0.w = vadd(sbias0.w,ssumaval.w) //
}{
out_d = add(out_d,#-128) //
sbias1.tmp = vmem(ptr_bias++#1) //
ssum1_0.w = vadd(sbias1.w,ssumaval.w) //
count = indepthd8 //
}{
lc0 = #16 //
sbias2.tmp = vmem(ptr_bias++#1) //
ssum2_0.w = vadd(sbias2.w,ssumaval.w) //
nop
}{
sbias3.tmp = vmem(ptr_bias++#1) //
ssum3_0.w = vadd(sbias3.w,ssumaval.w) //
d7654_d3210 = memd(ptr_d++#8) // first 8 input bytes
nop
}
.balign 32
.fully_connected_indepth_loop:
/* pick this block's trip count and program the l2fetch for the next one */
{
lc0 = min(lc0,count) //
p0 = cmp.gt(count,#32) //
if !p0.new l2feparam_w_l = l2f_w_win1 //
ptr_w_fe = addasl(ptr_w,stride,#4) //
}{
loop0(.fully_connected_indepth_innerloop,lc0)//
p0 = cmp.gt(count,#16) // p0 = not last iteration?
if !p0.new ptr_w_fe = ptr_wk //
if !p0.new l2feparam_w_l = l2f_w_win0 //
}{
p0 = not(p0) // if last block,
p0 = !cmp.gt(out_d,#0) // then set to 0 to cancel l2fetch
if p0.new l2feparam_w_l = #0 //
if p0.new ptr_w_fe = ptr_w //
}{
l2fetch(ptr_w_fe,l2feparam_w) //
nop; nop; nop
}
.balign 32
.fully_connected_indepth_innerloop:
/* 8 input bytes x 128 outputs per iteration via 8 vrmpy ops (.tmp loads) */
{
sw3.tmp = vmem(ptr_w+#3) //
ssum3_0.uw += vrmpy(sw3.ub,d3210.ub) //
}{
sw2.tmp = vmem(ptr_w+#2) //
ssum2_0.uw += vrmpy(sw2.ub,d3210.ub) //
}{
sw1.tmp = vmem(ptr_w+#1) //
ssum1_0.uw += vrmpy(sw1.ub,d3210.ub) //
}{
sw0.tmp = vmem(ptr_w++m0) //
ssum0_0.uw += vrmpy(sw0.ub,d3210.ub) //
count = add(count,#-1) //
}{
sw3.tmp = vmem(ptr_w+#3) //
ssum3_0.uw += vrmpy(sw3.ub,d7654.ub) //
p0 = cmp.eq(count,#0) //
}{
sw2.tmp = vmem(ptr_w+#2) //
ssum2_0.uw += vrmpy(sw2.ub,d7654.ub) //
}{
sw1.tmp = vmem(ptr_w+#1) //
ssum1_0.uw += vrmpy(sw1.ub,d7654.ub) //
}{
sw0.tmp = vmem(ptr_w++m0) //
ssum0_0.uw += vrmpy(sw0.ub,d7654.ub) //
if !p0 d7654_d3210 = memd(ptr_d++#8) // prefetch next 8 input bytes
}:endloop0:endloop1
.fully_connected_indepth_lpend:
/* scale/pack interleaved with the PREVIOUS block's pack+store ([2] lines);
 * p3 is false on the first block so no garbage is stored. */
{
ssumr3_0.w = vmpye(ssum3_0.w,srecip.uh) //
ssumrh32_0.h=vpack(ssumr3_0.w,ssumr2_0.w):sat//[2]
}{
ssumr2_0.w = vmpye(ssum2_0.w,srecip.uh) //
ssumrh10_0.h=vpack(ssumr1_0.w,ssumr0_0.w):sat//[2]
}{
ssumr3_0.w += vmpyo(ssum3_0.w,srecip.h):<<1:rnd:sat:shift
smax.w = vmax(smax.w,ssum3_0.w) // track raw-sum range for requantization
smin.w = vmin(smin.w,ssum3_0.w) //
}{
ssumr2_0.w += vmpyo(ssum2_0.w,srecip.h):<<1:rnd:sat:shift
sout0.ub=vpack(ssumrh32_0.h,ssumrh10_0.h):sat//[2]
if p3 vmem(ptr_o++#1) = sout0.new //[2]
}{
ssumr1_0.w = vmpye(ssum1_0.w,srecip.uh) //
smax.w = vmax(smax.w,ssum2_0.w) //
smin.w = vmin(smin.w,ssum2_0.w) //
}{
ssumr0_0.w = vmpye(ssum0_0.w,srecip.uh) //
smax.w = vmax(smax.w,ssum1_0.w) //
smin.w = vmin(smin.w,ssum1_0.w) //
}{
ssumr1_0.w += vmpyo(ssum1_0.w,srecip.h):<<1:rnd:sat:shift
smax.w = vmax(smax.w,ssum0_0.w) //
smin.w = vmin(smin.w,ssum0_0.w) //
p3 = cmp.gt(out_d,#0) // more 128-out blocks in this batch?
}{
ssumr0_0.w += vmpyo(ssum0_0.w,srecip.h):<<1:rnd:sat:shift
ptr_w = ptr_wk //
if p3 ptr_d = sub(ptr_d,in_depth) // rewind input for next output block
if p3 jump .fully_connected_outdepth_loop //
}
.fully_connected_outdepth_lpend:
/* drain the pipeline: pack and store the final block's results */
{
ssumrh32_0.h=vpack(ssumr3_0.w,ssumr2_0.w):sat//[2]
batch = add(batch,#-1) //
}{
ssumrh10_0.h=vpack(ssumr1_0.w,ssumr0_0.w):sat//[2]
ptr_w = sub(ptr_w,stride) //
ptr_bias = sub(ptr_bias,stride) // rewind bias for the next batch
p0 = cmp.gt(batch,#0) //
}{
sout0.ub=vpack(ssumrh32_0.h,ssumrh10_0.h):sat//[2]
vmem(ptr_o++#1) = sout0.new //[2]
if p0 jump .fully_connected_batches_loop //
}
/*=============================================================================*/
/* log2(VLEN/4)=5-step rotate-and-compare tree reduction of the max/min vecs */
{
loop0(.fully_connect_reducemax_loop,#5) //
k = #64 // rotate distance, halved each step
r17:16 = memd(sp+#0) //
r19:18 = memd(sp+#8) //
}
.falign
.fully_connect_reducemax_loop:
{
smax_t = vror(smax,k) //
}{
smin_t = vror(smin,k) //
smax.w = vmax(smax.w,smax_t.w) //
}{
smin.w = vmin(smin.w,smin_t.w) //
k = lsr(k,#1) //
}:endloop0
{
vmem(ptr_m+#0) = smax // every lane now holds the global max
}{
vmem(ptr_m+#1) = smin // every lane now holds the global min
}{
r21:20 = memd(sp+#16) //
sp = add(sp,#3*8) //
jumpr r31 //
}
.fully_connected_asm_end:
/*=============================================================================*/
.size fully_connected_asm, .-fully_connected_asm
|
XiaoMi/nnlib | 6,816 | hexagon/asm_src/gemaccb_h.S |
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
*/
/*======================================================================*/
/* FUNCTIONS : gemaccb_asm */
/* */
/* DESCRIPTION */
/* Sum Y matrix vertically and multiply by a_offset */
/* */
/* ARCHITECTURE : QDSP6V6 + HVX */
/*======================================================================*/
/* REVISION HISTORY: */
/* ================= */
/* */
/* Author Date Comments */
/* ------------------------------------------------------------- */
/* DJH 03/07/16 created */
/* DJH 07/10/16 modified input transpose operation*/
/*======================================================================*/
/* IDEAL-CYCLE-COUNT: */
/* -> M*K/128+13 */
/* */
/* MEMORY */
/* CODESIZE = 352 bytes */
/* STACK = 0 bytes */
/* ASSUMPTIONS */
/* y 128 byte aligned */
/* x is 8byte aligned */
/* K%8=0 M%128=0 */
/* C MODEL */
/* K = Klen | Kstride */
/* M = Mlen | Mstride */
/* write output into blocks width same as size to save memory */
/*======================================================================*/
#if 0
void gemaccb_cn(uint8 * b, uint8 * c, int K, int8 a_offset) {
int j, k;
int32 sumb;
uint8 b_val;
if(a_offset != 0)
for (j=0; j < 32; j++) {
sumb = 0;
for (k=0; k < K; k++) {
b_val = b[k*M+j];
sumb += b_val ;
}
c[j] += sumb*a_offset;
}
else
for (j=0; j < 32; j++) {
c[j] = 0;
}
return;
}
#endif
/*======================================================================*/
.text
.file "gemaccb_h.S"
.global gemaccb_asm
.balign 32
.type gemaccb_asm, @function
/*-----------------------------------------------------------------------*/
/* gemaccb_asm -- accumulate x_offset-scaled byte sums of Y into the     */
/* 32 int32 lanes at ptr_z:  z[lane] += x_offset * sum(bytes in lane)    */
/* In:   r0 = ptr_y    Y matrix, 128-byte aligned, streamed as K/4 vecs  */
/*       r1 = ptr_z    32 x int32 accumulators (one HVX vector)          */
/*       r2 = k        K; consumed as K/4 vector loads                   */
/*       r3 = x_offset scale factor (a_offset in the C model above)      */
/* Clobbers: r2, r4, r5, p0, v0-v4, loop0 state                          */
/* NOTE(review): vrmpy against 0x01010101 sums each aligned 4-byte group */
/* into one word lane, so the "vertical" sum relies on Y's transposed /  */
/* interleaved packing -- confirm against the code that lays out Y.      */
/* NOTE(review): when x_offset==0 the C model above writes c[j]=0, but   */
/* this code leaves *ptr_z unchanged (adds 0) -- confirm which is meant. */
/*-----------------------------------------------------------------------*/
gemaccb_asm:
/*======================================================================*/
#define ptr_y r0 //Y matrix aligned to 128bytes
#define ptr_z r1 //integer accumulation of row of Y * xoffset
#define k r2 //K; loop below consumes K/4 vectors
#define x_offset r3 //input offset
#define dotp r4 //0x01010101: all-ones bytes for vrmpy byte summing
#define c16 r5 //constant 16: shift used by 32x32 multiply idiom
/*======================================================================*/
#define z0 v0 //running byte sums, 32 x uint32
#define vx_offset v1
#define y0 v2 //current Y vector being streamed in
#define z1 v3 //z0 * x_offset (low 32 bits of product)
#define z2 v4 //accumulator loaded from / stored back to ptr_z
/*======================================================================*/
{
k = lsr(k, #2) //inherent /4: one 128B vector per 4 units of K
vx_offset = vsplat(x_offset) //replicate words
dotp = ##0x01010101 //
p0 = cmp.eq(x_offset, #0)
} {
if(p0) jump .L_zero_offset //skip sum loop; z0=#0 below still executes (same packet)
c16 = #16 //
z0 = #0 //
loop0(.L_loopK, k) //loop count = K/4 (NOTE(review): assumes K >= 4)
}
/*======================================================================*/
.balign 32
.L_loopK:
{
y0.tmp = vmem(ptr_y++#1) //[1, 0] stream next 128B of Y
z0.uw += vrmpy(y0.ub, dotp.ub) //[1, 8] each word lane += sum of its 4 bytes
}:endloop0
/*=======================================================================*/
.L_zero_offset:
// Full 32-bit product z1 = z0 * x_offset built from halfword multiplies:
// (odd-half product << 16) + even-half product; low 32 bits kept.
{
z1.w = vmpyio(z0.w, vx_offset.h) //do full 32bit
} {
z2 = vmem(ptr_z+#0) //read current accumulators
} {
z1.w = vasl(z1.w, c16) //
} {
z1.w += vmpyie(z0.w, vx_offset.uh) //
} {
z2.w = vadd(z2.w, z1.w) //accumulate and write back
vmem(ptr_z+#0) = z2.new //
}{
jumpr r31 //
}
/* ===================================================================== */
.L_end:
.size gemaccb_asm, .L_end-gemaccb_asm
|
XiaoMi/nnlib | 6,549 | hexagon/asm_src/gvmsumb_h.S | /*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*======================================================================*/
/* FUNCTIONS : gvmsumb_asm */
/* */
/* DESCRIPTION */
/* Sum Y matrix vertically and multiply by a_offset */
/* */
/* ARCHITECTURE : QDSP6V6 + HVX */
/*======================================================================*/
/* REVISION HISTORY: */
/* ================= */
/* */
/* Author Date Comments */
/* ------------------------------------------------------------- */
/* DJH 03/07/16 created */
/* DJH 07/10/16 modified input transpose operation*/
/*======================================================================*/
/* IDEAL-CYCLE-COUNT: */
/* -> M*K/128+13 */
/* */
/* MEMORY */
/* CODESIZE = 160 bytes */
/* STACK = 0 bytes */
/* ASSUMPTIONS */
/* y 128 byte aligned */
/* x is 8byte aligned */
/* K%4 =0 M%32=0 */
/* C MODEL */
/* write output into blocks width same as size to save memory */
/*======================================================================*/
#if 0
void gvmsumb_cn(uint8 * b, int * c, int K, int M, int a_offset) {
int j, k;
int32 sumb;
uint8 b_val;
for (j=0; j < M; j++) {
sumb = 0;
for (k=0; k < K; k++) {
b_val = b[k*M+j];
sumb += b_val;
}
        c[j] = sumb*a_offset;
}
return;
}
#endif
/*======================================================================*/
.text
.file "gvmsumb_h.S"
.global gvmsumb_asm
.balign 32
.type gvmsumb_asm, @function
/*-----------------------------------------------------------------------*/
/* gvmsumb_asm -- write x_offset-scaled byte sums of Y to the 32 int32   */
/* lanes at ptr_z:  z[lane] = x_offset * sum(bytes in lane)              */
/* Same kernel shape as gemaccb_asm but STORES instead of accumulating.  */
/* In:   r0 = ptr_y    Y matrix, 128-byte aligned, streamed as K/4 vecs  */
/*       r1 = ptr_z    32 x int32 results (one HVX vector)               */
/*       r2 = k        K; consumed as K/4 vector loads                   */
/*       r3 = x_offset scale factor (a_offset in the C model above)      */
/* Clobbers: r2, r4, r5, p0, v0-v3, loop0 state                          */
/* NOTE(review): on x_offset==0 we jump to .L_pass with z0/c16 unset;    */
/* every product against the zero vx_offset is still 0, so an all-zero   */
/* vector is stored -- matching the C model (sumb*0).                    */
/* NOTE(review): the first vrmpy is peeled into the setup packet and     */
/* loop0 runs K/4-1 times, so K >= 8 is assumed -- confirm for small K.  */
/*-----------------------------------------------------------------------*/
gvmsumb_asm:
/*======================================================================*/
#define ptr_y r0 //Y matrix aligned to 128bytes
#define ptr_z r1 //integer accumulation of row of Y * xoffset
#define k r2 //K; consumed as K/4 vectors
#define x_offset r3 //input offset
#define dotp r4 //0x01010101: all-ones bytes for vrmpy byte summing
#define c16 r5 //constant 16: shift used by 32x32 multiply idiom
/*======================================================================*/
#define z0 v0 //running byte sums, 32 x uint32
#define vx_offset v1 //
#define y0 v2 //current Y vector being streamed in
#define z1 v3 //z0 * x_offset (low 32 bits of product)
/*======================================================================*/
{
k = lsr(k, #2) //inherent /4: one 128B vector per 4 units of K
vx_offset = vsplat(x_offset) //replicate words
p0 = cmp.eq(x_offset, #0)
} {
if(p0) jump .L_pass //offset 0 -> result is all zeros; skip the sum loop
dotp = ##0x01010101 //
k = add(k, #-1) //first iteration is peeled into the next packet
} {
loop0(.L_loopK, k) //loop count = K/4 - 1
y0.tmp = vmem(ptr_y++#1) //[1, 0] peeled first vector
z0.uw = vrmpy(y0.ub, dotp.ub) //[1, 8] init: each word lane = sum of its 4 bytes
c16 = #16 //
}
/*======================================================================*/
.balign 32
.L_loopK:
{ y0.tmp = vmem(ptr_y++#1) //[1, 0] stream next 128B of Y
z0.uw += vrmpy(y0.ub, dotp.ub) //[1, 8] accumulate per-lane byte sums
}:endloop0
/*=======================================================================*/
.L_pass:
// Full 32-bit product z1 = z0 * x_offset from halfword multiplies:
// (odd-half product << 16) + even-half product; low 32 bits stored.
{ z1.w = vmpyio(z0.w, vx_offset.h) //do full 32bit
} {
z1.w = vasl(z1.w, c16) //
} {
z1.w += vmpyie(z0.w, vx_offset.uh) //
vmem(ptr_z+#0) = z1.new //
}{
jumpr r31 //
}
/* ===================================================================== */
.L_end:
.size gvmsumb_asm, .L_end-gvmsumb_asm
|
XiaoMi/nnlib | 38,873 | hexagon/asm_src/dwconv2dbbb_unsigned_s2_d32_v60_h.S | /*
* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
* FUNCTIONS : gvconv2dbbb_s2_v60_asm
*
* DESCRIPTION
* Perform 2d convolution using elements along depth, do only simple
* convolution. Stride horizontally by 2.
* Sums are scaled and saturated to 8bits. Max and Min accumulations are kept.
*
* ARCHITECTURE : QDSP6V60 + HVX
*
* REVISION HISTORY:
* =================
*
* Author Date Comments
* -------------------------------------------------------------
* DJH 10/10/17 created
*
* CYCLE-COUNT:
*
* MEMORY
* CODESIZE = 768 bytes
* STACK = 80 bytes
* ASSUMPTIONS
*/
#if 0
void dwconv2d_cn(
uint8_t *in_buf, int in_width, int in_height, int depth,
int stride_width, int stride_height, int in_offset,
uint8_t *filt, int filt_width, int filt_height, int filt_offset,
int *out_buf, int out_width, int out_height, int adj_x, int adj_y)
{
int out_y, in_y_base, out_x, in_x_base;
int out_z, filt_y, filt_x, in_element, filt_element, sum;
int * outstripe;
uint8_t * instripe;
uint8_t * filtstripe;
for (out_y = 0; out_y < out_height; out_y++) {
in_y_base = out_y * stride_height - adj_y;
for (out_x = 0; out_x < out_width; out_x++) {
in_x_base = out_x * stride_width - adj_x;
outstripe = out_buf+(depth*(out_x+ out_width*out_y));
for (out_z = 0; out_z < depth; out_z++) {
sum = 0;
for (filt_y = 0; filt_y < filt_height; filt_y++) {
if ((in_y_base + filt_y) >= in_height) continue;
if ((in_y_base + filt_y) < 0) continue;
for (filt_x = 0; filt_x < filt_width; filt_x++) {
if ((in_x_base + filt_x) >= in_width) continue;
if ((in_x_base + filt_x) < 0) continue;
filtstripe = filt+(depth*(filt_x+ filt_width*filt_y));
filt_element = filtstripe[out_z] - filt_offset;
instripe = in_buf+(depth*(in_x_base + filt_x + in_width*(in_y_base + filt_y))) ;
in_element = instripe[out_z] - in_offset;
sum += in_element*filt_element;
}
}
outstripe[out_z] = sum;
}
}
}
return;
}
#endif
#if 0
/*=============================================================================*/
.text
.file "dwconv2dbbb_unsigned_s2_d32_v60_h.S"
.global dwconv2dbbb_unsigned_s2_v60_asm
.balign 32
.type dwconv2dbbb_unsigned_s2_v60_asm, @function
dwconv2dbbb_unsigned_s2_v60_asm:
// NOTE(review): this entire implementation sits inside the file-level "#if 0"
// and is compiled out; the active build uses dwconv3x3bbb_unsigned_s2_v60_asm
// from the "#else" branch below. Kept/documented here as the reference variant.
/*=============================================================================*/
//stride assumed 2 filt width assumed 3 - horz stride 1 uses stride1 only
#define ptr_xi r0 //data
#define ptr_wi r1 //weights
#define ptr_zi r2 //results
#define next_in_width_depth r3 //width*depth*(stride==2)
#define next_out_width_depth r4 //next output line amount in bytes
#define next_in_width_32 r5 //width*32*(stride==2)
#define next_out_width_32 r16 //0 next output line amount in bytes
#define in_depth r17 //1 total in depth split into rows of depth 32
#define out_width r18 //2 is amount of work to be done
#define out_height r19 //3 number of vertical lines to perform
#define filt_height r20 //4 filt_height lines per filter
#define ptr_max r21 //5 maximum and minimum buffer
#define recip_level r22 //6 255 / (MAX - MIN) - used to scale to bytes
#define filt_sumi r23 //7 gemsumb
#define stride_height r24 //8 vertical striding any number
#define zshift r26 //9 shift correction for small accs
#define padding r27 //10 padding = 1 then shift 8 pad else shift 0
#define filt_offset r6 //11
//-----------------------------------------------------------------
#define c4 r6 //integer max find
#define s16 r9 //const = 16
#define in_wide_deep_high_256 r8 //width*depth*filt_height - 256
#define depth r10 //current depth used
#define ptr_x0 r11 //widthdata ptr
#define ptr_x1 r12 //depth ptr data
#define ptr_z0 r13 //depth output ptr
#define ptr_z1 r14 //width output ptr
#define ptr_w r15 //ptr to weights
#define filt_sum r22 //ptr to gemsumb
#define col_count r25 //width count down
#define out_wide_deep_128 r7 //advance ptr 128 along and pack to current line start
#define PV(VSRC) .word (0x1DFFE020+VSRC) //debug vec reg
//-----------------------------------------------------------------
#define vrecip v0 //32bit reciprocal scale 1/max-min
#define s00 v2 //
#define s01 v3 //
#define s01_s00 v3:2 //
#define s10 v4 //
#define s11 v5 //
#define s00_s v7 //
#define s01_s v8 //
#define s10_s v9 //
#define s11_s v17 //
#define d1_d0 v11:10 //
#define d3_d2 v13:12 //
#define d0 v10 //
#define d1 v11 //
#define d2 v12 //
#define d3 v13 //
#define d1d0 v10 //
#define d3d2 v12 //
#define d3_d0 v12 //
#define y0 v21 //
#define y1 v24 //
#define y2 v16 //
#define x0 v13 //
#define x1 v10 //
#define x2 v10 //
#define z3210 v26 //
#define z5432 v28 //
#define z7654 v29 //
#define zba98 v9 //
#define z9876 v6 //
#define z54__ v19 //
#define z__76 v20 //
#define z5476 v27 //
#define w_210 v22 //
#define ww210 v18 //
#define u_210 v23 //
#define maxo_maxe v31:30 //
#define mino_mine v15:14 //
#define maxe v30 //
#define mine v14 //
#define maxo v31 //
#define mino v15 //
#define C0 v1
#define C1 v25
#define C2 v31
#define C3 v15
#define SSR <<1:rnd:sat:shift //simplify mpy instruction
/*=============================================================================*/
// Prologue: save callee-saved r17-r27 pairs, pull the 12 stack arguments,
// splat the scaling constants, and pre-bias filt_offset by the padding shift.
{ allocframe(#72) // 0th entry on stack is (72+8)/4 =20 ints
maxe = #0 //
} {
memd(sp+#0) = r17:16 //
memd(sp+#8) = r19:18 //
} {
memd(sp+#16) = r21:20 //
memd(sp+#24) = r23:22 //
s16= #16 //
} {
memd(sp+#32) = r25:24 //
memd(sp+#40) = r27:26 //
r23 = ##0x7fffffff //max pos
} {
mine = vsplat(r23) //
zshift = memw(sp+#29<<2) //1 - #8 2 - 0
filt_offset = memw(sp+#31<<2) //
} {
maxe.w = vsub(maxe.w, mine.w) //
next_out_width_32 = memw(sp+#20<<2) //
in_depth = memw(sp+#21<<2) //
filt_offset = vsplatb(filt_offset) //
} {
out_width = memw(sp+#22<<2) //
out_height = memw(sp+#23<<2) //
filt_offset = asl(filt_offset, #8) //FFF0
} {
recip_level = memw(sp+#26<<2) //
padding = memw(sp+#30<<2) //1 - #8 2 - 0
out_wide_deep_128=add(next_out_width_depth,#-128) //
filt_offset = lsr(filt_offset, #8) //0FFF
} {
vrecip = vsplat(recip_level) //
filt_sumi = memw(sp+#27<<2) //
out_wide_deep_128=add(out_wide_deep_128,next_out_width_32) //
} {
filt_height = memw(sp+#24<<2) //
stride_height = memw(sp+#28<<2) //skip n vert lines
in_wide_deep_high_256=add(next_in_width_depth, #-256) //
filt_offset = asl(filt_offset, padding) //make FFF0 instead of 0FFF
} {
in_depth = lsr(in_depth, #5) // 1/32
filt_height = add(filt_height, #-1) //
in_wide_deep_high_256=add(in_wide_deep_high_256, next_in_width_32) //
col_count = out_width //
}
// ---- row loop: one output row per pass; input advances stride_height rows ----
.balign 32
.L_height:
{ ptr_z0 = ptr_zi //
ptr_x0 = ptr_xi //
ptr_zi=add(ptr_zi,next_out_width_depth) //
ptr_xi+=mpyi(next_in_width_depth,stride_height) //
}
// ---- column loop: each pass yields 4 output pixels x 32 depth (s00/s01/s10/s11) ----
.balign 32
.L_width:
{ x0.tmp = vmem(ptr_x0+#0) //[0,0]
y0.b = vshuff(x0.b) //[0,0]
ptr_x1 = ptr_x0 //[P,0]
loop1(.L_depth, in_depth) //[P,0]
} {
x1.tmp = vmem(ptr_x1+#1) //[0,1]
y1.b = vshuff(x1.b) //[0,1]
ptr_x0 = add(ptr_x0, next_in_width_32) //[P,1]
ptr_w = ptr_wi //restart filter stream
} {
x2.tmp = vmem(ptr_x1+#2) //[0,2]
y2.b = vshuff(x2.b) //[0,2]
ptr_z1 = ptr_z0 //[P,2]
ptr_z0 = add(ptr_z0, next_out_width_32) //[P,2]
} {
z3210.b = vshuff(y0.b) //[0,3]x3210
ptr_x1 =add(ptr_x1, next_in_width_depth) //[0,3]move to next pt in same depth position
s10 = vmem(filt_sumi+#0) //[P,3]
loop0(.L_vloop, filt_height) //[P,3]can have a filter of Nx3 stride = 1
} {
u_210.tmp = vmem(ptr_w++#1) //[0,4]
w_210.w = vasl(u_210.w, padding) //[0,4]
z7654.b = vshuff(y1.b) //[0,4]x7654
filt_sum = add(filt_sumi, #128) //[P,4]
} {
zba98.b = vshuff(y2.b) //[0,5]
s01_s00 = vcombine(s10, s10) //[P,5]filter offset * xoffset and bias
s11 = s10 //[P,5]filter offset * xoffset and bias
col_count = add(col_count, #-4) //[P,5]
} {
C0 = #0 //[P, 5]clear suma accumulators
C1 = #0 //[P, 5]
C2 = #0 //[P, 5]
C3 = #0 //[P, 5]
}
/* --------------------------------------------------------------------------- */
// ---- software-pipelined core: loop1 over depth/32 slices, loop0 over
// filt_height taps; s00/s01/s10/s11 accumulate 4 outputs, C0-C3 the suma
// (activation-sum * filt_offset) corrections subtracted after the loop ----
.balign 32
.L_depth:
.L_vloop:
{ s00.uw += vrmpy(z3210.ub, w_210.ub) //[0,6]filter even output
z54__.w = vasl(z7654.w, s16) //[0,6]
x0.tmp = vmem(ptr_x1+#0) //[1,0]
y0.b = vshuff(x0.b) //[1,0]
} {
s10.uw += vrmpy(z7654.ub, w_210.ub) //[0,7]z5432
z__76.uw = vlsr(z7654.uw, s16) //[0,7]
x1.tmp = vmem(ptr_x1+#1) //[1,1]
y1.b = vshuff(x1.b) //[1,1]
} {
ww210 = w_210 //[0,8]
z5432.h = vshuffo(z54__.h, z3210.h) //[0,8]
x2.tmp = vmem(ptr_x1+#2) //[1,2]
y2.b = vshuff(x2.b) //[1,2]
} {
C0.uw += vrmpy(z3210.ub, filt_offset.ub) //
C1.uw += vrmpy(z7654.ub, filt_offset.ub) //
z9876.h = vshuffe(zba98.h, z__76.h) //[0,9]
z3210.b = vshuff(y0.b) //[1,3]x3210
} {
s01.uw += vrmpy(z5432.ub, ww210.ub) //[0,10]filter even output
u_210.tmp = vmem(ptr_w++#1) //[1,4]
w_210.w = vasl(u_210.w, padding) //[1,4]
z7654.b = vshuff(y1.b) //[1,4]x7654
} {
s11.uw += vrmpy(z9876.ub, ww210.ub) //[0,11]filter even output
zba98.b = vshuff(y2.b) //[1,5]
ptr_x1 =add(ptr_x1, next_in_width_depth) //[1,3]move to next pt in same depth position
} {
C2.uw += vrmpy(z5432.ub, filt_offset.ub) //
C3.uw += vrmpy(z9876.ub, filt_offset.ub) //
}:endloop0
/* --------------------------------------------------------------------------- */
// Loop epilog: drain the pipeline, subtract suma corrections, track running
// max/min, scale by vrecip (1/(max-min)) and pack 4x32 words down to bytes.
{ s00.uw += vrmpy(z3210.ub, w_210.ub) //[1,6]filter even output
z54__.w = vasl(z7654.w, s16) //[1,6]
} {
s10.uw += vrmpy(z7654.ub, w_210.ub) //[1,7]z5432
z__76.uw = vlsr(z7654.uw, s16) //[1,7]
z5432.h = vshuffo(z54__.h, z3210.h) //[1,7]
} {
C0.uw += vrmpy(z3210.ub, filt_offset.ub) //
C1.uw += vrmpy(z7654.ub, filt_offset.ub) //
z9876.h = vshuffe(zba98.h, z__76.h) //[1,8]
} {
s00.w = vsub(s00.w, C0.w) //[E,9]
s10.w = vsub(s10.w, C1.w) //[E,9]
s01.uw += vrmpy(z5432.ub, w_210.ub) //[1,9]filter even output
} {
mine.w = vmin(mine.w, s00.w) //[E,8]
maxe.w = vmax(maxe.w, s00.w) //[E,8]
s11.uw += vrmpy(z9876.ub, w_210.ub) //[1,10]filter even output
} {
s00_s.w = vasl(s00.w, zshift) //[E,8]
C2.uw += vrmpy(z5432.ub, filt_offset.ub) //
C3.uw += vrmpy(z9876.ub, filt_offset.ub) //
maxe.w = vmax(maxe.w, s10.w) //[E,9]
} {
s10_s.w = vasl(s10.w, zshift) //[W,9]
s01.w = vsub(s01.w, C2.w) //[E,10]subtract suma
s11.w = vsub(s11.w, C3.w) //[E,10]subtract suma
mine.w = vmin(mine.w, s10.w) //[E,10]
} {
d0.w = vmpye(s00_s.w, vrecip.uh) //[E,11]
maxe.w = vmax(maxe.w, s01.w) //[E,10]
mine.w = vmin(mine.w, s01.w) //[E,11]
} {
d0.w += vmpyo(s00_s.w, vrecip.h):SSR //[E,12]
s01_s.w = vasl(s01.w, zshift) //[E,11]
} {
s11_s.w = vasl(s11.w, zshift) //[E,12]
x0.tmp = vmem(ptr_x0+#0) //[0,0]
y0.b = vshuff(x0.b) //[0,0]
} {
d1.w = vmpye(s01_s.w, vrecip.uh) //[E,13]
mine.w = vmin(mine.w, s11.w) //[E,13]
maxe.w = vmax(maxe.w, s11.w) //[E,12]
ptr_x1 = ptr_x0 //[P,0]
} {
d1.w += vmpyo(s01_s.w, vrecip.h):SSR //[E,14]
x1.tmp = vmem(ptr_x1+#1) //[0,1]
y1.b = vshuff(x1.b) //[0,1]
ptr_x0 = add(ptr_x0, next_in_width_32) //[P,1]
} {
d2.w = vmpye(s10_s.w, vrecip.uh) //[E,15]
x2.tmp = vmem(ptr_x1+#2) //[0,2]
y2.b = vshuff(x2.b) //[0,2]
} {
d2.w += vmpyo(s10_s.w, vrecip.h):SSR //[E,16]
d1d0.h = vpacke(d1.w, d0.w) //[E,16]
} {
d3.w = vmpye(s11_s.w, vrecip.uh) //[E,17]
z3210.b = vshuff(y0.b) //[0,3]x3210
ptr_x1 =add(ptr_x1, next_in_width_depth) //[0,3]move to next pt in same depth position
} {
d3.w += vmpyo(s11_s.w, vrecip.h):SSR //[E,18]
s01 = vmem(filt_sum++#1) //[P,3]
loop0(.L_vloop, filt_height) //[P,3]can have a filter of Nx3 stride = 1
} {
zba98.b = vshuff(y2.b) //[0,5]
s10 = s01 //[P,5]filter offset * xoffset and bias
s11 = s01 //[P,5]filter offset * xoffset and bias
C0 = #0 //[P,5] init suma accumulator
} {
d3d2.h = vpacke(d3.w, d2.w) //[E,20]
C1 = #0 //[P, 6] init suma accumulator
C2 = #0 //[P, 6] init suma accumulator
C3 = #0 //[P, 6] init suma accumulator
} {
u_210.tmp = vmem(ptr_w++#1) //[0,4]
w_210.w = vasl(u_210.w, padding) //[0,4]
z7654.b = vshuff(y1.b) //[0,4]x7654
s00 = s01 //[P,4]filter offset * xoffset and bias
} {
d3_d0.ub = vpack(d3d2.h,d1d0.h):sat //[E,22]
vmem(ptr_z1+#0) = d3_d0.new //[E,22]
ptr_z1 = ptr_z0 //[P,2]
ptr_z0 = add(ptr_z0, next_out_width_32) //[P,2]
}:endloop1 //end depth
/* --------------------------------------------------------------------------- */
{ ptr_x0=sub(ptr_x0,in_wide_deep_high_256) //next inputs
ptr_z0=sub(ptr_z0,out_wide_deep_128) //next output
p0 = cmp.eq(col_count, #0) //
if(!p0.new) jump:t .L_width //
}
/* --------------------------------------------------------------------------- */
{ out_height = add(out_height, #-1) //
col_count = out_width //
p0 = cmp.eq(out_height, #1) //
if(!p0.new) jump:t .L_height //
}
/* --------------------------------------------------------------------------- */
// ---- lateral reduction: fold per-lane max/min across the whole vector with
// vdeal rotations (4 rounds in loop0 plus one final round), then store the
// reduced max/min vectors to ptr_max[0] / ptr_max[1] ----
{ loop0(.L_peak, #4) //
c4 = #-4 //
} {
maxo_maxe = vdeal(maxe, maxe, c4) //
ptr_max = memw(sp+#25<<2) //
}
.L_peak:
{ maxe.w = vmax(maxe.w, maxo.w) //
mino_mine = vdeal(mine, mine, c4) //
} {
mine.w = vmin(mine.w, mino.w) //
} {
maxo_maxe = vdeal(maxe, maxe, c4) //
}:endloop0
{
maxe.w = vmax(maxe.w, maxo.w) //
vmem(ptr_max+#0) = maxe.new //
mino_mine = vdeal(mine, mine, c4) //
} {
mine.w = vmin(mine.w, mino.w) //
vmem(ptr_max+#1) = mine.new //
}
/* --------------------------------------------------------------------------- */
{ r17:16 = memd(sp+#0) //restore stack
r19:18 = memd(sp+#8) //Q
} {
r21:20 = memd(sp+#16) //Q
r23:22 = memd(sp+#24) //Q
} {
r25:24 = memd(sp+#32) //Q
r27:26 = memd(sp+#40) //Q
} {
dealloc_return //Q
}
.L_end:
.size dwconv2dbbb_unsigned_s2_v60_asm, .L_end-dwconv2dbbb_unsigned_s2_v60_asm
#else
/*=============================================================================*/
.text
.file "dwconv2dbbb_unsigned_s2_d32_v60_h.S"
.global dwconv3x3bbb_unsigned_s2_v60_asm
.balign 32
.type dwconv3x3bbb_unsigned_s2_v60_asm, @function
dwconv3x3bbb_unsigned_s2_v60_asm:
/* =========================================================================== */
//h stride assumed 1 vstride 1 or 2 filt width assumed 3 - hstride 2 requires new function
#define ptr_xi r0 //data
#define ptr_wi r1 //weights
#define filt_sumi r2 //gemsumb
#define ptr_zi r3 //results
#define next_in_row r4 //width*depth*(stride_horz==1)
#define next_in_width_32 r5 //width*32*(stride_horz==1)
#define in_depth r8 //1 total in depth
#define out_width r9 //2is amount of work to be done
#define next_out_row r10 //3next output line amount in bytes
#define out_height r11 //4 number of vertical lines to perform
#define recip_level r12 //5 255 / (MAX - MIN) - used to scale to bytes
#define zshift r13 //6 spare input
#define ptr_max r14 //7 maximum and minum buffer
#define stride_vert r15 //8 vertical stride is an option to save ops
#define filt_offset r16 //9
#define padding r17 //10 padding=0 patter PPPx, else xPPP
#define filt_off210_ r16 //
#define filt_off_210 r17 //
#define out_width_in_depth r9:8 //
#define out_height_next_out_row r11:10//
#define zshift_recip_level r13:12 //
#define stride_vert_ptr_max r15:14 //
#define padding_filt_offset r17:16 //
//-----------------------------------------------------------------
#define s8 r7 //const = 8
#define nrot r6 //rotate amount
#define ptr_x r6 //
#define ptr_x0 r18 //
#define ptr_x1 r19 //
#define ptr_x2 r20 //
#define ptr_z r21 //
#define ptr_w r22 //
#define filt_sum r23 //
#define filt_sum_ptr_w r23:22 //
#define PV(VSRC) .word (0x1DFFE020+VSRC) //debug vec reg
//-----------------------------------------------------------------
#define vrecip v0 //
#define woffset v1 //
#define s00 v2 //
#define s01 v3 //
#define s01s00 v3:2 //
#define s10 v4 //
#define s11 v5 //
#define s11s10 v5:4 //
#define sa0 v6 //
#define sa1 v7 //
#define sa1sa0 v7:6 //
#define sa2 v8 //
#define sa3 v9 //
#define sa3sa2 v9:8 //
#define l0w_210 v10 //
#define l1w_210 v11 //
#define l2w_210 v12 //
#define l0z3210 v13 //
#define l1z3210 v14 //
#define l2z3210 v15 //
#define l0z7654 v16 //
#define l1z7654 v16 //
#define l2z7654 v16 //
#define l0zba98 v17 //
#define l1zba98 v17 //
#define l2zba98 v17 //
#define l0z3232 v18 //
#define l1z3232 v18 //
#define l2z3232 v18 //
#define l0z5432 v18 //
#define l1z5432 v18 //
#define l2z5432 v18 //
#define l0z7676 v19 //
#define l1z7676 v19 //
#define l2z7676 v19 //
#define l0z9876 v19 //
#define l1z9876 v19 //
#define l2z9876 v19 //
#define d0 v20 //
#define d1 v21 //
#define d1_d0 v21:20 //
#define d2 v22 //
#define d3 v23 //
#define d3_d2 v23:22 //
#define d1d0 v24 //
#define d3d2 v25 //
#define d3210 v24 //
#define maxe v26 //
#define mine v27 //
#define y00 v16 //
#define y01 v17 //
#define y10 v16 //
#define y11 v17 //
#define y20 v16 //
#define y21 v17 //
#define x00 v30 //
#define x01 v30 //
#define x10 v30 //
#define x11 v30 //
#define x20 v30 //
#define x21 v30 //
#define w0 v30 //
#define w1 v30 //
#define w2 v30 //
#define maxt v2 //
#define mint v3 //
#define SSR <<1:rnd:sat:shift //simplfy mpy instruction
/*=============================================================================*/
{ out_width_in_depth = memd(sp+#0) //
out_height_next_out_row = memd(sp+#8) //
sp = add(sp,#-4*8) //
}{
zshift_recip_level = memd(sp+#(16+4*8)) //
stride_vert_ptr_max = memd(sp+#(24+4*8)) //
nop; nop //
}{
memd(sp+#0) = r17:16 //
padding_filt_offset = memd(sp+#(32+4*8)) //
vrecip = vsplat(recip_level) //
}{
memd(sp+#8) = r19:18 //
in_depth = lsr(in_depth, #5) //1/32
p0 = cmp.eq(padding,#0) //
}{
memd(sp+#16) = r21:20 //
memd(sp+#24) = r23:22 //
filt_offset = vsplatb(filt_offset) //
out_width = lsr(out_width,#2) //
}{
filt_off210_ = asl(filt_offset, #8) //
filt_off_210 = lsr(filt_offset, #8) //
filt_sum_ptr_w = combine(filt_sumi,ptr_wi) //
maxe = vmem(ptr_max+#0) //
}
{ s8 = mux(p0,#0,#8) //if padding = 1 then shift by 8
filt_offset = mux(p0,filt_off_210,filt_off210_)
mine = vmem(ptr_max+#1) //
}
/* --------------------------------------------------------------------------- */
.L_height:
{ ptr_x = ptr_xi //
ptr_z = ptr_zi //
p3 = xor(p3,p3) //
loop1(.L_depth,in_depth) //
}
/* --------------------------------------------------------------------------- */
.balign 32
.L_depth:
{ woffset = vmem(filt_sum++#1) //read in sum of taps
ptr_x0 = ptr_x //
ptr_x1 = add(ptr_x,next_in_row) //
nop //
}{
x00.tmp = vmem(ptr_x0++#1):nt //
l0z3210.b = vshuff(x00.b) //
ptr_x2 = add(ptr_x1,next_in_row) //
nop //
}{
x10.tmp = vmem(ptr_x1++#1) //
l1z3210.b = vshuff(x10.b) //
loop0(.L_width,out_width) //
nop //
}{
x20.tmp = vmem(ptr_x2++#1) //
l2z3210.b = vshuff(x20.b) //
ptr_x = add(ptr_x,next_in_width_32) //
}{
l0z3210.b = vshuff(l0z3210.b) //
w0.tmp = vmem(ptr_w++#1) //
l0w_210.w = vasl(w0.w, s8) //
}{
l1z3210.b = vshuff(l1z3210.b) //
w1.tmp = vmem(ptr_w++#1) //
l1w_210.w = vasl(w1.w, s8) //
}{
l2z3210.b = vshuff(l2z3210.b) //
w2.tmp = vmem(ptr_w++#1) //
l2w_210.w = vasl(w2.w, s8) //
}
/* --------------------------------------------------------------------------- */
.balign 32
.L_width:
{ x00.tmp = vmem(ptr_x0++#1):nt //[0, 0]
y00.b = vshuff(x00.b) //[0, 0]
s00 = woffset //[0, 0]
d3.w = vmpye(s11.w, vrecip.uh) //[0,28]multiply by 1/max
}{
x01.tmp = vmem(ptr_x0++#1):nt //[0, 1]
y01.b = vshuff(x01.b) //[0, 1]
s10 = woffset //[0, 1]
d2.w += vmpyo(s10.w, vrecip.h):SSR //[1,29]multiply by 1/max
}{
l0z7654.b = vshuff(y00.b) //[0, 2]
l0z3232.h = vshuffo(l0z3210.h,l0z3210.h) //[0, 2]
d3.w += vmpyo(s11.w, vrecip.h):SSR //[1,30]multiply by 1/max
}{
l0zba98.b = vshuff(y01.b) //[0, 3]
s00.uw += vrmpy(l0z3210.ub, l0w_210.ub) //[0, 3]
l0z5432.h = vshuffe(l0z7654.h, l0z3232.h) //[0, 3]
}{
s10.uw += vrmpy(l0z7654.ub, l0w_210.ub) //[0, 4]
l0z7676.h = vshuffo(l0z7654.h,l0z7654.h) //[0, 4]
s01 = woffset //[0, 4]
}{
sa0.uw = vrmpy(l0z3210.ub, filt_offset.ub) //[0, 5]z3210filter oddn output
sa2.uw = vrmpy(l0z7654.ub, filt_offset.ub) //[0, 5]z3210filter oddn output
s11 = woffset //[0, 5]
l0z9876.h = vshuffe(l0zba98.h, l0z7676.h) //[0, 5]
}{
s01.uw += vrmpy(l0z5432.ub, l0w_210.ub) //[0, 6]
l0z3210 = l0zba98 //[0, 6]
x10.tmp = vmem(ptr_x1++#1) //[0, 6]
y10.b = vshuff(x10.b) //[0, 6]
}{
s11.uw += vrmpy(l0z9876.ub, l0w_210.ub) //[0, 7]
x11.tmp = vmem(ptr_x1++#1) //[0, 7]
y11.b = vshuff(x11.b) //[0, 7]
}{
sa1.uw = vrmpy(l0z5432.ub, filt_offset.ub) //[0, 8]z3210filter oddn output
sa3.uw = vrmpy(l0z9876.ub, filt_offset.ub) //[0, 8]z3210filter oddn output
l1z7654.b = vshuff(y10.b) //[0, 8]
l1z3232.h = vshuffo(l1z3210.h,l1z3210.h) //[0, 8]
}{
l1zba98.b = vshuff(y11.b) //[0, 9]
s00.uw += vrmpy(l1z3210.ub, l1w_210.ub) //[0, 9]
l1z5432.h = vshuffe(l1z7654.h, l1z3232.h) //[0, 9]
}{
s10.uw += vrmpy(l1z7654.ub, l1w_210.ub) //[0,10]
l1z7676.h = vshuffo(l1z7654.h,l1z7654.h) //[0,10]
}{
sa0.uw += vrmpy(l1z3210.ub, filt_offset.ub) //[0,11]z3210filter oddn output
sa2.uw += vrmpy(l1z7654.ub, filt_offset.ub) //[0,11]z3210filter oddn output
l1z9876.h = vshuffe(l1zba98.h, l1z7676.h) //[0,11]
}{
s01.uw += vrmpy(l1z5432.ub, l1w_210.ub) //[0,12]
l1z3210 = l1zba98 //[0,12]
x20.tmp = vmem(ptr_x2++#1) //[0,12]
y20.b = vshuff(x20.b) //[0,12]
}{
s11.uw += vrmpy(l1z9876.ub, l1w_210.ub) //[0,13]
x21.tmp = vmem(ptr_x2++#1) //[0,13]
y21.b = vshuff(x21.b) //[0,13]
}{
sa1.uw += vrmpy(l1z5432.ub, filt_offset.ub) //[0,14]
sa3.uw += vrmpy(l1z9876.ub, filt_offset.ub) //[0,14]
l2z7654.b = vshuff(y20.b) //[0,14]
l2z3232.h = vshuffo(l2z3210.h,l2z3210.h) //[0,14]
}{
l2zba98.b = vshuff(y21.b) //[0,15]
s00.uw += vrmpy(l2z3210.ub, l2w_210.ub) //[0,15]
l2z5432.h = vshuffe(l2z7654.h, l2z3232.h) //[0,15]
}{
s10.uw += vrmpy(l2z7654.ub, l2w_210.ub) //[0,16]
l2z7676.h = vshuffo(l2z7654.h,l2z7654.h) //[0,16]
}{
sa0.uw += vrmpy(l2z3210.ub, filt_offset.ub) //[0,17]
sa2.uw += vrmpy(l2z7654.ub, filt_offset.ub) //[0,17]
l2z9876.h = vshuffe(l2zba98.h, l2z7676.h) //[0,17]
}{
s01.uw += vrmpy(l2z5432.ub, l2w_210.ub) //[0,18]
l2z3210 = l2zba98 //[0,18]
d1d0.h = vpack(d1.w, d0.w):sat //[1,31]take lower 16bits of rnded acc
}{
sa1.uw += vrmpy(l2z5432.ub, filt_offset.ub) //[0,19]
sa3.uw += vrmpy(l2z9876.ub, filt_offset.ub) //[0,19]
d3d2.h = vpack(d3.w, d2.w):sat //[1,32]take lower 16bits of rnded acc
}{
s11.uw += vrmpy(l2z9876.ub, l2w_210.ub) //[0,20]
s01s00.w = vsub(s01s00.w, sa1sa0.w) //[0,20]
}{
maxe.w = vmax(maxe.w, s00.w) //[0,21]find max
mine.w = vmin(mine.w, s00.w) //[0,21]find min
s00.w = vasl(s00.w,zshift) //[0,21]
s10.w = vsub(s10.w, sa2.w) //[0,21]
}{
maxe.w = vmax(maxe.w, s01.w) //[0,22]find max
mine.w = vmin(mine.w, s01.w) //[0,22]find min
s01.w = vasl(s01.w,zshift) //[0,22]
s11.w = vsub(s11.w, sa3.w) //[0,22]
}{
d0.w = vmpye(s00.w, vrecip.uh) //[0,23]multiply by 1/max
maxe.w = vmax(maxe.w, s10.w) //[0,23]find max
mine.w = vmin(mine.w, s10.w) //[0,23]find min
}{
d1.w = vmpye(s01.w, vrecip.uh) //[0,24]multiply by 1/max
maxe.w = vmax(maxe.w, s11.w) //[0,24]find max
mine.w = vmin(mine.w, s11.w) //[0,24]find min of acc
}{
d0.w += vmpyo(s00.w, vrecip.h):SSR //[0,25]multiply by 1/max
s10.w = vasl(s10.w,zshift) //[0,25]
}{
d1.w += vmpyo(s01.w, vrecip.h):SSR //[0,26]multiply by 1/max
s11.w = vasl(s11.w,zshift) //[0,26]
d3210.ub = vpack(d3d2.h, d1d0.h):sat //[1,33]deal into sequence
if p3 vmem(ptr_z++#1):nt = d3210.new //[1,33]store quantized bytes
}{
d2.w = vmpye(s10.w, vrecip.uh) //[0,27]multiply by 1/max
p3 = or(p3,!p3) //
}:endloop0:endloop1
/* --------------------------------------------------------------------------- */
{ d3.w = vmpye(s11.w, vrecip.uh) //[0,28]multiply by 1/max
}{
d2.w += vmpyo(s10.w, vrecip.h):SSR //[1,29]multiply by 1/max
out_height = add(out_height, #-1) //
}{
d3.w += vmpyo(s11.w, vrecip.h):SSR //[1,30]multiply by 1/max
p0 = cmp.eq(out_height, #0) //
}{
d1d0.h = vpack(d1.w, d0.w):sat //[1,31]take lower 16bits of rnded acc
ptr_xi+=mpyi(stride_vert,next_in_row) //incrmeent input ptr
ptr_zi=add(ptr_zi,next_out_row) //incrmeent output ptr
}{
d3d2.h = vpack(d3.w, d2.w):sat //[1,32]take lower 16bits of rnded acc
filt_sum_ptr_w = combine(filt_sumi,ptr_wi) //
}{
d3210.ub = vpack(d3d2.h, d1d0.h):sat //[1,33]deal into sequence
vmem(ptr_z+#0):nt = d3210.new //[1,33]store quantized bytes
if(!p0) jump .L_height //next line
}
/* --------------------------------------------------------------------------- */
{ vmem(ptr_max+#0) = maxe //store max
r17:16 = memd(sp+#0) //restore stack
}{
vmem(ptr_max+#1) = mine //store min
r19:18 = memd(sp+#8) //restore
}{
/* --------------------------------------------------------------------------- */
r21:20 = memd(sp+#16) //restore
r23:22 = memd(sp+#24) //restore
sp = add(sp,#4*8) // pop stack
jumpr r31 //return
}
.L_end:
.size dwconv3x3bbb_unsigned_s2_v60_asm, .L_end-dwconv3x3bbb_unsigned_s2_v60_asm
#endif
|
XiaoMi/nnlib | 19,828 | hexagon/asm_src/dwconv2dhhh_MxN_h.S | /*
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#if 0
void dwconv2dhhh_cn(
uint16_t *in_buf,
int16_t *filt,
uint16_t *out_buf,
int next_in_width,
int next_out_width,
int next_in_width_32,
int next_out_width_32,
int depth,
int out_width,
int out_height,
int *bias_sum,
int *max,
int recip_level) {
int out_y, d, out_x, in_val, filt_val;
int out_z, filt_y, filt_x, tsum;
int64_t sum;
for (out_y = 0; out_y < out_height; out_y++) {
for (out_x = 0; out_x < out_width; out_x++) {
for(d=0; d < depth/32; d++) {
for (out_z = 0; out_z < 32; out_z++) {
sum = bias_sum[32*d+out_z];
for (filt_y = 0; filt_y < 3; filt_y++) {
for (filt_x = 0; filt_x < 3; filt_x++) {
in_val = in_buf[(out_y + filt_y) * next_in_width + d * next_in_width_32 + (out_x + filt_x) * 32 + out_z];
filt_val = filt[32*d*3*4 + (filt_x + 4*filt_y)*32 + out_z] ; //0122,0122,0122
sum += in_val*filt_val;
}
}
tsum = sat(sum);
max[0] = (tsum > max[0]) ? tsum : max[0];
max[32] = (tsum < max[32]) ? tsum : max[32];
sum = sum * (int64_t)recip_level + 0x40000000LL;
sum = sum >> 31;
if(sum < 0) sum = 0; if(sum > 0xffffll) sum = 0xffffll;
out_buf[out_y * next_out_width + 32 * out_x + d * next_out_width_32 + out_z] = (uint16_t) (sum>>2);
}
}
}
}
return;
}
#endif
/* ----------------------------------------------------------------------------- */
.text
.file "dwconv2dhhh_MxN_h.S"
.global dwconv2dhhh_MxN_asm
.balign 32
.type dwconv2dhhh_MxN_asm, @function
dwconv2dhhh_MxN_asm:
/* ----------------------------------------------------------------------------- */
//CALLING REGS
#define in_buf r0 //
#define filt r1 //
#define out_buf r2 //
#define next_in_width_depth r3 //
#define next_out_width_depth r4 //currently unused
#define next_in_width_32 r5 //
#define next_out_width_32 r10 //
#define depth r17 //
#define in_depth r11 //
#define out_width r12 //
#define out_height r13 //
#define filt_width r24 //
#define filt_height r25 //
#define bias_sum r14 //
#define ptr_max r15 //
#define recip_level r16 //
#define recip_shift r8 //
#define stride_v_h r28 //scratch holds packed vert and horz stride
//SCALER REGS
#define ptr_w0 r16 //
#define ptr_w1 r17 //
#define c64 r7 //
#define c4 r6 //
#define s8 r9 //
#define bias_ptr r18 //
#define ptr_x0 r19 //
#define ptr_x1 r20 //
#define ptr_x2 r21 //
#define ptr_xin r22 //
#define ptr_y r23 //
#define out_depth_cnt r26 //
#define width_cnt r27 //
#define filt_size r8 //
#define next_in_width_depth_stride r4 //
#define stride_h r28 //
//VECTOR REGS
#define vec128 v1 //
#define w01_w00 v0 //
#define x01_00 v6 //
#define x03_02 v7 //
#define x02_01 v8 //
#define xh01_h00_xl01_l00 v21:20 //
#define xh01_h00 v21 //
#define xl01_l00 v20 //
#define xh02_h01_xl02_l01 v5:4 //
#define xh02_h01 v5 //
#define xl02_l01 v4 //
#define m01l_m00l v11:10 //
#define m01l v11 //
#define m00l v10 //
#define m01h_m00h v13:12 //
#define m01h v13 //
#define m00h v12 //
#define m11l_m10l v15:14 //
#define m11l v15 //
#define m10l v14 //
#define m11h_m10h v3:2 //
#define m11h v3 //
#define m10h v2 //
#define l1_l0 v11:10 //
#define l1 v11 //
#define l0 v10 //
#define h1_h0 v13:12 //
#define h1 v13 //
#define h0 v12 //
#define vrecip v9 //
#define vshamt_vec v25 //
#define max v16 //
#define min v17 //
#define d0 v29 //
#define d1 v30 //
#define d1d0 v28 //
#define bias_val v18 //
#define s0 v26 //
#define s1 v27 //
#define SSR <<1:rnd:sat:shift //simplfy mpy instruction
#define PV(VSRC) .word (0x1DFFE020+VSRC) //debug vec reg
#define PS(SSRC) .word (0x1DFFE100+SSRC) //debug scaler reg
/* ----------------------------------------------------------------------------- */
// Prologue: save callee-saved register pairs, unpack the stacked arguments,
// and precompute loop-invariant values (filter geometry, strides, splatted
// vector constants).  After allocframe(#56) plus the 8-byte FP/LR pair the
// incoming stack arguments start at word (56+8)/4 = 16, i.e. sp+#16<<2.
   { allocframe(#56)                              //stack args begin at word (56+8)/4=16
     c4 = #-4                                     //shuffle/deal control used by vshuff later
     s8 = #128                                    //temporary 128 used to build vec128
   } {
     memd(sp+#0) = r17:16                         //save callee-saved pair
     memd(sp+#8) = r19:18                         //save callee-saved pair
     vec128 = vsplat(s8)                          //32-bit lanes = 128: rounding bias for >>8
     s8 = #8                                      //s8 becomes the shift amount used below
   } {
     memd(sp+#16) = r21:20                        //save callee-saved pair
     memd(sp+#24) = r23:22                        //save callee-saved pair
   } {
     memd(sp+#32) = r25:24                        //save callee-saved pair
     depth = memw(sp+#17<<2)                      //packed depth word (low 16 bits: out depth)
   } {
     memd(sp+#40) = r27:26                        //save callee-saved pair
     next_out_width_32 = memw(sp+#16<<2)          //
     depth = zxth(depth)                          //keep out-depth half (visible next packet)
     in_depth = lsr(depth, #21)                   //same packet: reads old depth; upper half /32
   } {
     depth = lsr(depth, #5)                       //out depth / 32 (number of d32 slices)
     out_width = memw(sp+#18<<2)                  //
     out_height = memw(sp+#19<<2)                 //
   } {
     out_depth_cnt = depth                        //
     recip_shift = memw(sp+#25<<2)                //
     filt_width = memw(sp+#20<<2)                 //
     out_width = add(out_width, #1)               //round up: two outputs per width iteration
   } {
     filt_height = memw(sp+#21<<2)                //
     bias_sum = memw(sp+#22<<2)                   //
     filt_width = add(filt_width, #1)             //round up to even
     out_width = lsr(out_width, #1)               //width-loop count (output pairs)
   } {
     vshamt_vec= vsplat(recip_shift)              //per-lane left shift applied before recip mpy
     ptr_max = memw(sp+#23<<2)                    //
     recip_level = memw(sp+#24<<2)                //
     filt_width = lsr(filt_width, #1)             //divide by 2: taps consumed in pairs
   } {
     memw(sp+#17<<2) = depth                      //stash out-depth count, reloaded per row
     vrecip = vsplat(recip_level)                 //
     stride_v_h = memw(sp+#26<<2)                 //packed strides: low 16 horz, high 16 vert
     filt_size = mpyi(filt_width, filt_height)    //tap-pair count per d32 slice
   } {
     c64 = zxth(stride_v_h)                       //horizontal stride
     next_in_width_depth_stride=lsr(stride_v_h,#16) //vertical stride (input rows per out row)
     filt_size = asl(filt_size, #7)               //filter bytes per d32 slice (*128)
     filt_width = add(filt_width, #-1)            //account for software pipeline
   } {
     p0 = cmp.eq(c64, #1)                         //is stride 1?
     next_in_width_depth_stride=mpyi(next_in_width_depth_stride, next_in_width_depth) //
     stride_h = #256                              //stride 2: 2 outputs * 2 pixels * 64B each
     max = vmem(ptr_max+#0)                       //running max accumulator
   } {
     memw(sp+#48) = in_depth                      //stash input-depth counter for reloads
     c64 = mux(p0, #64, #0)                       //vlalign amount: 64B (1 pixel) s1, 0 for s2
     if(p0) stride_h = #128                       //stride 1: 2 outputs * 1 pixel * 64B each
     min = vmem(ptr_max+#1)                       //running min accumulator
   }
/* ----------------------------------------------------------------------------- */
          .balign 32
// Outer loops: .L_height iterates output rows; .L_depth iterates d32 output
// slices within a row.  The inner .L_horz/.L_vert loops sweep the filter
// window.  Each 16-bit input sample is split into its low and high bytes
// (vzxt of .ub), multiplied separately against 16-bit filter taps, and the
// low-byte partial sums are later rounded (+128, preloaded via vec128) and
// shifted right by 8 before being merged with the high-byte sums.
.L_height:
   { bias_ptr = bias_sum                          //[HEIGHT]restart bias walk for the row
     ptr_xin = in_buf                             //[HEIGHT]
     ptr_w0 = filt                                //[HEIGHT]
     out_height = add(out_height, #-1)            //[HEIGHT]
   }
/* ----------------------------------------------------------------------------- */
.L_depth:
   { bias_val = vmem(bias_ptr++#1)                //[DEPTH]bias for this d32 slice
     ptr_x0 = ptr_xin                             //[DEPTH]
     ptr_xin = add(ptr_xin, next_in_width_32)     //[DEPTH]next d32 input slice
     in_depth = add(in_depth, #-1)                //[DEPTH]
   } {
     m00l = vec128                                //[WIDTH]accumulator preloaded w/ +128 round
     ptr_y = out_buf                              //[DEPTH]
     out_buf = add(out_buf, next_out_width_32)    //[DEPTH]next d32 output slice
     ptr_x1 = ptr_x0                              //[WIDTH]
   } {
     loop1(.L_vert, filt_height)                  //[WIDTH]filter rows
     m01l = vec128                                //[WIDTH]
     ptr_w1 = ptr_w0                              //[WIDTH]
     width_cnt = out_width                        //[WIDTH]
   } {
     loop0(.L_horz, filt_width)                   //[0, P]filter tap pairs
     x01_00 = vmemu(ptr_x1+#0)                    //[0, P]16bits, 64 values, d32
     ptr_x2 = add(ptr_x1, #128)                   //[0, P]
   } {
     x03_02 = vmemu(ptr_x2++#1)                   //[0, 0]16bits, 64 values, d32
     m01h = #0                                    //[WIDTH]high-byte accumulator: no round bias
     ptr_x0 = add(ptr_x0, stride_h)               //[WIDTH]advance by 2*stride 32 depths
   } {
     xh01_h00_xl01_l00.uh = vzxt(x01_00.ub)       //[0, 1]split samples into hi/lo bytes
     ptr_x1 = add(ptr_x1, next_in_width_depth)    //[0, P]next input row of window
     m00h = #0                                    //[WIDTH]
     out_depth_cnt = add(out_depth_cnt, #-1)      //[DEPTH]
   } {
     x02_01 = vlalign(x03_02, x01_00, c64)        //[0, 2]inputs shifted by one stride step
     w01_w00 = vmem(ptr_w1++#1)                   //[0, 2]
     m11l = vec128                                //[WIDTH]
     m10l = vec128                                //[WIDTH]
   } {
     p1 = cmp.eq(out_depth_cnt, #0)               //[DEPTH]
     xh02_h01_xl02_l01.uh = vzxt(x02_01.ub)       //[0, 3]
     m11h_m10h = vcombine(m01h,m01h)              //[WIDTH]zero the second output's hi accs
     p3 = !cmp.eq(r0, r0)                         //[WIDTH]p3=0: no pipelined store 1st pass
   }
/* ----------------------------------------------------------------------------- */
          .balign 32
.L_width:                                          //
.L_vert:                                           //
.L_horz:                                           //
// Software-pipelined core: multiply-accumulate current tap pair while
// fetching/unpacking the next pair's inputs and weights.
   { m01l_m00l.w += vmpy(w01_w00.h,xl01_l00.uh)    //[0, 4]low bytes * taps, output 0
     x01_00 = x03_02                               //[0, 4]
     x03_02 = vmemu(ptr_x2++#1)                    //[1, 0]16bits, 64 values, d32
   } {
     m01h_m00h.w += vmpy(w01_w00.h,xh01_h00.uh)    //[0, 5]high bytes * taps, output 0
     xh01_h00_xl01_l00.uh = vzxt(x01_00.ub)        //[1, 1]
   } {
     m11l_m10l.w += vmpy(w01_w00.h,xl02_l01.uh)    //[0, 6]low bytes * taps, output 1
     x02_01 = vlalign(x03_02, x01_00, c64)         //[1, 2]16bits, 64 values, d32
     w01_w00 = vmem(ptr_w1++#1)                    //[1, 2]
   } {
     w01_w00.tmp= vmem(ptr_w1+#-2)                 //[0, 7]re-read current taps for this packet
     m11h_m10h.w += vmpy(w01_w00.h,xh02_h01.uh)    //[0, 7]high bytes * taps, output 1
     xh02_h01_xl02_l01.uh = vzxt(x02_01.ub)        //[1, 3]
   }:endloop0
/* ----------------------------------------------------------------------------- */
// Per-filter-row epilogue: drain the pipelined tap pair, then advance to the
// next input row of the window and prime the next .L_horz pass.
   { loop0(.L_horz, filt_width)                    //[0, P]
     x01_00 = vmemu(ptr_x1+#0)                     //[0, P]16bits, 64 values, d32
     ptr_x2 = add(ptr_x1, #128)                    //[0, P]
   } {
     m01l_m00l.w += vmpy(w01_w00.h,xl01_l00.uh)    //[1, 4]drain: low bytes, output 0
     x03_02 = vmemu(ptr_x2++#1)                    //[0, 0]16bits, 64 values, d32
   } {
     m01h_m00h.w += vmpy(w01_w00.h,xh01_h00.uh)    //[1, 5]drain: high bytes, output 0
     xh01_h00_xl01_l00.uh = vzxt(x01_00.ub)        //[0, 1]
     ptr_x1 = add(ptr_x1, next_in_width_depth)     //[0, P]
   } {
     m11l_m10l.w += vmpy(w01_w00.h,xl02_l01.uh)    //[1, 6]drain: low bytes, output 1
     x02_01 = vlalign(x03_02, x01_00, c64)         //[0, 2]16bits, 64 values, d32
     w01_w00 = vmem(ptr_w1++#1)                    //[0, 2]
   } {
     w01_w00.tmp = vmem(ptr_w1+#-2)                //[1, 7]
     m11h_m10h.w += vmpy(w01_w00.h,xh02_h01.uh)    //[1, 7]drain: high bytes, output 1
     xh02_h01_xl02_l01.uh = vzxt(x02_01.ub)        //[0, 3]
   }:endloop1
/* ----------------------------------------------------------------------------- */
/* ----------------------------------------------------------------------------- */
// Width epilogue: combine lo/hi byte partial sums into full products
// (result = hi_sum + ((lo_sum + 128) >> 8)), pair-reduce across the two
// tap lanes via vshuff(...,c4), add bias, scale by vrecip and track min/max.
// Stores are pipelined one width-iteration behind and gated by p3, so the
// first pass through .L_width stores nothing.
   { m00l.w = vasr(m00l.w, s8)                     //[WIDTH, E](lo+128)>>8
     ptr_w1 = ptr_w0                               //[WIDTH, P]
     ptr_x1 = ptr_x0                               //[WIDTH, P]
     p2 = cmp.eq(in_depth, #0)                     //has input-depth counted down to 0?
   } {
     m00h.w = vadd(m00h.w, m00l.w)                 //[WIDTH, E]merge lo into hi sums
     m01l.w = vasr(m01l.w, s8)                     //[WIDTH, E]
     x01_00 = vmemu(ptr_x1+#0)                     //[0, P]16bits, 64 values, d32
   } {
     m01h.w = vadd(m01h.w, m01l.w)                 //[WIDTH, E]
     m10l.w = vasr(m10l.w, s8)                     //[WIDTH, E]
     ptr_x0 = add(ptr_x0, stride_h)                //[WIDTH, P]advance by 2*stride 32 depths
     ptr_x2 = add(ptr_x1, #128)                    //[0, P]
   } {
     m10h.w = vadd(m10h.w, m10l.w)                 //[WIDTH, E]
     m11l.w = vasr(m11l.w, s8)                     //[WIDTH, E]
     x03_02 = vmemu(ptr_x2++#1)                    //[0, 0]16bits, 64 values, d32
   } {
     l1_l0 = vshuff(m01h,m00h, c4)                 //[WIDTH, E]interleave for pairwise add
     m11h.w = vadd(m11h.w, m11l.w)                 //[WIDTH, E]
     ptr_x1 = add(ptr_x1, next_in_width_depth)     //[0, P]
   } {
     l0.w = vadd(l0.w, l1.w)                       //[WIDTH, E]final sums, output 0
     m01l = vec128                                 //[WIDTH, P]re-prime round bias
     xh01_h00_xl01_l00.uh = vzxt(x01_00.ub)        //[0, 1]
   } {
     s0.w = vadd(l0.w, bias_val.w)                 //[WIDTH, E]
     h1_h0 = vshuff(m11h,m10h, c4)                 //[WIDTH, E]
     m00l = vec128                                 //[WIDTH, P]
   } {
     h0.w = vadd(h0.w, h1.w)                       //[WIDTH, E]final sums, output 1
     loop1(.L_vert, filt_height)                   //[WIDTH, P]
     x02_01 = vlalign(x03_02, x01_00, c64)         //[0, 2]16bits, 64 values, d32
   } {
     d1d0.uh = vpack(d1.w, d0.w):sat               //[WIDTH, E]pack PREVIOUS iter's results
     s0.w = vasl(s0.w, vshamt_vec.w)               //[WIDTH, E]pre-scale before recip multiply
     s1.w = vadd(h0.w, bias_val.w)                 //[WIDTH, E]
     w01_w00 = vmem(ptr_w1++#1)                    //[0, 2]
   } {
     m01h = #0                                     //[WIDTH, P]
     m00h = #0                                     //[WIDTH, P]
     m11l = vec128                                 //[WIDTH, P]
     m10l = vec128                                 //[WIDTH, P]
   } {
     s1.w = vasl(s1.w, vshamt_vec.w)               //[WIDTH, E]
     d0.w = vmpye(s0.w, vrecip.uh)                 //[WIDTH, E]multiply by 1/max (even part)
     if(p3) vmemu(ptr_y++#1) = d1d0                //[WIDTH, E]store previous pair (skipped 1st)
   } {
     xh02_h01_xl02_l01.uh = vzxt(x02_01.ub)        //[0, 3]
     d0.w += vmpyo(s0.w, vrecip.h):SSR             //[WIDTH, E]odd part, round+sat+shift
     width_cnt = add(width_cnt, #-1)               //[WIDTH, E]
   } {
     p0 = cmp.eq(width_cnt, #0)                    //[WIDTH, E]
     max.w = vmax(max.w, d0.w)                     //[WIDTH, E]track max
     d1.w = vmpye(s1.w, vrecip.uh)                 //[WIDTH, E]
     m11h = #0                                     //[WIDTH, P]
   } {
     min.w = vmin(min.w, d0.w)                     //[WIDTH, E]track min
     d1.w += vmpyo(s1.w, vrecip.h):SSR             //[WIDTH, E]
     m10h = #0                                     //[WIDTH, P]
     p3 = cmp.eq(r0, r0)                           //[WIDTH]enable stores from 2nd pass on
   } {
     max.w = vmax(max.w, d1.w)                     //[WIDTH, E]
     min.w = vmin(min.w, d1.w)                     //[WIDTH, E]
     if(!p0) jump .L_width                         //[WIDTH, E]
   }//end width
/* ----------------------------------------------------------------------------- */
   {
     d1d0.uh = vpack(d1.w, d0.w):sat               //[WIDTH, E]flush final output pair
     if(p2) in_depth = memw(sp+#48)                //reload input-depth counter
     if(p2) ptr_xin = in_buf                       //[HEIGHT]rewind input for next out slice
   } {
     ptr_w0 = add(ptr_w0, filt_size)               //[DEPTH]advance filter by one d32 slice
     vmemu(ptr_y+#0) = d1d0                        //[WIDTH, E]store final pair of the row
     if(!p1) jump .L_depth                         //[DEPTH]
   }//end depth
/* ----------------------------------------------------------------------------- */
   { p0 = cmp.eq(out_height, #0)                   //[HEIGHT]
     in_buf=add(in_buf,next_in_width_depth_stride) //[HEIGHT]advance input by vertical stride
     out_depth_cnt = memw(sp+#17<<2)               //[HEIGHT]reload out-depth count
     if(!p0.new) jump:t .L_height                  //[HEIGHT]next output row
   }//end height
/* ----------------------------------------------------------------------------- */
// Exit: write back the discovered min/max vectors, restore callee-saved
// register pairs and return.
   { vmem(ptr_max+#0) = max                        //save off discovered max
     r17:16 = memd(sp+#0)                          //restore
   } {
     vmem(ptr_max+#1) = min                        //save off discovered min
     r19:18 = memd(sp+#8)                          //restore
   } {
     r21:20 = memd(sp+#16)                         //restore
     r23:22 = memd(sp+#24)                         //restore
   } {
     r25:24 = memd(sp+#32)                         //restore
     r27:26 = memd(sp+#40)                         //restore
   } {
     dealloc_return                                //pop frame and return
   }
/* ----------------------------------------------------------------------------- */
.L_end:
      .size dwconv2dhhh_MxN_asm, .L_end-dwconv2dhhh_MxN_asm
/* ----------------------------------------------------------------------------- */
|
XiaoMi/nnlib | 4,763 | hexagon/asm_src/vmemset_short_h.S | /*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
*/
/* ============================================================================ */
// vmemset_short_asm(dst, val16, n)
// Fill n 16-bit elements at dst with the 16-bit value val16 (r1), using full
// 128-byte HVX vector stores.  Arbitrary start/end byte alignment is handled
// with store predicates: a head mask on the first store, a tail mask on the
// last, and a combined head+tail mask when the whole fill lies inside one
// vector.  NOTE(review): assumes dst need not be 128-byte aligned (vmem
// ignores the low address bits and the predicates mask the edges) — confirm.
          .global vmemset_short_asm
          .type vmemset_short_asm, @function
          .balign 32
vmemset_short_asm:
/* ============================================================================ */
#define dst r0
#define src r1
#define length r2
/* ============================================================================ */
#define dstalign r5
#define end r7
#define sel0 r8
#define kernel r3
#define sel1 r9
#define sel2 r4
#define dsto r10
#define y0 v2
#define vpredp v3
#define vprede v4
#define qprolog q0
#define qepilog q1
/* ============================================================================ */
       length = add(length, length)                //convert shorts to bytes
   { sel0 = ##0x01010101                           //bit 0: slot of modified prolog pred
     src = combine(src.L, src.L)                   //replicate 16-bit value into a word
     end = add(length, dst)                        //address one past last byte of block
   } {
     qprolog =vsetq(dst)                           //head mask __|--- (lanes before dst set)
     y0 = vsplat(src)                              //fill vector with the 16-bit pattern
     sel1 = add(sel0, sel0)                        //bit 1: slot of plain vec predicates
     end = and(end, #127)                          //alignment of last byte
   } {
     dstalign = and(dst, #127)                     //alignment of dst
     qepilog = vsetq(end)                          //tail mask: lanes below end enabled
     vpredp = vand(qprolog, sel1)                  //stash plain prolog pred in bit 1
     length -= add(end, #-127)                     //round byte count up to multiple of 128
   } {
     vprede = vand(qepilog, sel1)                  //stash plain epilog pred in bit 1
     qprolog = or(qprolog, !qepilog)               //modified prolog: also mask past-end lanes
     length = lsr(length, #7)                      //loop count in 128-byte vectors
     dstalign = add(dstalign, length)              //head offset + byte length (pre-shift value)
   } {
     vpredp|= vand(qprolog, sel0)                  //stash modified prolog pred in bit 0
     loop0(.L_blocks, length)                      //start main loop
     p2 = cmp.gt(dstalign, #127)                   //spans >1 vector? then use plain prolog
     if(!p2.new) sel1 = sel0                       //single vector: select modified prolog
   } {
     qprolog = vand(vpredp, sel1)                  //select the qprolog variant
     qepilog = vand(vprede, sel1)                  //matching epilog (empty if single vector)
   }
/* ============================================================================ */
          .balign 32
.L_blocks:
   {
     if(!qprolog) vmem(dst++#1) = y0               //store; head lanes masked on 1st iteration
     qprolog = and(qprolog, !qprolog)              //clear mask: later iterations store fully
   }:endloop0
/* ============================================================================ */
   {
     if(qepilog) vmem(dst+#0) = y0                 //store tail lanes of final partial vector
   }{
     jumpr r31                                     //return to caller
   }
.L_end:
/* ============================================================================ */
      .size vmemset_short_asm, .L_end-vmemset_short_asm
/* ============================================================================ */
/* ============================================================================ */
|
XiaoMi/nnlib | 6,900 | hexagon/asm_src/gvmaccb_h.S | /*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*======================================================================*/
/* FUNCTIONS : gvmaccb_asm */
/* */
/* DESCRIPTION */
/* Sum Y matrix vertically and multiply by a_offset */
/* */
/* ARCHITECTURE : QDSP6V6 + HVX */
/*======================================================================*/
/* REVISION HISTORY: */
/* ================= */
/* */
/* Author Date Comments */
/* ------------------------------------------------------------- */
/* DJH 03/07/16 created */
/* DJH 07/10/16 modified input transpose operation*/
/*======================================================================*/
/* IDEAL-CYCLE-COUNT: */
/* -> 32*K/128+13 */
/* */
/* MEMORY */
/* CODESIZE = 352 bytes */
/* STACK = 0 bytes */
/* ASSUMPTIONS */
/* y 128 byte aligned */
/* x is 8byte aligned */
/* K%8=0 M%128=0 */
/* C MODEL */
/* K = Klen | Kstride */
/* M = Mlen | Mstride */
/* write output into blocks width same as size to save memory */
/*======================================================================*/
#if 0
void gvmaccb_cn(uint8 * b, int32 * c, int K, int M, int8 a_offset) {
int j, k;
int32 sumb;
uint8 b_val;
for (j=0; j < M; j++) {
sumb = 0;
for (k=0; k < K; k++) {
b_val = b[k*M+j];
sumb += b_val ;
}
c[j] += sumb*a_offset;
}
return;
}
#endif
/*======================================================================*/
          .text
          .file "gvmaccb_h.S"
// gvmaccb_asm(y, z, k, x_offset)
// Accumulate x_offset * (byte sums of Y) into one vector (32 words) of
// 32-bit accumulators at z.  Each vrmpy against 0x01010101 sums groups of
// 4 consecutive bytes into a word lane, so the loop runs k/4 - 1 times with
// one vector consumed in the prologue.  NOTE(review): per the file header
// the input has been pre-transposed so 4 consecutive bytes belong to the
// same output column — confirm against caller.
          .global gvmaccb_asm
          .balign 32
          .type gvmaccb_asm, @function
gvmaccb_asm:
/*======================================================================*/
#define ptr_y r0 //Y matrix aligned to 128bytes
#define ptr_z r1 //integer accumulation of row of Y * xoffset
#define k r2 //k
#define x_offset r3 //input offset
#define dotp r4 //
#define c16 r5 //
/*======================================================================*/
#define z0 v0 //
#define vx_offset v1
#define y0 v2 //
#define z1 v3 //
#define z2 v4 //
/*======================================================================*/
   {
       k = lsr(k, #2)                       //vrmpy consumes 4 bytes per lane: /4
       vx_offset = vsplat(x_offset)         //replicate offset into all word lanes
       dotp = ##0x01010101                  //vrmpy coefficients of 1 => byte sums
   } {
       p0 = cmp.eq(x_offset, #0)            //offset 0 contributes nothing
       k = add(k, #-1)                      //one vector consumed in the prologue
       if(p0.new) jump:t .L_pass            //skip sums; z0*0 below yields 0 anyway
   } {
       loop0(.L_loopK, k)                   //[P, 2]loop count is k/4 - 1
       y0.tmp = vmem(ptr_y++#1)             //[P, 0]prologue load
       z0.uw = vrmpy(y0.ub, dotp.ub)        //[P, 0]init 32 sums of 4 bytes
       c16 = #16                            //shift used to rebuild 32-bit product
   }
/*======================================================================*/
          .balign 32
.L_loopK:
   {
       y0.tmp = vmem(ptr_y++#1)             //[1, 0]
       z0.uw += vrmpy(y0.ub, dotp.ub)       //[1, 0]accumulate 32 sums of 4 bytes
   }:endloop0
/*=======================================================================*/
// z1 = z0 * x_offset as a full 32-bit (low) product, built from the odd
// halfword product shifted up 16 plus the even unsigned halfword product.
// On the .L_pass path z0 and c16 are uninitialized, but vx_offset is all
// zero, so both partial products (and hence z1) are still 0.
.L_pass:
   {
       z1.w = vmpyio(z0.w, vx_offset.h)     //odd-halfword partial product
   } {
       z2 = vmem(ptr_z+#0)                  //load existing accumulators
   } {
       z1.w = vasl(z1.w, c16)               //place odd product in upper 16 bits
   } {
       z1.w += vmpyie(z0.w, vx_offset.uh)   //add even-halfword partial product
   } {
       z2.w = vadd(z2.w, z1.w)              //accumulate into z
       vmem(ptr_z+#0) = z2.new              //store updated accumulators
   }{
       jumpr r31                            //return
   }
/* ===================================================================== */
.L_end:
      .size gvmaccb_asm, .L_end-gvmaccb_asm
|
XiaoMi/nnlib | 21,456 | hexagon/asm_src/gvint16_h.S | /*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
    Description
Perform 2d integral over activation input of size in_depth. The in_depth blocks are collapsed to 1
int and then the ints are accumulated horizontally and vertically.
*/
/* ------------------------------------------------------------------------------------------ */
.text
.file "gvint16_h.S"
.global gvint_16b
.balign 32
.type gvint_16b, @function
gvint_16b:
/* ------------------------------------------------------------------------------------------ */
#define in_ptre0 r0 //
#define in_ptro0 r1 //
#define out_ptr0 r2 //
#define next_d32_row r3 //width of image physical
#define next_input r4 //mpyi(in_depth, in_width_bytes) logical width of image
#define integral_width r5 //number of integral outputs
#define in_depth r28 //indepth multiple of 32
#define out_height r22 //number of required output rows
#define tmp_buf0 r23 //tmp buffer
#define in_ptro r20 //
#define cntrl r11 //cntrl = ##integral_cntrl
/* ------------------------------------------------------------------------------------------ */
#define in_count r25
#define next_int_width r6 //distance to next output of integral buffer
#define c4 r7 //
#define e1 r17
#define filt_offset e1
#define e2 r18
#define e3 r8
#define e4 r9
#define e5 r10
#define in_ptre r12
#define out_ptr r13
#define out_ptr_1 r14
#define n r15 //loop count
#define tmp_buf r19
#define next_output r21 //jmp required to advance to next lot of computation
/* ------------------------------------------------------------------------------------------ */
#define vq1 q0
#define vq2 q1
#define vq3 q2
#define vq4 q3
#define vq5 q3
#define preds v27
#define perm1 v0
#define perm2 v1
#define perm3 v2
#define perm4 v3
#define perm5 v4
#define perm6 v5
#define delta4 v6
#define delta8 v6
#define delta16 v6
#define delta32 v6
#define delta64 v6
#define delta128 v7
#define vzero v26
#define d3d2d1d0 v15
#define h16g16f16e16h_h16g16f16e16l v11:10
#define h16g16f16e16h v11
#define h16g16f16e16l v10
#define d16c16b16a16h_d16c16b16a16l v31:30
#define d16c16b16a16h v31
#define d16c16b16a16l v30
#define h32g32f32e32h_h32g32f32e32l v13:12
#define h32g32f32e32h v13
#define h32g32f32e32l v12
#define a8a8a8a8 v22
#define b8b8b8b8 v23
#define b8b8a8a8h_b8b8a8a8l v29:28
#define b8b8a8a8h v29
#define b8b8a8a8l v28
#define b16b16a16a16 v25
#define d16d16c16c16 v22
#define f16f16e16e16 v14
#define h16h16g16g16 v16
#define d32c32b32a32 v18
#define h32g32f32e32 v19
#define intw31w00 v20
#define inty31y00 v24
#define prev_line v21
#define c8c8c8c8 v8
#define d8d8c8c8l c8c8c8c8
#define d8d8d8d8 v9
#define d8d8c8c8h d8d8d8d8
#define d8d8c8c8h_d8d8c8c8l v9:8
/* --------------------------------------------------------------------------------------- */
{ allocframe(#4*8) //
c4 = #-4 //
cntrl = add(pc,##integral_control@PCREL) //
} {
memd(sp+#0) = r17:16 //
memd(sp+#8) = r19:18 //
vzero = #0 //
n = lsr(integral_width, #5) //integral_width / 32
} {
memd(sp+#16) = r21:20 //
memd(sp+#24) = r23:22 //
n = add(n, #-1) //
} {
tmp_buf0 = memw(sp+#12<<2) //
M0 = next_d32_row //
next_int_width = asl(integral_width, #2) //line to line in integral
} {
out_height = memw(sp+#11<<2) //
preds = vmem(cntrl+#6) //
in_ptre = in_ptre0 //
in_ptro = in_ptro0 //
} {
tmp_buf = tmp_buf0 //
perm1 = vmem(cntrl+#0) //
e1 = ##0x01010101 //
} {
vq1 = vand(preds, e1) //
perm2 = vmem(cntrl+#1) //
e2 = add(e1, e1) //
in_depth = memw(sp+#10<<2) //
} {
vq2 = vand(preds, e2) //
perm3 = vmem(cntrl+#2) //
e3 = add(e2, e2) //
p2 = and(p2, !p2) //disable p2 for previous line
} {
vq3 = vand(preds, e3) //
perm4 = vmem(cntrl+#3) //
in_depth = lsr(in_depth, #5) //how many 32byte chunks to sum
in_ptro0 = add(in_ptro0, next_input) //
} {
e4 = add(e3, e3) //
perm5 = vmem(cntrl+#4) //
next_output = add(next_input, #-256) //jump 256 bytes right doinf 2 at once
integral_width = lsr(integral_width, #3) //64 * 32/ 256 amount of tmp values
} {
integral_width = add(integral_width, #-1) //after pad to left side
e5 = add(e4, e4) //select vpred 5
perm6 = vmem(cntrl+#5) //last word across whole re
tmp_buf = tmp_buf0 //
} {
a8a8a8a8 = #0 //init accumulators
b8b8b8b8 = #0 //init accumulators
c8c8c8c8 = #0 //
d8d8d8d8 = #0 //
}
/* --------------------------------------------------------------------------------------- */
.balign 32
.L_height:
//sum up and pad integral prepend 8 lines to the integral buffer and 8 elements in front
{ vmem(tmp_buf++#1) = vzero //store 32 sums of 8*inz*filtz*in_depth/32 pre-pad
in_ptre = in_ptre0 //
in_ptre0 = add(in_ptre0, next_input) //[P, 3]update to next indepth*in_width
loop1(.L_width, integral_width) //setup tmp horz loop
}
.L_width:
{ a8a8a8a8 = #0 //init accumulators
b8b8b8b8 = #0 //init accumulators
loop0(.L_sum, in_depth) //set up inner loop of horz sum
c8c8c8c8 = #0 //
}
.L_sum:
{ d3d2d1d0.cur = vmem(in_ptre+#1) //col even
b8b8b8b8.uw += vrmpy(d3d2d1d0.ub, filt_offset.ub) //32 sums of 4
} {
d3d2d1d0.cur = vmem(in_ptre++M0) //col odd
a8a8a8a8.uw += vrmpy(d3d2d1d0.ub, filt_offset.ub) //32 sums of 4
} {
d3d2d1d0.cur = vmem(in_ptro+#1) //col even
d8d8d8d8.uw += vrmpy(d3d2d1d0.ub, filt_offset.ub) //32 sums of 4
} {
d3d2d1d0.cur = vmem(in_ptro++M0) //col odd
c8c8c8c8.uw += vrmpy(d3d2d1d0.ub, filt_offset.ub) //32 sums of 4
}:endloop0
{
loop0(.L_sum, in_depth) //reset inner loop
b8b8a8a8h_b8b8a8a8l = vdeal(b8b8b8b8, a8a8a8a8, c4) //expand words to add in a tree
a8a8a8a8 = #0 //reset accumulator
b8b8b8b8 = #0 //reset accumulator
} {
in_ptre = sub(in_ptre, next_output) //next set of outputs
in_ptro = sub(in_ptro, next_output) //next set of outputs
b8b8a8a8h.w = vadd(b8b8a8a8h.w, b8b8a8a8l.w) //32sums of 8 final block into tmp
d8d8c8c8h_d8d8c8c8l = vdeal(d8d8d8d8, c8c8c8c8, c4) //expand words to add in a tree
} {
d8d8c8c8h.w = vadd(d8d8c8c8h.w, d8d8c8c8l.w) //32sums of 8 final block into tmp
} {
b8b8a8a8h.w += vasl(d8d8c8c8h.w,e4) //
vmem(tmp_buf++#1) = b8b8a8a8h.new //final tmp store
c8c8c8c8 = #0 //
d8d8d8d8 = #0 //
}:endloop1
/* --------------------------------------------------------------------------------------- */
.L_integrate:
{ b16b16a16a16= vmem(tmp_buf0+#0) //[P, 0]load 32sums of 8
} {
tmp_buf = add(tmp_buf0, #256) //[P, 0]update ptr by 2 vregs
} {
d16d16c16c16.tmp= vmem(tmp_buf+#-1) //[0, 1]load 32sums of 8
d16c16b16a16h_d16c16b16a16l= vdeal(d16d16c16c16, b16b16a16a16, c4) //[0, 1]deal out for 32sums of 16
} {
f16f16e16e16= vmem(tmp_buf++#2) //[0, 2]load 32sums of 8
} {
out_ptr = out_ptr0 //[P, 3]setup out pointer
delta128 = #0 //[P, 3]initialize add sum to next vreg
} {
h16h16g16g16.tmp= vmem(tmp_buf+#-1) //[0, 4]
h16g16f16e16h_h16g16f16e16l= vdeal(h16h16g16g16,f16f16e16e16, c4) //[0, 4]
} {
h32g32f32e32.w = vadd(h16g16f16e16h.w,h16g16f16e16l.w) //[0, 5]32 x 16wordsums
d32c32b32a32.w = vadd(d16c16b16a16h.w,d16c16b16a16l.w) //[0, 5]32sums of 16
} {
out_ptr_1 = sub(out_ptr, next_int_width) //previous line
b16b16a16a16= vmem(tmp_buf++#2) //[0, 6]
} {
h32g32f32e32h_h32g32f32e32l= vdeal(h32g32f32e32,d32c32b32a32, c4) //[0, 7]
} {
intw31w00.w = vadd(h32g32f32e32h.w, h32g32f32e32l.w) //[0, 8]32 x 32wordsums
loop0(.L_loop_int, n) //[P, 8]
}
/* --------------------------------------------------------------------------------------- */
{ d16d16c16c16.tmp= vmem(tmp_buf+#-1) //[1, 0]
d16c16b16a16h_d16c16b16a16l= vdeal(d16d16c16c16, b16b16a16a16, c4) //[1, 0]
} {
delta4 = vdelta(intw31w00, perm1) //[0,10]add words
f16f16e16e16= vmem(tmp_buf++#2) //[1, 1]
} {
if(vq1) intw31w00.w += delta4.w //[0,11]groups of 1word 1_1_
} {
h16h16g16g16.tmp= vmem(tmp_buf+#-1) //[1, 3]
h16g16f16e16h_h16g16f16e16l= vdeal(h16h16g16g16,f16f16e16e16, c4) //[1, 3]
} {
delta8 = vdelta(intw31w00, perm2) //[0,13]2words
h32g32f32e32.w = vadd(h16g16f16e16h.w,h16g16f16e16l.w) //[1, 5]32 x 16wordsums
d32c32b32a32.w = vadd(d16c16b16a16h.w,d16c16b16a16l.w) //[1, 5]32sums of 16
} {
if(vq2) intw31w00.w += delta8.w //[0,14]groups of 2words 11__11__
b16b16a16a16= vmem(tmp_buf++#2) //[1, 4]
} {
inty31y00 = intw31w00 //[0,16]
h32g32f32e32h_h32g32f32e32l= vdeal(h32g32f32e32,d32c32b32a32, c4) //[1, 7]
} {
delta16 = vdelta(intw31w00, perm3) //[0,17]
intw31w00.w = vadd(h32g32f32e32h.w, h32g32f32e32l.w) //[1, 8]32 x 32wordsums
}
/* --------------------------------------------------------------------------------------- */
.balign 32
.L_loop_int:
{ if(vq3) inty31y00.w += delta16.w //[0,18]/groups of 4words 1111____1111____
d16d16c16c16.tmp= vmem(tmp_buf+#-1) //[2, 0]
d16c16b16a16h_d16c16b16a16l= vdeal(d16d16c16c16, b16b16a16a16, c4) //[2, 0]
} {
vq4 = vand(preds, e4) //[0,19]
delta4 = vdelta(intw31w00, perm1) //[1,10]add words
f16f16e16e16= vmem(tmp_buf++#2) //[2, 1]
} {
delta32 = vdelta(inty31y00, perm4) //[0,20]
prev_line = vmem(out_ptr_1++#1) //[0,20]
if(vq1) intw31w00.w += delta4.w //[1,11]groups of 1word 1_1_
} {
if(vq4) inty31y00.w += delta32.w //[0,21]groups of 8words 11111111________
h16h16g16g16.tmp= vmem(tmp_buf+#-1) //[2, 3]
h16g16f16e16h_h16g16f16e16l= vdeal(h16h16g16g16,f16f16e16e16, c4) //[2, 3]
} {
vq5 = vand(preds, e5) //[0,22]
delta8 = vdelta(intw31w00, perm2) //[1,13]2words
b16b16a16a16= vmem(tmp_buf++#2) //[2, 4]
} {
delta64 = vdelta(inty31y00, perm5) //[0,23]
if(vq2) intw31w00.w += delta8.w //[1,14]groups of 2words 11__11__
h32g32f32e32.w = vadd(h16g16f16e16h.w,h16g16f16e16l.w) //[2, 5]32 x 16wordsums
d32c32b32a32.w = vadd(d16c16b16a16h.w,d16c16b16a16l.w) //[2, 5]32sums of 16
} {
delta128 = vdelta(delta128, perm6) //[0,24]full replication of last word
if(vq5) inty31y00.w += delta64.w //[0,24]groups of16words 111--111___--___
if(!p2) prev_line = vzero //[0,24]
} {
delta128.w = vadd(inty31y00.w, delta128.w) //[0,25]add previous last value
inty31y00 = intw31w00 //[1,16]
h32g32f32e32h_h32g32f32e32l= vdeal(h32g32f32e32,d32c32b32a32, c4) //[2, 7]
} {
prev_line.w = vadd(prev_line.w, delta128.w) //[0,26]
vmem(out_ptr++#1) = prev_line.new //[0,26]
delta16 = vdelta(intw31w00, perm3) //[1,17]
intw31w00.w = vadd(h32g32f32e32h.w, h32g32f32e32l.w) //[2, 8]32 x 32wordsums
}:endloop0
/* --------------------------------------------------------------------------------------- */
{ if(vq3) inty31y00.w += delta16.w //[2,18]/groups of 4words 1111____1111____
} {
vq4 = vand(preds, e4) //[2,19]
out_ptr0 = add(out_ptr0, next_int_width) //[E,19]go to next output line
} {
delta32 = vdelta(inty31y00, perm4) //[2,20]
prev_line = vmem(out_ptr_1++#1) //[2,20]
} {
if(vq4) inty31y00.w += delta32.w //[2,21]groups of 8words 11111111________
out_height = add(out_height, #-1) //
} {
vq5 = vand(preds, e5) //[2,22]
p0 = cmp.eq(out_height, #0) //
} {
delta64 = vdelta(inty31y00, perm5) //[2,23]
tmp_buf = tmp_buf0 //
} {
delta128 = vdelta(delta128, perm6) //[2,24]full replication of last word
if(vq5) inty31y00.w += delta64.w //[2,24]groups of16words 111--111___--___
if(!p2) prev_line = vzero //[1,24]
} {
delta128.w = vadd(inty31y00.w, delta128.w) //[2,25]add previous last value
p2 = cmp.eq(r0,r0) //enable p2
in_ptro = in_ptro0 //
in_ptro0 = add(in_ptro0, next_input) //
} {
prev_line.w = vadd(prev_line.w, delta128.w) //[2,26]
vmem(out_ptr++#1) = prev_line.new //[2,26]
if(!p0) jump:t .L_height //[E,26]
d8d8d8d8 = #0 //
}
/* --------------------------------------------------------------------------------------- */
{ r17:16 = memd(sp+#0)
r19:18 = memd(sp+#8)
} {
r21:20 = memd(sp+#16)
r23:22 = memd(sp+#24)
} {
dealloc_return
}
/* --------------------------------------------------------------------------------------- */
.L_end:
.size gvint_16b, .L_end-gvint_16b
|
XiaoMi/nnlib | 22,848 | hexagon/asm_src/dwconv2dbbb_s2_5xN_h.S | /*
* Copyright (c) 2016,2017,2018 The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*======================================================================*/
/* FUNCTIONS : dwconv2dbbb_s2_5xN_asm */
/* */
/* DESCRIPTION */
/* Depthwise filter stride 2xM, filter size 5xN */
/* input and output ptr non aligned output width */
/* padded, max and min found only on valid range */
/* ARCHITECTURE : QDSP6V6 + HVX */
/*======================================================================*/
/* REVISION HISTORY: */
/* ================= */
/* */
/* Author Date Comments */
/* ------------------------------------------------------------- */
/* DJH 4.30.19 */
/*======================================================================*/
/* CYCLE-COUNT: */
/* -> */
/* */
/* MEMORY */
/* CODESIZE = 880 bytes */
/* STACK = 48 bytes */
/* ASSUMPTIONS */
/* none */
/* C MODEL */
/* exact match to assembly code */
/*======================================================================*/
#if 0
void dwconv2dhhh_s2_cn(
uint8_t *in_buf,
uint8_t *filt,
uint8_t *out_buf,
int next_in_width,
int next_out_width,
int next_in_width_32,
int next_out_width_32,
int depth,
int out_width,
int out_height,
int filt_height,
int filt_zero,
int32_t *bias_sum,
int32_t *max,
int recip_level,
int recip_shift,
int stride_height)
{
int out_y, d, out_x, ur, in_val, filt_val;
int out_z, filt_y, filt_x, cnt;
int out_width_pad = (out_width+3)&(~3);
int64_t lsum;
int32_t sum, zum, sum0;
int filt_width = 5;
int o_filt_width = (filt_width+3)&(~3);
for (out_y = 0; out_y < out_height; out_y++) {
cnt = out_width;
for (out_x = 0; out_x < out_width_pad; out_x+=4) {
cnt -= 4;
for(d=0; d < depth/32; d++) {
for (out_z = 0; out_z < 32; out_z++) {
for(ur=0; ur < 4; ur++)
{
zum = 0;
sum = bias_sum[32*d+out_z];
for (filt_y = 0; filt_y < filt_height; filt_y++) {
for (filt_x = 0; filt_x < o_filt_width; filt_x++) {
in_val = in_buf[(out_y*stride_height + filt_y) * next_in_width
+ d * next_in_width_32
+ (out_x*2 + ur*2 + filt_x) * 32
+ out_z];
filt_val = filt[32*d*filt_height*o_filt_width +
(o_filt_width*filt_y)*32 +
out_z*4 + 128*(filt_x/4) + (filt_x & 3)] ;
sum += in_val*filt_val;
if(filt_x < filt_width)
zum += in_val*filt_zero;
}
}
sum = sum - zum;
if(ur==0)sum0 = sum;
if(ur == 1 && !(cnt > -3)) sum = sum0;
if(ur == 2 && !(cnt > -2)) sum = sum0;
if(ur == 3 && !(cnt > -1)) sum = sum0;
sum <<= recip_shift;
lsum = (int64_t)sum * (int64_t)recip_level + 0x40000000LL;
lsum = lsum >> 31;
sum = (int) lsum;
if(sum > max[out_z ]) max[out_z ] = sum;
if(sum < max[out_z+32]) max[out_z+32] = sum;
if(lsum < 0) lsum = 0; if(lsum > 0xffll) lsum = 0xffll;
out_buf[out_y * next_out_width
+ 32 * (out_x+ur)
+ d * next_out_width_32
+ out_z] = (uint8_t) lsum;
}//ur
}//out_z
}//d
}//out_x
}//out_y
return;
}
#endif
/* ----------------------------------------------------------------------------- */
        .text
        .file "dwconv2dbbb_s2_5xN_h.S"
        .global dwconv2dbbb_s2_5xN_asm
        .balign 32
        .type dwconv2dbbb_s2_5xN_asm, @function
dwconv2dbbb_s2_5xN_asm:
/* ----------------------------------------------------------------------------- */
// Depthwise 2-d convolution: horizontal stride 2, filter width 5 (taps padded
// out to 8), variable filter height, d32 activation layout, 8-bit data and
// weights, 8-bit saturated output.  Exact behavior is given by the #if 0
// C reference model above.
//
// Register args : r0 = in_buf, r1 = filt, r2 = out_buf, r3 = next_in_width,
//                 r4 = next_out_width (currently unused), r5 = next_in_width_32
// Stack args    : next_out_width_32, depth, out_width, out_height, filt_height,
//                 filt_zero, bias_sum, ptr to max/min vectors, recip_level,
//                 recip_shift, stride_height — loaded below from
//                 sp+#16<<2 ... sp+#26<<2 (see frame-size note at allocframe).
// Saves/restores callee-saved pairs r17:16 ... r27:26 in the local frame.
//input output registers
#define in_buf                      r0  //activation source (d32 layout)
#define filt                        r1  //packed weights
#define out_buf                     r2  //activation destination
#define next_in_width_depth         r3  //input stride: one row, all depths
#define next_out_width_depth        r4  //currently unused
#define next_in_width_32            r5  //input stride: one depth-32 slice
#define next_out_width_32           r10 //output stride: one depth-32 slice
#define depth                       r11 //total depth (later /32)
#define out_width                   r12 //
#define out_height                  r13 //
#define filt_height                 r25 //filt_size - 1 (loop count for .L_vert)
#define filt_zero                   r7  //weight zero-point
#define bias_sum                    r14 //
#define ptr_max                     r15 //points at {max vec, min vec}
#define recip_level                 r10 //
#define recip_shift                 r8  //
#define stride_v                    r28 //vertical stride
//scaler registers
#define ptr_w0                      r16 //
#define ptr_w1                      r17 //
#define col_count                   r9  //valid output columns remaining
#define c16                         r6  //constant 16 for halfword shifts
#define bias_ptr                    r18 //
#define ptr_x0                      r19 //
#define ptr_x1                      r20 //
#define ptr_xin                     r22 //
#define ptr_y                       r23 //
#define depth_cnt                   r26 //
#define filt_size                   r11 //filter height as passed in
#define next_in_width_depth_stride  r28 //next_in_width_depth * stride_height
#define zzzz                        r7  //filt_zero replicated into all 4 bytes
#define ___z                        r27 //zzzz >> 24: zero-point in top byte only
#define _zzz                        r21 //zzzz >> 8
#define zzz_                        r4  //
#define zz__                        r15 //zzzz << 16
#define out_width4                  r24 //ceil(out_width / 4): .L_width trip count
//vector registers
#define vrecip                      v0  //
#define vshamt_vec                  v1  //splat of recip_shift
#define max                         v2  //running per-channel maximum
#define min                         v3  //running per-channel minimum
#define bias_val                    v4  //
#define w_432                       v27 //
#define w___4                       v8  //
#define w3210                       v28 //
#define w10__                       v7  //
#define x0                          v30 //
#define x1                          v29 //
#define x2                          v5  //
#define x3x1x2x0                    v30 //
#define x3x2x1x0                    v30 //
#define x7x5x6x4                    v29 //
#define x7x6x5x4                    v10 //
#define xbxax9x8                    v5  //
#define xbx9xax8                    v5  //
#define s0                          v12 //weighted accumulators, 4 output cols
#define s1                          v13 //
#define s2                          v14 //
#define s3                          v15 //
#define z0                          v16 //zero-point correction accumulators
#define z1                          v17 //
#define z2                          v18 //
#define z3                          v19 //
#define d0                          v20 //requantized results
#define d1                          v21 //
#define d1d0                        v24 //
#define d2                          v22 //
#define d3                          v23 //
#define d3d2                        v25 //
#define d3210                       v25 //
#define SSR         <<1:rnd:sat:shift       //simplify mpy instruction
#define PV(VSRC)    .word (0x1DFFE020+VSRC) //debug vec reg
#define PS(SSRC)    .word (0x1DFFE100+SSRC) //debug sca reg
/* =========================================================================== */
{ allocframe(#56)                              //frame = 56+8 bytes -> caller stack args begin at sp+#16<<2
  c16= #16                                     //
  out_width = memw(sp+#2<<2)                   //same-packet read uses pre-allocframe sp: arg 9 at old sp+8
} {
  M0 = next_in_width_depth                     //
  memd(sp+#32) = r25:24                        //save callee-saved regs
  memd(sp+#40) = r27:26                        //
  out_width4 = add(out_width, #3)              //
} {
  memd(sp+#16) = r21:20                        //
  memd(sp+#24) = r23:22                        //
  out_width4 = lsr(out_width4, #2)             //ceil(out_width/4): 4 cols per pass
} {
  memd(sp+#0) = r17:16                         //
  memd(sp+#8) = r19:18                         //
} {
  stride_v = memw(sp+#26<<2)                   //arg: stride_height
  filt_zero = memw(sp+#21<<2)                  //arg: filt_zero
} {
  zzzz = vsplatb(filt_zero)                    //replicate zero-point into 4 bytes
  next_out_width_32 = memw(sp+#16<<2)          //arg: next_out_width_32
} {
  next_in_width_depth_stride = mpyi(next_in_width_depth,stride_v) //input bytes per output row
  _zzz = lsr(zzzz, #8)                         //zero-point masks for partial tap groups
  depth = memw(sp+#17<<2)                      //arg: depth
} {
  out_height = memw(sp+#19<<2)                 //arg: out_height
  ptr_max = memw(sp+#23<<2)                    //arg: max/min vector pointer
  depth = lsr(depth, #5)                       //depth/32
} {
  memw(sp+#17<<2) = depth                      //stash depth/32 back for per-row reload
  max = vmem(ptr_max+#0)                       //
  ___z = lsr(_zzz, #16)                        //
} {
  min = vmem(ptr_max+#1)                       //
  zz__ = asl(zzzz, #16)                        //
} {
  filt_size = memw(sp+#20<<2)                  //arg: filt_height
  recip_shift = memw(sp+#25<<2)                //arg: recip_shift
} {
  filt_height = add(filt_size, #-1)            //first filter row handled by loop prologue/epilogue
  bias_sum = memw(sp+#22<<2)                   //arg: bias_sum
  depth_cnt = memw(sp+#17<<2)                  //depth/32
  vshamt_vec= vsplat(recip_shift)              //
}
/* ----------------------------------------------------------------------------- */
        .balign 32
.L_height:                                     //per output row
{
  recip_level = memw(sp+#24<<2)                //arg: recip_level (per-channel, re-read each row)
}
{ bias_ptr = bias_sum                          //
  ptr_xin = in_buf                             //
  ptr_w0 = filt                                //
  loop1(.L_width, out_width4)                  //
} {
  out_height = add(out_height, #-1)            //
  col_count = out_width                        //
  x2 = vmemu(ptr_xin+#2)                       //[0, 0]
}
/* ----------------------------------------------------------------------------- */
.L_depth:                                      //per depth-32 slice
{
  vrecip = vmem(recip_level++#1)
  ptr_x0 = ptr_xin                             //
  ptr_y = out_buf                              //
  memw(sp+#52) = recip_level                   //spill (local frame slot, offset 52)
} {
  x1 = vmemu(ptr_xin+#1)                       //[0, 1]
} {
  xbx9xax8.b = vshuff(x2.b)                    //[0, 2]
  bias_val = vmem(bias_ptr++#1)                //
  p3 = !cmp.eq(r0, r0)                         //p3 false: suppress store on priming pass
}
/* --------------------------------------------------------------------------- */
        .balign 32
.L_width:                                      //4 output columns per iteration, software pipelined
{ w3210.cur = vmem(ptr_w0+#0)                  //[0, 3]
  w_432.uw = vlsr(w3210.uw, c16)               //[0, 5]
  xbxax9x8.b = vshuff(xbx9xax8.b)              //[0, 5]
} {
  w___4.cur = vmem(ptr_w0+#1)                  //[0, 4]
  w_432.w += vasl(w___4.w, c16)                //[0, 6]
  x7x5x6x4.b = vshuff(x1.b)                    //[0, 6]
  ptr_x1 = ptr_x0                              //[WIDTH, P]
} {
  z2.uw = vrmpy(xbxax9x8.ub, ___z.ub)          //[0, 7]
  z3.uw = vrmpy(xbxax9x8.ub, _zzz.ub)          //[0, 7]
  x0 = vmemu(ptr_x1++M0)                       //[0, 7]
} {
  s3.uw = vrmpy(xbxax9x8.ub, w_432.ub)         //[0, 8]
  x7x6x5x4.b = vshuff(x7x5x6x4.b)              //[0, 8]
  ptr_x0 = add(ptr_x0, #256)                   //[WIDTH,P]+8 32 depths for stride 2
  col_count = add(col_count, #-4)              //[WIDTH]
} {
  s2.uw = vrmpy(xbxax9x8.ub, w___4.ub)         //[0, 9]
  x3x1x2x0.b = vshuff(x0.b)                    //[0, 9]
  s0 = bias_val                                //[WIDTH, P]
  s3.w = vadd(s3.w, bias_val.w)                //[WIDTH, P]
} {
  s2.w = vadd(s2.w, bias_val.w)                //[WIDTH, P]
  s0.uw += vrmpy(x7x6x5x4.ub, w___4.ub)        //[0,10]
  loop0(.L_vert, filt_height)                  //[WIDTH, P]
  ptr_w1 = add(ptr_w0, #256)                   //[WIDTH, P]
} {
  z0.uw = vrmpy(x7x6x5x4.ub, ___z.ub)          //[0,11]
  z1.uw = vrmpy(x7x6x5x4.ub, _zzz.ub)          //[0,11]
  x3x2x1x0.b = vshuff(x3x1x2x0.b)              //[0,11]
  s1 = bias_val                                //[WIDTH, P]
}
/* --------------------------------------------------------------------------- */
        .balign 32
.L_vert:                                       //accumulate one filter row per iteration
{ s2.uw += vrmpy(x7x6x5x4.ub, w3210.ub)        //[0,12]
  x2 = vmemu(ptr_x1+#2)                        //[1, 0]
} {
  s1.uw += vrmpy(x7x6x5x4.ub, w_432.ub)        //[0,13]
  x1 = vmemu(ptr_x1+#1)                        //[1, 1]
} {
  s0.uw += vrmpy(x3x2x1x0.ub, w3210.ub)        //[0,14]
  w10__.w = vasl(w3210.w, c16)                 //[0,14]
  xbx9xax8.b = vshuff(x2.b)                    //[1, 2]
} {
  z2.uw+= vrmpy(x7x6x5x4.ub, zzzz.ub)          //[0,15]
  z3.uw+= vrmpy(x7x6x5x4.ub, zz__.ub)          //[0,15]
  w3210 = vmem(ptr_w1++#1)                     //[1, 3]
} {
  s3.uw += vrmpy(x7x6x5x4.ub, w10__.ub)        //[0,16]
  w___4 = vmem(ptr_w1++#1)                     //[1, 4]
} {
  s1.uw += vrmpy(x3x2x1x0.ub, w10__.ub)        //[0,17]
  w_432.uw = vlsr(w3210.uw, c16)               //[1, 5]
  xbxax9x8.b = vshuff(xbx9xax8.b)              //[1, 5]
} {
  z0.uw += vrmpy(x3x2x1x0.ub, zzzz.ub)         //[0,18]
  z1.uw += vrmpy(x3x2x1x0.ub, zz__.ub)         //[0,18]
  w_432.w += vasl(w___4.w, c16)                //[1, 6]
  x7x5x6x4.b = vshuff(x1.b)                    //[1, 6]
} {
  z2.uw += vrmpy(xbxax9x8.ub, ___z.ub)         //[1, 7]
  z3.uw += vrmpy(xbxax9x8.ub, _zzz.ub)         //[1, 7]
  x0 = vmemu(ptr_x1++M0)                       //[1, 7]
} {
  s3.uw += vrmpy(xbxax9x8.ub, w_432.ub)        //[1, 8]
  x7x6x5x4.b = vshuff(x7x5x6x4.b)              //[1, 8]
} {
  s2.uw += vrmpy(xbxax9x8.ub, w___4.ub)        //[1, 9]
  x3x1x2x0.b = vshuff(x0.b)                    //[1, 9]
} {
  s0.uw += vrmpy(x7x6x5x4.ub, w___4.ub)        //[1,10]
} {
  z0.uw+= vrmpy(x7x6x5x4.ub, ___z.ub)          //[1,11]
  z1.uw+= vrmpy(x7x6x5x4.ub, _zzz.ub)          //[1,11]
  x3x2x1x0.b = vshuff(x3x1x2x0.b)              //[1,11]
}:endloop0
/* --------------------------------------------------------------------------- */
//loop epilogue: finish accumulation, subtract zero-point sums, requantize.
//col_count predicates replicate s0 into over-generated lanes (cnt tests
//mirror the C model's ur==1..3 cases) so max/min only see valid data.
{ s0.uw += vrmpy(x3x2x1x0.ub, w3210.ub)        //[1,14]
  p0 = !cmp.gt(col_count, #-2)                 //col 2 past the end?
  d3d2.h = vpack(d3.w, d2.w):sat               //[WIDTH, E]
} {
  d1d0.h = vpack(d1.w, d0.w):sat               //[0,27]
  z0.uw += vrmpy(x3x2x1x0.ub, zzzz.ub)         //[1,18]
  z2.uw += vrmpy(x7x6x5x4.ub, zzzz.ub)         //[1,15]
} {
  s2.uw += vrmpy(x7x6x5x4.ub, w3210.ub)        //[1,12]
  w10__.w = vasl(w3210.w, c16)                 //[1,14]
  s0.w = vsub(s0.w, z0.w)                      //[WIDTH] remove zero-point contribution
} {
  d3210.ub = vpack(d3d2.h, d1d0.h):sat         //[WIDTH, E]
  s2.w = vsub(s2.w, z2.w)                      //[WIDTH,E]
  s1.uw += vrmpy(x7x6x5x4.ub, w_432.ub)        //[1,13]
} {
  if(p0) s2 = s0                               //over-generated column: reuse valid value
  z1.uw += vrmpy(x3x2x1x0.ub, zz__.ub)         //[1,18]
  z3.uw += vrmpy(x7x6x5x4.ub, zz__.ub)         //[1,15]
  s0.w = vasl(s0.w, vshamt_vec.w)              //pre-scale before reciprocal multiply
} {
  if(p3) vmemu(ptr_y++#1) = d3210              //[WIDTH, E] store previous iteration's 4 cols
  s3.uw += vrmpy(x7x6x5x4.ub, w10__.ub)        //[1,16]
} {
  s2.w = vasl(s2.w, vshamt_vec.w)              //[WIDTH]
  s1.uw += vrmpy(x3x2x1x0.ub, w10__.ub)        //[1,17]
  s3.w = vsub(s3.w, z3.w)                      //[WIDTH]
  p0 = !cmp.gt(col_count, #-3)                 //[WIDTH] col 1 past the end?
} {
  d0.w = vmpye(s0.w, vrecip.uh)                //[WIDTH]multiply by 1/max
  s1.w = vsub(s1.w, z1.w)                      //[WIDTH]
  x2 = vmemu(ptr_x0+#2)                        //[P, 0]
} {
  d0.w += vmpyo(s0.w, vrecip.h):SSR            //[WIDTH] 32x32 frac multiply, high part
  s3.w = vasl(s3.w, vshamt_vec.w)              //[WIDTH]
  if(p0) s1 = s0                               //[WIDTH] if over gen'd write valid val
  p0 = !cmp.gt(col_count, #-1)                 //[WIDTH] col 3 past the end?
} {
  if(p0) s3 = s0                               //[WIDTH]
  d2.w = vmpye(s2.w, vrecip.uh)                //[WIDTH]multiply by 1/max
} {
  d2.w += vmpyo(s2.w, vrecip.h):SSR            //[WIDTH]
  min.w = vmin(min.w, d0.w)                    //[WIDTH] track per-channel extrema
  max.w = vmax(max.w, d0.w)                    //[WIDTH]
} {
  min.w = vmin(min.w, d2.w)                    //[WIDTH]
  s1.w = vasl(s1.w, vshamt_vec.w)              //[WIDTH]
  d3.w = vmpye(s3.w, vrecip.uh)                //[WIDTH]multiply by 1/max
} {
  max.w = vmax(max.w, d2.w)                    //[WIDTH]
  d3.w += vmpyo(s3.w, vrecip.h):SSR            //[WIDTH]
  x1 = vmemu(ptr_x0+#1)                        //[P, 1]
} {
  max.w = vmax(max.w, d3.w)                    //[WIDTH]
  min.w = vmin(min.w, d3.w)                    //[WIDTH]
  d1.w = vmpye(s1.w, vrecip.uh)                //[WIDTH]multiply by 1/max
} {
  p3 = cmp.eq(r0, r0)                          //[WIDTH] enable stores from now on
  d1.w += vmpyo(s1.w, vrecip.h):SSR            //[WIDTH]
  xbx9xax8.b = vshuff(x2.b)                    //[P, 2]
} {
  max.w = vmax(max.w, d1.w)                    //[WIDTH]
  min.w = vmin(min.w, d1.w)                    //[WIDTH]
}:endloop1                                     //end width
/* --------------------------------------------------------------------------- */
{ d3d2.h = vpack(d3.w, d2.w):sat               //[WIDTH, E]
  col_count = out_width                        //[DEPTH,P]
  ptr_xin = add(ptr_xin, next_in_width_32)     //[DEPTH] next depth-32 slice
  next_out_width_32 = memw(sp+#16<<2)          //reload: register was clobbered by recip_level
} {
  d1d0.h = vpack(d1.w, d0.w):sat               //[WIDTH,E]
  ptr_w0 += asl(filt_size, #8)                 //[DEPTH,E]filt_size = filt_height*256 //4*3*64
  loop1(.L_width, out_width4)                  //[DEPTH]
} {
  depth_cnt = add(depth_cnt, #-1)              //[DEPTH,E]
  out_buf = add(out_buf, next_out_width_32)    //[DEPTH]
  x2 = vmemu(ptr_xin+#2)                       //[P, 0]
} {
  p0 = cmp.eq(depth_cnt, #0)                   //[DEPTH,E]
  d3210.ub = vpack(d3d2.h, d1d0.h):sat         //[WIDTH, E]
  recip_level = memw(sp+#52)                   //restore spilled per-channel recip pointer
} {
  vmemu(ptr_y+#0) = d3210                      //[WIDTH, E]save final activations
  if(!p0) jump .L_depth                        //[DEPTH,E]next depth
}//end depth
/* ----------------------------------------------------------------------------- */
{ p0 = cmp.eq(out_height, #0)                  //have all rows been processed
  depth_cnt = memw(sp+#17<<2)                  //reload depth count
  in_buf=add(in_buf,next_in_width_depth_stride)//vertical stride for input ptr
  if(!p0.new) jump:nt .L_height                //next height
}//end height
/* ----------------------------------------------------------------------------- */
  ptr_max = memw(sp+#23<<2)                    //ptr to max and mins
{
  r17:16 = memd(sp+#0)                         //restore
  vmem(ptr_max+#0) = max                       //save max vec
} {
  r19:18 = memd(sp+#8)                         //restore
  vmem(ptr_max+#1) = min                       //save min vec
} {
  r21:20 = memd(sp+#16)                        //restore
  r23:22 = memd(sp+#24)                        //restore
} {
  r25:24 = memd(sp+#32)                        //restore
  r27:26 = memd(sp+#40)                        //restore
} {
  dealloc_return                               //return
}
/* ----------------------------------------------------------------------------- */
.L_end:
      .size dwconv2dbbb_s2_5xN_asm, .L_end-dwconv2dbbb_s2_5xN_asm
/* ----------------------------------------------------------------------------- */
/* =========================================================================== */ |
XiaoMi/nnlib | 8,962 | hexagon/asm_src/biasadd_relu_requant.S |
/*
* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
*
* Now that that's out of the way, let's get to the good stuff.
*
* This contains definitions for things used internally.
*/
.text
#if 0
static inline void biasadd_relu_requant_hvx(
uint8_t *out,
const int32_t *tmp_out,
const int32_t *biasbuf,
const uint32_t num_patches,
const uint32_t depth,
const uint32_t fixed_recip_level_size)
{
int32_t sum;
int32_t i,j;
int32_t depth_in_vectors = depth / 32;
int32_t outval;
/* do 4 vectors */
/* multiply */
/* pack odd halves */
/* saturate and pack */
/* deal */
/* deal */
/* store a vector */
for (j = 0; j < num_patches; j++) {
for (i = 0; i < depth; i++) {
sum = biasbuf[i] + tmp_out[j*depth+i];
outval = sum * fixed_recip_level_size + (1<<15);
outval >>= 16;
if (outval < 0) outval = 0;
if (outval > 255) outval = 255;
*out++ = outval;
}
}
}
#endif
//------------------------------------------------------------------------------
// void biasadd_relu_requant_hvx(uint8_t *out, const int32_t *tmp_out,
//                               const int32_t *biasbuf, uint32_t num_patches,
//                               uint32_t depth, uint32_t fixed_recip_level_size)
// Per element: out = sat_u8((tmp_out[i] + bias[i % depth]) * recip + 0x8000 >> 16)
// (exact behavior given by the #if 0 C reference model above).
// Register args per the standard convention: r0..r5 as #defined below.
//------------------------------------------------------------------------------
#define OUTPTR r0               // uint8_t *out
#define INPTR r1                // const int32_t *tmp_out
#define BIASBUF r2              // const int32_t *biasbuf (cycled every `depth` words)
#define NUM_PATCHES r3          //
#define DEPTH r4                //
#define RECIP_LEVEL_SIZE r5     // fixed-point requant multiplier
#define BIASBUF_REWIND_ADDR r6  // one past the last bias word; wrap point
#define BIASPTR r7              // current read position within biasbuf
#define ITERS r8                // loop trip count (128-byte vectors)
#define ROUNDAMT_R r9           // rounding constant 1<<15 (added before >>16)
#define L2FETCH_CTL r11:10      // descriptor pair for l2fetch
#define L2FETCH_CTL_LO r10      //
#define L2FETCH_CTL_HI r11      //
#define BUF0 v0                 // four in-flight input vectors (32 words each)
#define BUF1 v1                 //
#define BUF2 v2                 //
#define BUF3 v3                 //
#define BBUF v9                 // packed uint8 result vector
#define VMPYAMT v8              // splat of RECIP_LEVEL_SIZE
#define HBUF1 v7                // halfword-packed intermediates
#define HBUF0 v6                //
#define BIASVALS v5             //
#define OUT0 v10                // accumulators, pre-seeded with rounding amount
#define OUT1 v11                //
#define OUT2 v12                //
#define OUT3 v13                //
        .global biasadd_relu_requant_hvx
        .p2align 6
biasadd_relu_requant_hvx:
{
        L2FETCH_CTL_HI = asl(DEPTH,#4)                  // depth*16: l2fetch geometry field — layout per l2fetch spec
        ITERS = mpyi(NUM_PATCHES,DEPTH)                 // total int32 elements
}
{
        VMPYAMT = vsplat(RECIP_LEVEL_SIZE)
        ITERS = add(ITERS,#255)                         // round up, +1 iter: extra pipelined pass (mods for multithreading)
        ROUNDAMT_R = ##0x08000
}
{
        ITERS = lsr(ITERS,#7)                           // divide by 128 (one 32-word vector per BUF, 4 BUFs rotated)
        BIASPTR = BIASBUF
        NUM_PATCHES = lsr(NUM_PATCHES,#2)               // 4x inputs...
}
{
        OUT0 = vsplat(ROUNDAMT_R)                       // seed accumulators with 1<<15 rounding bias
        OUT1 = vsplat(ROUNDAMT_R)
        L2FETCH_CTL_LO = combine(L2FETCH_CTL_HI.L,NUM_PATCHES.L)
}
{
        OUT2 = vsplat(ROUNDAMT_R)
        OUT3 = vsplat(ROUNDAMT_R)
}
{
        l2fetch(INPTR,L2FETCH_CTL)                      // prefetch input region into L2
        BIASBUF_REWIND_ADDR = addasl(BIASBUF,DEPTH,#2)  // &biasbuf[depth]: wrap boundary
}
{
        p3=sp1loop0(.Loop,ITERS)                        // pipelined loop; p3 false on 1st pass (no store)
}
        .falign
.Loop:
// Steady state: 4 vectors in flight.  Each iteration loads 4 input vectors,
// adds the (wrapping) bias, multiplies by the requant factor into the
// rounding-seeded accumulators, and packs/saturates the previous results.
{
        BIASVALS = vmem(BIASPTR++#1)
        OUT0.w += vmpyie(BUF0.w,VMPYAMT.uh)             // (in+bias)*recip accumulated on top of 1<<15
}
{
        BUF0 = vmem(INPTR++#1):nt                       // non-temporal: streamed input, don't pollute cache
        p0 = cmp.eq(BIASBUF_REWIND_ADDR,BIASPTR)
        if (p0.new) BIASPTR = BIASBUF                   // wrap bias pointer at end of channel block
        OUT1.w += vmpyie(BUF1.w,VMPYAMT.uh)
}
{
        BIASVALS = vmem(BIASPTR++#1)
        BUF0.w = vadd(BUF0.w,BIASVALS.w)
        OUT2.w += vmpyie(BUF2.w,VMPYAMT.uh)
}
{
        BUF1 = vmem(INPTR++#1):nt
        p0 = cmp.eq(BIASBUF_REWIND_ADDR,BIASPTR)
        if (p0.new) BIASPTR = BIASBUF
        OUT3.w += vmpyie(BUF3.w,VMPYAMT.uh)
}
{
        BIASVALS = vmem(BIASPTR++#1)
        OUT1 = vsplat(ROUNDAMT_R)                       // reseed for next round
        BUF1.w = vadd(BUF1.w,BIASVALS.w)
        HBUF0.h = vpacko(OUT1.W,OUT0.W)                 // keep high halfwords == arithmetic >>16
}
{
        BUF2 = vmem(INPTR++#1):nt
        p0 = cmp.eq(BIASBUF_REWIND_ADDR,BIASPTR)
        if (p0.new) BIASPTR = BIASBUF
        HBUF1.h = vpacko(OUT3.W,OUT2.W)
}
{
        BIASVALS = vmem(BIASPTR++#1)
        OUT2 = vsplat(ROUNDAMT_R)
        OUT3 = vsplat(ROUNDAMT_R)
        BUF2.w = vadd(BUF2.w,BIASVALS.w)
}
{
        BUF3.tmp = vmem(INPTR++#1):nt
        BUF3.w = vadd(BUF3.w,BIASVALS.w)
        p0 = cmp.eq(BIASBUF_REWIND_ADDR,BIASPTR)
        if (p0.new) BIASPTR = BIASBUF
}
{
        OUT0 = vsplat(ROUNDAMT_R)
        if (p3) vmem(OUTPTR++#1)=BBUF.new               // store previous iteration's packed bytes
        BBUF.ub = vpack(HBUF1.h,HBUF0.h):sat            // saturate to [0,255]: the relu+clip
}:endloop0
        jumpr r31
#ifdef BIST
// Built-in self-test entry (only under -DBIST).  Enables HVX then runs the
// kernel once over the static test data below and parks in an endless loop.
        .global main
main:
        // NOTE(review): setbit targets presumably enable the HVX coprocessor
        // (syscfg) and grant user-mode vector access (ssr) — bit meanings are
        // per the Hexagon system architecture spec; confirm for target core.
        r0 = syscfg
        r0 = setbit(r0,#7)
        syscfg = r0
        r0 = ssr
        r0 = setbit(R0,#31)
        r0 = setbit(r0,#29)
        ssr = r0
        // biasadd_relu_requant_hvx(out, indata, biasvals,
        //                          num_patches=4, depth=32, recip=0x0000FFFF)
        r0 = ##out
        r1 = ##indata
        r2 = ##biasvals
        r3 = #4
        r4 = #32
        r5 = ##0x0000FFFF
        call biasadd_relu_requant_hvx
1:
        jump 1b                 // done: spin forever (bare-metal BIST has no exit)
.data
.align 128
indata:
.word 0
.word 1
.word 2
.word 3
.word 4
.word 5
.word 6
.word 7
.word 8
.word 9
.word 10
.word 11
.word 12
.word 13
.word 14
.word 15
.word 16
.word 17
.word 18
.word 19
.word 20
.word 21
.word 22
.word 23
.word 24
.word 25
.word 26
.word 27
.word 28
.word 29
.word 30
.word 31
.word 32
.word 33
.word 34
.word 35
.word 36
.word 37
.word 38
.word 39
.word 40
.word 41
.word 42
.word 43
.word 44
.word 45
.word 46
.word 47
.word 48
.word 49
.word 50
.word 51
.word 52
.word 53
.word 54
.word 55
.word 56
.word 57
.word 58
.word 59
.word 60
.word 61
.word 62
.word 63
.word 64
.word 65
.word 66
.word 67
.word 68
.word 69
.word 70
.word 71
.word 72
.word 73
.word 74
.word 75
.word 76
.word 77
.word 78
.word 79
.word 80
.word 81
.word 82
.word 83
.word 84
.word 85
.word 86
.word 87
.word 88
.word 89
.word 90
.word 91
.word 92
.word 93
.word 94
.word 95
.word 96
.word 97
.word 98
.word 99
.word 100
.word 101
.word 102
.word 103
.word 104
.word 105
.word 106
.word 107
.word 108
.word 109
.word 110
.word 111
.word 112
.word 113
.word 114
.word 115
.word 116
.word 117
.word 118
.word 119
.word 120
.word 121
.word 122
.word 123
.word 124
.word 125
.word 126
.word 127
biasvals:
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
out:
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
#endif
|
XiaoMi/nnlib | 17,495 | hexagon/asm_src/gvconv3322bbb_h.S | .text
        .file "gvconv3322bbb_h.S"
        .global conv3322bbb
        .balign 64
        .type conv3322bbb, @function
/*-----------------------------------------------------------------------------
 * conv3322bbb
 * In (registers): r0 ptr_xi (input data), r1 ptr_wi (weights, 48 bytes read:
 *   3 filter rows x 2 output channels, f{0,1,2}d{0,1}), r2 ptr_bias (two
 *   32-bit biases), r3 ptr_zi (u8 results), r4 out_width, r5 out_height.
 * In (stack, caller frame): sp+#0 = zshift | recip_level (packed dword),
 *   sp+#8 = in_next_row | filt_offset (packed dword).
 * Per output row the kernel streams 3 input rows (M0 = in_next_row twice,
 * then M1 = 128 - 2*in_next_row to net +128B and rewind), accumulates u8
 * data with vrmpy against the 3x2 weight set, subtracts the filt_offset
 * correction sums (sa0/sa1), shifts by zshift, scales by the splatted
 * recip_level (vmpye/vmpyo Q31 multiply), then saturates/packs two output
 * streams to u8.  The ragged right edge is masked with q1/q2 after loop1.
 * The name suggests a 3x3 filter, 2x2 stride, byte data -- TODO confirm
 * against the caller; only the 3-row / 2-channel structure is visible here.
 * Software pipeline: //[1] tags = loads+macs for iteration i, //[2] tags =
 * scale/pack/store for iteration i-1 (first store gated by p3/sp1loop0).
 * Callee-saved r16-r27 are preserved in a 48-byte stack frame.
 *---------------------------------------------------------------------------*/
conv3322bbb:
/* =========================================================================== */
#define ptr_xi r0 //data
#define ptr_wi r1 //weights
#define ptr_bias r2 //bias
#define ptr_zi r3 //results
#define out_width r4 // amount of work to be done
#define out_height r5 // number of vertical lines to perform
#define recip_level r6 // Q31 output scale, splatted into vrecip
#define zshift r7 // spare input
#define zshift_recip_level r7:6
#define filt_offset r8 // u8 weight offset, splatted for correction sums
#define in_next_row r9 // bytes to advance input per row (loaded into M0)
#define in_next_row_filt_offset r9:8 //
//-----------------------------------------------------------------
#define f0d0_01 r10
#define f0d0_x2 r11
#define f0d0_x210 r11:10
#define f0d1_01 r12
#define f0d1_x2 r13
#define f0d1_x210 r13:12
#define f1d0_01 r14
#define f1d0_x2 r15
#define f1d0_x210 r15:14
#define f1d1_01 r16
#define f1d1_x2 r17
#define f1d1_x210 r17:16
#define f2d0_01 r18
#define f2d0_x2 r19
#define f2d0_x210 r19:18
#define f2d1_01 r20
#define f2d1_x2 r21
#define f2d1_x210 r21:20
#define f0d0_2x r22
#define f0d1_2x r23
#define f1d0_2x r24
#define f1d1_2x r25
#define f2d0_2x r26
#define f2d1_2x r27
#define lpcount0 r28
#define iptr0 ptr_wi
#define optr ptr_bias
#define bias0 f0d0_01
#define bias1 f0d0_x2
#define bias1_bias0 f0d0_x210
#define inc f0d1_01
#define n_out_width recip_level
//-----------------------------------------------------------------
#define x00 v0
#define x02 v1
#define x02x00 v1:0
#define x10 v2
#define x12 v3
#define x12x10 v3:2
#define x20 v4
#define x22 v5
#define x22x20 v5:4
#define x0n v6
#define x1n v7
#define x2n v8
#define vrecip v9
#define s00 v10
#define s01 v11
#define s01s00 v11:10
#define s10 v12
#define s11 v13
#define s11s10 v13:12
#define sa0 v14
#define sa1 v15
#define sa1sa0 v15:14
#define sum00 v16
#define sum01 v17
#define sum01sum00 v17:16
#define sum10 v18
#define sum11 v19
#define sum11sum10 v19:18
#define d00 v20
#define d01 v21
#define d10 v22
#define d11 v23
#define vbias0 v24
#define vbias1 v25
#define zeros v26
#define ones v27
#define mask v27
#define mask0 v28
#define mask1 v29
#define yout_a d10
#define yout_b d11
#define y0 d00
#define y1 d01
#define yout y0
//-----------------------------------------------------------------
#define SSR <<1:rnd:sat:shift //simplify mpy instruction
/*=============================================================================*/
   { zshift_recip_level = memd(sp+#0)               //stack args read at caller sp
     in_next_row_filt_offset = memd(sp+#8)          //(frame not yet allocated)
     lpcount0 = add(out_width,#63)                  //round up to 64-output chunks
     out_width = asl(out_width,#1)                  //output row stride in bytes (2 streams)
   } {
     bias1_bias0 = memd(ptr_bias+#0)                //
     sp = add(sp,#-6*8)                             //48-byte frame for r16-r27
     inc = #128                                     //
     zeros = #0                                     //
   } {
     memd(sp+#0 ) = r17:16                          //
     memd(sp+#8 ) = r19:18                          //
     inc -= asl(in_next_row,#1)                     // M1 = 128 - 2*rows: net +1 vector, back up 2 rows
     m0 = in_next_row
   } {
     memd(sp+#16) = r21:20                          //
     memd(sp+#24) = r23:22                          //
     lpcount0 = lsr(lpcount0,#6)                    //number of 64-wide iterations
     m1 = inc
   } {
     memd(sp+#32) = r25:24                          //
     memd(sp+#40) = r27:26                          //
     filt_offset = vsplatb(filt_offset)             //
     vrecip = vsplat(recip_level)                   //
   } {
     n_out_width = neg(out_width)                   //
     vbias0 = vsplat(bias0)                         //
     vbias1 = vsplat(bias1)                         //
     ones = vnot(zeros)                             //
   } {
     f0d0_x210 = memd(ptr_wi+#0)                    //
     f0d1_x210 = memd(ptr_wi+#8)                    //
     loop1(.L_height,out_height)                    //
     mask = valign(zeros,ones,n_out_width)          //byte mask for valid tail of last vector
   } {
     f1d1_x210 = memd(ptr_wi+#24)                   //
     f1d0_x210 = memd(ptr_wi+#16)                   //
     f0d0_2x = combine(f0d0_x2.l,f0d0_x2.h)         //halfword-swapped tap copies
     f0d1_2x = combine(f0d1_x2.l,f0d1_x2.h)         //
   } {
     f2d0_x210 = memd(ptr_wi+#32)                   //
     f2d1_x210 = memd(ptr_wi+#40)                   //
     f1d0_2x = combine(f1d0_x2.l,f1d0_x2.h)         //
     f1d1_2x = combine(f1d1_x2.l,f1d1_x2.h)         //
   } {
     f2d0_2x = combine(f2d0_x2.l,f2d0_x2.h)         //
     f2d1_2x = combine(f2d1_x2.l,f2d1_x2.h)         //
     iptr0 = ptr_xi
   } {
     nop; nop; nop                                  //
   }
.L_height:
   { x00 = vmem(iptr0++m0)                          //input row 0
     optr = ptr_zi                                  //
     p3 = sp1loop0(.L_width,lpcount0)               //p3 gates the store for the prologue iter
     nop                                            //
   } {
     x10 = vmem(iptr0++m0)                          //input row 1
     ptr_zi = add(ptr_zi,out_width)                 //
     ptr_xi = add(ptr_xi,in_next_row)               //
   } {
     x2n = vmem(iptr0++m1)                          //input row 2 (M1 rewinds for next column)
   }
        .balign 64
.L_width:
   { x0n.cur = vmem(iptr0++m0)                      //[1]
     x02 = valign(x0n,x00,#4)                       //[1]
     s00.uw += vrmpy(x22.ub,f2d0_x2.ub)             //[2]
     s01.uw += vrmpy(x22.ub,f2d0_2x.ub)             //[2]
   } {
     s10.uw += vrmpy(x22.ub,f2d1_x2.ub)             //[2]
     s11.uw += vrmpy(x22.ub,f2d1_2x.ub)             //[2]
     sum01sum00.w = vsub(s01s00.w,sa1sa0.w)         //[2] remove filt_offset contribution
   } {
     sa1sa0.uw = vrmpy(x02x00.ub,filt_offset.ub,#0) //[1] fresh correction sums (reads old sa in same packet)
     sum11sum10.w = vsub(s11s10.w,sa1sa0.w)         //[2]
   } {
     x1n.cur = vmem(iptr0++m0)                      //[1]
     x12 = valign(x1n,x10,#4)                       //[1]
     s01s00.uw = vrmpy(x02x00.ub,f0d0_01.ub,#0)     //[1]
   } {
     s11s10.uw = vrmpy(x02x00.ub,f0d1_01.ub,#0)     //[1]
     x20 = x2n                                      //[1]
     sum00.w = vasl(sum00.w,zshift)                 //[2]
   } {
     sa0.uw += vrmpy(x02.ub,filt_offset.ub)         //[1]
     sa1.uw += vrmpy(x02.ub,filt_offset.ub)         //[1]
     sum01.w = vasl(sum01.w,zshift)                 //[2]
   } {
     sum10.w = vasl(sum10.w,zshift)                 //[2]
     d00.w = vmpye(sum00.w, vrecip.uh)              //[2] Q31 scale, even halves
   } {
     sum11.w = vasl(sum11.w,zshift)                 //[2]
     d01.w = vmpye(sum01.w, vrecip.uh)              //[2]
   } {
     x2n.cur = vmem(iptr0++m1)                      //[1]
     x22 = valign(x2n,x20,#4)                       //[1]
     d10.w = vmpye(sum10.w, vrecip.uh)              //[2]
   } {
     s00.w = vadd(s00.w,vbias0.w)                   //[1]
     s01.w = vadd(s01.w,vbias0.w)                   //[1]
     d11.w = vmpye(sum11.w, vrecip.uh)              //[2]
   } {
     s10.w = vadd(s10.w,vbias1.w)                   //[1]
     s11.w = vadd(s11.w,vbias1.w)                   //[1]
     d00.w += vmpyo(sum00.w, vrecip.h):SSR          //[2] odd halves, round+sat
   } {
     d01.w += vmpyo(sum01.w, vrecip.h):SSR          //[2]
   } {
     d10.w += vmpyo(sum10.w, vrecip.h):SSR          //[2]
   } {
     d11.w += vmpyo(sum11.w, vrecip.h):SSR          //[2]
   } {
     s00.uw += vrmpy(x02.ub,f0d0_x2.ub)             //[1]
     s01.uw += vrmpy(x02.ub,f0d0_2x.ub)             //[1]
     y0.h = vsat(d01.w,d00.w)                       //[2]
   } {
     s10.uw += vrmpy(x02.ub,f0d1_x2.ub)             //[1]
     s11.uw += vrmpy(x02.ub,f0d1_2x.ub)             //[1]
     y1.h = vsat(d11.w,d10.w)                       //[2]
   } {
     sa1sa0.uw += vrmpy(x12x10.ub,filt_offset.ub,#0) //[1]
     x00 = x0n                                      //[1]
   } {
     s01s00.uw += vrmpy(x12x10.ub,f1d0_01.ub,#0)    //[1]
     yout.ub = vsat(y1.h,y0.h)                      //[2]
   } {
     s11s10.uw += vrmpy(x12x10.ub,f1d1_01.ub,#0)    //[1]
   } {
     sa0.uw += vrmpy(x12.ub,filt_offset.ub)         //[1]
     sa1.uw += vrmpy(x12.ub,filt_offset.ub)         //[1]
   } {
     s00.uw += vrmpy(x12.ub,f1d0_x2.ub)             //[1]
     s01.uw += vrmpy(x12.ub,f1d0_2x.ub)             //[1]
   } {
     s10.uw += vrmpy(x12.ub,f1d1_x2.ub)             //[1]
     s11.uw += vrmpy(x12.ub,f1d1_2x.ub)             //[1]
     x10 = x1n                                      //[1]
   } {
     sa1sa0.uw += vrmpy(x22x20.ub,filt_offset.ub,#0) //[1]
     if p3 vmemu(optr++#1) = yout                   //[2] skipped on pipeline prologue
   } {
     s01s00.uw += vrmpy(x22x20.ub,f2d0_01.ub,#0)    //[1]
   } {
     s11s10.uw += vrmpy(x22x20.ub,f2d1_01.ub,#0)    //[1]
   } {
     sa0.uw += vrmpy(x22.ub,filt_offset.ub)         //[1]
     sa1.uw += vrmpy(x22.ub,filt_offset.ub)         //[1]
   }:endloop0
/* --------------------------------------------------------------------------- */
/* Pipeline drain: finish stage [2] for the last iteration and store the
 * (possibly partial) final vector under the q1/q2 byte masks.              */
.L_width_end:
   { s00.uw += vrmpy(x22.ub,f2d0_x2.ub)             //[2]
     s01.uw += vrmpy(x22.ub,f2d0_2x.ub)             //[2]
   } {
     s10.uw += vrmpy(x22.ub,f2d1_x2.ub)             //[2]
     s11.uw += vrmpy(x22.ub,f2d1_2x.ub)             //[2]
     sum01sum00.w = vsub(s01s00.w,sa1sa0.w)         //[2]
   } {
     sum11sum10.w = vsub(s11s10.w,sa1sa0.w)         //[2]
   } {
     sum00.w = vasl(sum00.w,zshift)                 //[2]
   } {
     sum01.w = vasl(sum01.w,zshift)                 //[2]
   } {
     sum10.w = vasl(sum10.w,zshift)                 //[2]
     d00.w = vmpye(sum00.w, vrecip.uh)              //[2]
   } {
     sum11.w = vasl(sum11.w,zshift)                 //[2]
     d01.w = vmpye(sum01.w, vrecip.uh)              //[2]
   } {
     d10.w = vmpye(sum10.w, vrecip.uh)              //[2]
   } {
     d11.w = vmpye(sum11.w, vrecip.uh)              //[2]
   } {
     d00.w += vmpyo(sum00.w, vrecip.h):SSR          //[2]
   } {
     d01.w += vmpyo(sum01.w, vrecip.h):SSR          //[2]
   } {
     d10.w += vmpyo(sum10.w, vrecip.h):SSR          //[2]
     mask0 = vlalign(mask,zeros,optr)               //mask split across the unaligned store
   } {
     d11.w += vmpyo(sum11.w, vrecip.h):SSR          //[2]
     mask1 = vlalign(zeros,mask,optr)               //
   } {
     y0.h = vsat(d01.w,d00.w)                       //[2]
     iptr0 = ptr_xi                                 //restart input for next row
   } {
     y1.h = vsat(d11.w,d10.w)                       //[2]
     q1 = vcmp.gt(mask0.ub,zeros.ub)                //
   } {
     yout.ub = vsat(y1.h,y0.h)                      //[2]
     q2 = vcmp.gt(mask1.ub,zeros.ub)                //
   } {
     yout_a = vlalign(yout,zeros,optr)              //[2]
   } {
     yout_b = vlalign(zeros,yout,optr)              //[2]
     if (q1) vmem(optr++#1) = yout_a                //[2]
   } {
     if (q2) vmem(optr+#0) = yout_b                 //[2]
   }:endloop1
/* --------------------------------------------------------------------------- */
   { r27:26 = memd(sp+#40)                          //restore callee-saved regs
     r25:24 = memd(sp+#32)                          //
   } {
     r23:22 = memd(sp+#24)                          //
     r21:20 = memd(sp+#16)                          //
   } {
     r19:18 = memd(sp+#8 )                          //
     r17:16 = memd(sp+#0 )                          //
     sp = add(sp,#6*8)                              // pop stack
     jumpr r31                                      //return
   }
/*=============================================================================*/
.L_end:
        .size conv3322bbb, .L_end-conv3322bbb
/*=============================================================================*/
        .text
        .global load_indata_d2
        .balign 64
        .type load_indata_d2, @function
/*-----------------------------------------------------------------------------
 * load_indata_d2(indata, in_width, next_row, left_pad, right_pad, pad_value,
 *                [sp+0] = nbytes | out  (packed dword: r6 = out, r7 = nbytes))
 *
 * Copies one row of 2-byte elements ("d2"; every width/pad is doubled with
 * asl #1 before use) from `indata` into the buffer `out`, shifting the data
 * right by `left_pad` elements of splatted `pad_value`, and -- when
 * right_pad > 0 -- filling the tail of the partial vector at element
 * position in_width+left_pad with pad_value (q0 mask).
 * If the 128B-rounded read range past `indata` would exceed `nbytes` of
 * readable source (p0), the final 64-element chunk is not vector-loaded;
 * the remaining 2*in_width bytes are instead copied with a tail-call
 * `jump memcpy` (memcpy returns directly to this function's caller).
 * NOTE(review): `next_row` (r2) is never referenced in this body.
 * NOTE(review): if the vector loop runs zero iterations (!p2), the
 * .L_loopend packet reads x1 uninitialized -- presumably p1 is false in
 * that case so the store is suppressed; confirm against callers.
 * Clobbers: r0-r5, r8-r14, v0-v3, q0, p0-p3.
 *---------------------------------------------------------------------------*/
load_indata_d2:
/* =========================================================================== */
#define indata r0
#define in_width r1
#define next_row r2
#define left_pad r3
#define right_pad r4
#define pad_value r5
#define out r6
#define nbytes r7
#define nbytes_out r7:6
//-----------------------------------------------------------------
#define iteration r8
#define rpad_pos r9
#define out_r r10
#define width_roundup r11
#define range r12
#define rpad_pos2 r13
#define outm r14
//-----------------------------------------------------------------
#define x0 v0
#define x1 v1
#define xout v2
#define vpad v3
/*=============================================================================*/
   { nbytes_out = memd(sp+#0)                       //stack args (out, nbytes)
     range = addasl(indata,in_width,#1)             //end of source row in bytes
     pad_value = vsplatb(pad_value)                 //
     rpad_pos = add(in_width,left_pad)              //element position of right pad
   } {
     range = add(range,#127)                        //
     vpad = vsplat(pad_value)                       //
     width_roundup = add(in_width,#63)              //
     p3 = cmp.gt(right_pad,#0)                      //
   } {
     range = and(range,#-128)                       //round read range up to 128B
     rpad_pos2 = asl(rpad_pos,#1)                   //right-pad position in bytes
     width_roundup = and(width_roundup,#-64)        //
     x0 = vpad                                      //left padding for first vlalign
   } {
     range = sub(range,indata)                      //bytes the vector loop would read
     q0 = vsetq(rpad_pos2)                          //
     out_r = add(out,rpad_pos2)                     //
     iteration = lsr(width_roundup,#6)              //64 elements (128B) per iteration
   } {
     p0 = cmp.gt(range,nbytes)                      //would over-read the source?
     if p0.new iteration = add(iteration,#-1)       //then drop last vector chunk
     if p0.new width_roundup= add(width_roundup,#-64)//
     left_pad = asl(left_pad,#1)                    //left pad in bytes
   } {
     p1 = cmp.gt(rpad_pos,width_roundup)            //
     loop0(.L_loop,iteration)                       //
     outm = add(out,left_pad)                       //
     p2 = cmp.gt(iteration,#0)                      //
   } {
     if p0 in_width = sub(in_width,width_roundup)   //elements left for the memcpy tail
     if !p2 jump .L_loopend                         //
     outm += asl(width_roundup,#1)                  //memcpy destination
   }
//-----------------------------------------------------------------
        .balign 64
.L_loop:
   { x1 = vmemu(indata++#1)                         //
   } {
     xout = vlalign(x1,x0,left_pad)                 //shift data right by left_pad bytes
     vmem(out++#1) = xout.new                       //
     x0 = x1                                        //
   }:endloop0
//-----------------------------------------------------------------
.L_loopend:
   { xout = vlalign(x1,x0,left_pad)                 //flush carried bytes
     if (p1) vmem(out++#1) = xout.new               //
     if !p3 jump .L_continue                        //no right padding requested
   } {
     if (!q0) vmem(out_r+#0) = vpad                 //pad bytes above rpad_pos2
   }
.L_continue:
   { if !p0 jumpr r31                               //done unless a tail remains
   } {
     r0 = outm                                      //dst
     r1 = indata                                    //src
     r2 = asl(in_width,#1)                          //byte count
     jump memcpy                                    //tail-call; memcpy returns to caller
   }
/*=============================================================================*/
        .size load_indata_d2, .-load_indata_d2
/*=============================================================================*/
|
/* ================ hexagon/asm_src/gvconv2dbbb_d16_s1f_h_v66.S ================ */
/*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*======================================================================*/
/*  FUNCTIONS      : gvconv2dbbbs1_d16_v66_asm                          */
/*                                                                      */
/*  DESCRIPTION                                                         */
/*        Perform 2d convolution with input depth to output             */
/* max, min computed and output scaled to 8bits */
/* */
/* ARCHITECTURE : QDSP6V6 + HVX */
/*======================================================================*/
/* REVISION HISTORY: */
/* ================= */
/* */
/* Author Date Comments */
/* ------------------------------------------------------------- */
/* DJH 04/21/17 created */
/* DJH 05/12/17 update api precomputed filt_offset */
/* DJH 05/16/17 Hoisted loop0 around to prolog and */
/* epilog of loop1 */
/* DJ 05/17/17 speciaized version with hstride = 1 */
/*======================================================================*/
#if 0
#endif
/*=============================================================================*/
        .text
        .file "gvconv2dbbb_d16_s1f_h_v66.S"
        .global gvconv2dbbbs1_d16_v66_asm
        .balign 32
        .type gvconv2dbbbs1_d16_v66_asm, @function
/*-----------------------------------------------------------------------------
 * gvconv2dbbbs1_d16_v66_asm -- stride-1 d32-format 2d convolution (V66 HVX)
 *
 * Register args: r0 ptr_xi (data, 128-aligned), r1 ptr_wi (weights,
 * 128-aligned), r2 ptr_zi (results, 128-aligned), r3 in_width
 * (pad_l+in_width+pad_r, %4), r4 out_width_stride_depth, r5 out_width.
 * Stack args 0..12 are listed next to the #defines below (stride, in_depth,
 * filter dims, out_height, ptr_filtsum, ptr_max, recip_level, out_align,
 * skip_col, out_next_d32, nslice, recip_shamt).
 *
 * Computes 4 output vectors per inner iteration using the V66 Z-register
 * file: activations are streamed through `z = vmem(...)` and multiplied
 * with vrmpyz against 4 weight vectors per packet.  Accumulators start at
 * the precomputed filter sums (wsum), are left-shifted by recip_shamt,
 * scaled by recipvec (vmpye/vmpyo with round/sat), tracked into running
 * max/min vectors (stored back to ptr_max[0..1]), and finally packed and
 * saturated to u8 with vlalign honoring out_align.  The outer .L_depth
 * loop walks `nslice` weight slices, reloading recipvec per slice.
 * Callee-saved r16-r27 live in a BASE(=72)-byte frame; sp+#48..#68 hold
 * spilled locals (ptr_xi, ptr_w, ptr_zi, batch size, edge masks).
 *---------------------------------------------------------------------------*/
gvconv2dbbbs1_d16_v66_asm:
/*=============================================================================*/
/*=============================================================================*/
#define ptr_xi r0 //data aligned 128
#define ptr_wi r1 //weights aligned 128
#define ptr_zi r2 //results aligned 128
#define in_width r3 //(pad_l+in_width+pad_r) => 4 %4
#define weight_batch_size r3 //
#define weight_batch_size_ptr_zi r3:2
#define out_width_stride_depth r4 //value in bytes to get to next full out row
#define out_width r5 //out_width_pad
#define stride_h_w r26 //0 stride_height|stride_width
#define in_depth r27 //1 %32
#define in_depth_stride_h_w r27:26
#define filt_width r8 //2 >= 1
#define filt_height r9 //3 >= 1 filt_height lines per filter
#define filt_height_width r9:8 //
#define out_height r17 //4 >= 1 number of vertical lines to perform
#define ptr_filtsum r27 //5 aligned 128
#define ptr_max r21 //6 aligned 128
#define recip_level r23 //7 recip is 31bit unsigned 0x7f800000000LL / max
#define out_align r1 //8 0, 32, 64, 96
#define skip_col r7 //9
#define out_next_d32 r7 //10
#define nslice r12 //11
#define recip_shamt r12 //12
#define STP r21 //
#define AEQ0 r10 //
/*=============================================================================*/
#define filt_width_in r26 //
#define filt_depth_width r11 //total depth 32 iterations of each h filter
#define in_width_stride_h_depth r28 //in_width * stride_h * in_depth for next output
#define ptr_x0 r6 //
#define ptr_x1 r7 //
#define ptr_x1_ptr_x0 r7:6 //
#define stride_w r18 //stride width =1
#define next_outputs r19 //jump to input ptr for next set of outputs
#define ptr_w r20 //
#define in_width_32 r22 //
#define ptr_z r24 //
#define col_count r25 //
#define filt_cnt r18
#define ptr_x0_ r12
#define ptr_x1_ r13 //
#define ptr_x1_ptr_x0_ r13:12
#define z_ptr r3
#define c8_c96 r15:14
#define c8 r15
#define filt_width512 r16 //corrected to use ptr_w as a counter for fused loop
#define c640 r14
#define lmask r23 //
#define BASE 72
#define PRED3_0 C4 //
/*=============================================================================*/
#define PS(SSRC) .word (0x1DFFE100+SSRC) //debug vec reg
#define PV(VSRC) .word (0x1DFFE020+VSRC)
#define s0 v0 //
#define s1 v1 //
#define s1s0 v1:0 //
#define s2 v2 //
#define s3 v3 //
#define s3s2 v3:2 //
#define s3s2s1s0 v3:0 //
#define w0 v21 //
#define x0 v4 //
#define x1 v5 //
#define x2 v6 //
#define x3 v7 //
#define x3210 v6 //
#define x3_prev v16 //previous value
#define xout v17 //realigned out
#define y0 v8 //
#define y1 v9 //
#define y2 v10 //
#define y3 v11 //
#define wsum v14 //initialized to in_offset*wsum + biasoffset
#define maxe v12 //
#define mine v18 //
#define biasvec v16 //
#define recipvec v15 //
#define recip_sh_vec v20 //
#define RSS <<1:rnd:sat:shift //unverbose the instruction
/*=============================================================================*/
   { filt_height_width = memd(sp+#8)                //stack args read at caller sp
     sp = add(sp,#-BASE)                            //
     out_width = lsr(out_width, #2)                 //4 outputs at once
   } {
     memd(sp+#40) = r27:26                          //
     in_depth_stride_h_w = memd(sp+#(BASE+0))       //
     c640 = #640                                    //
   } {
     memd(sp+#16) = r21:20                          //
     ptr_max = memw(sp+#(BASE+24))                  //
     M1 = c640                                      //ptr_w ++ #5 equivalence
     filt_depth_width = lsr(in_depth, #5)           //filt_width * in_depth / 32
   } {
     memd(sp+#0) = r17:16                           //
     memd(sp+#8) = r19:18                           //
     next_outputs = mpy(filt_height.L,in_depth.L)   //filt_height*in_depth
     in_width_stride_h_depth = mpy(stride_h_w.H, in_depth.L)
   } {
     memd(sp+#24) = r23:22                          //
     recip_level = memw(sp+#(BASE+28))              //
     next_outputs = lsr(next_outputs, #5)           //filt_height*depth/32
     filt_depth_width = mpyi(filt_depth_width, filt_width)//2d filter
   } {
     memd(sp+#32) = r25:24                          //
     ptr_w = ptr_wi                                 //[P,0]ptr_y=ptr_yi initialize filter pointer
     maxe = vmem(ptr_max+#0)                        //
   } {
     memw(sp+#48) = ptr_xi                          //
     in_width_stride_h_depth=mpyi(in_width,in_width_stride_h_depth) //total vertical stride bytes
     in_width_32 = asl(in_width, #5)                //32 * in_width d32 line
     mine = vmem(ptr_max+#1)                        //
   } {
     recipvec = vmem(recip_level++#1)               //
     next_outputs=mpyi(next_outputs,in_width)       //filt_height*in_width*in_depth
     weight_batch_size = mpyi(filt_depth_width,filt_height)
   } {
     memw(sp+#(BASE+28)) = recip_level              //
     r6 = #32                                       //
     recip_shamt = memw(sp+#(BASE+48))              //
   } {
     out_align = memw(sp+#(BASE+32))                //
     filt_depth_width = sub(filt_depth_width, filt_width)//-1 to allow for depth 16 pass
     weight_batch_size = asl(weight_batch_size,#10) // 32*filt_width*filt_height*in_depth
     filt_width_in = add(filt_width, #-1)           //account for peeling 1 iteration off
   } {
     recip_sh_vec = vsplat(recip_shamt)             //
     r6 -= lsr(out_align,#2)                        //
   } {
     r6 = and(r6, #0x1f)                            //
     r7 = #-1                                       //
   } {
     r7 = asl(r7, r6)                               //
   } {
     memw(sp+#64) = r7                              //left edge mask
     skip_col = memw(sp+#(BASE+36))                 //
     r6 = #0x1f
   } {
     r6 &= asl(skip_col, #3)                        //
     r7 = #-1                                       //
   } {
     r7 = lsr(r7, r6)                               //
   } {
     memw(sp+#68) = r7                              //right edge mask
   } {
     memw(sp+#60) = weight_batch_size               //
     filt_width = asl(filt_width, #10)              //convert for use in address counter sequence
     loop1(.L_filt_h, filt_height)                  //for(filt_y=0;filt_y<height*in_depth/32;filt_y+=1)
     AEQ0 = cmp.eq(out_align, #0)                   //if no alignment enable store
   } {
     next_outputs = add(next_outputs, #-4)          //1,2
     c8_c96 = combine(#8, #96)                      //
     filt_width = add(filt_width, #-128)            //1 cycle off from consumer to generator
     filt_width512 = add(filt_width, #-512-512)     //account for 512bytes per loop of w + precorrect for loop correction
   } {
     ptr_filtsum = memw(sp+#(BASE+20))              //ptr pre computed weight sum
     filt_depth_width +=add(filt_depth_width, #-1)  //as 16 per loop so need 2x; peel 1 iteration off
     next_outputs = asl(next_outputs, #5)           //(flt_hight*in_width*in_depth/32-4*stride)*32
   }
/*=============================================================================*/
        .balign 64
.L_depth:
   {
     ptr_xi = memw(sp+#48)                          //
     filt_cnt = add(filt_width512, ptr_w)           //add(filt_width, #-1) //ptr_w)
     STP = !cmp.eq(r0, r0)                          //force p2 off
     lmask = memw(sp+#64)                           //left mask
   } {
     col_count = out_width                          //setup first width loop
     memw(sp+#52) = ptr_w                           //save wi for someone else
     ptr_x0 = ptr_xi                                //ptr_xi
     wsum = vmem(ptr_filtsum++#1)                   //
   } {
     memw(sp+#56) = ptr_zi
     ptr_xi= add(ptr_xi,in_width_stride_h_depth)    //ptr_x+=in_width*stride_h*in_depth)
     ptr_z = add(ptr_zi, #0)                        //
     ptr_x1 = add(ptr_x0, #100)                     //[Pheight]setup initial pointer
   } {
     out_height = memw(sp+#(BASE+16))               //number of output lines
     z = vmem(ptr_x0+#0)                            //[Pheight]load 0-127 into V66 Z register
     s3s2 = vcombine(wsum, wsum)                    //[P, 0]initialize accumulators
     s1s0 = vcombine(wsum, wsum)                    //[P, 0]initialize accumulators
   }
/*=============================================================================*/
        .balign 64
.L_height:
.L_width:
.L_filt_h:
   { w0.tmp = vmem(ptr_w++#1)                      //[0, 0]load weights
     s3s2s1s0.w += vrmpyz(w0.b, ptr_x1.ub++)       //[0, 0]perform mac across 4 streams with same weights
     loop0(.L_filt_w, filt_depth_width)            //for(filt_y=0;filt_y<height*in_depth/32;filt_y+=1)
     nop                                           //
   } {
     w0.tmp = vmem(ptr_w++#1)                      //[0, 1]load weights
     s3s2s1s0.w += vrmpyz(w0.b, ptr_x1.ub++)       //[0, 1]perform mac across 4 streams with same weights
     z_ptr= add(ptr_x1,#8)                         //[0, 1]ptr_x1_=add(ptr_x1,#8)
     nop                                           //
   } {
     w0.tmp = vmem(ptr_w++#1)                      //[0, 2]load weights
     s3s2s1s0.w += vrmpyz(w0.b, ptr_x1.ub++)       //[0, 2]perform mac across 4 streams with same weights
     c8 = #8                                       //restore c8 to 8 for 1st loop
     filt_cnt = add(filt_cnt, #512)                //restore filt_cnt to this 1st loop
   } {
     w0.tmp = vmem(ptr_w++#1)                      //[0, 3]load weights
     s3s2s1s0.w += vrmpyz(w0.b, ptr_x1.ub )        //[0, 3]perform mac across 4 streams with same weights
     z = vmem(z_ptr+#0)                            //[0, 3]load 0-127 bytes into z buffer
     ptr_x1 = add(z_ptr, #4)                       //[0, 3]reset ptr for next row of filter taps
   }
.L_filt_w:
   { w0.tmp = vmem(ptr_w++#1)                      //[0, 0]load weights
     s3s2s1s0.w += vrmpyz(w0.b, ptr_x1.ub++)       //[0, 0]perform mac across 4 streams with same weights
     p3 = cmp.eq(filt_cnt, ptr_w)                  //[0, 0]ki is k1/32 - 0
     if(p3.new) ptr_x0 = add(ptr_x0, in_width_32)  //[0, 0]move to next line ptr_y keeps going
   } {
     w0.tmp = vmem(ptr_w++#1)                      //[0, 1]load weights
     s3s2s1s0.w += vrmpyz(w0.b, ptr_x1.ub++)       //[0, 1]perform mac across 4 streams with same weights
     if(p3) filt_cnt = add(filt_width, ptr_w)      //[0, 1]
     ptr_x1_ptr_x0_= vaddw(ptr_x1_ptr_x0,c8_c96)   //[0, 1]ptr_x1_=add(ptr_x1,#8)||/ptr_x0_=add(ptr_x0, #96)
   } {
     w0.tmp = vmem(ptr_w++#1)                      //[0, 2]load weights
     s3s2s1s0.w += vrmpyz(w0.b, ptr_x1.ub++)       //[0, 2]perform mac across 4 streams with same weights
     z_ptr = mux(p3, ptr_x0, ptr_x1_)              //[0, 2]
     ptr_x1_ = mux(p3, ptr_x0_, ptr_x1_)           //[0, 2]
   } {
     w0.tmp = vmem(ptr_w++#1)                      //[0, 3]load weights
     s3s2s1s0.w += vrmpyz(w0.b, ptr_x1.ub )        //[0, 3]perform mac across 4 streams with same weights
     z = vmem(z_ptr+#0)                            //[0, 3]load 0-127 bytes into z buffer
     ptr_x1 = add(ptr_x1_, #4)                     //[0, 3]reset ptr for next row of filter taps
   }:endloop0
/*=============================================================================*/
/* Peeled final filt_width pass: handles the trailing depth-16 chunk.          */
   { loop0(.L_filt_d16, filt_width_in)             //for(filt_y=0;filt_y<height*in_depth/32;filt_y+=1)
     filt_cnt = add(filt_cnt, #-512)
     w0.tmp = vmem(ptr_w++#1)                      //[0, 0]load weights
     s3s2s1s0.w += vrmpyz(w0.b, ptr_x1.ub++)       //[0, 0]perform mac across 4 streams with same weights
   } {
     w0.tmp = vmem(ptr_w++#1)                      //[0, 1]load weights
     s3s2s1s0.w += vrmpyz(w0.b, ptr_x1.ub++)       //[0, 1]perform mac across 4 streams with same weights
     z_ptr= add(ptr_x1,#24)                        //[0, 1]ptr_x1_=add(ptr_x1,#8)
     nop
   } {
     w0.tmp = vmem(ptr_w++#1)                      //[0, 2]load weights
     s3s2s1s0.w += vrmpyz(w0.b, ptr_x1.ub++)       //[0, 2]perform mac across 4 streams with same weights
     c8 = #24                                      //8 + 16 advance decision by 1/2 loop
     nop
   } {
     w0.tmp = vmem(ptr_w++M1)                      //[0, 3]load weights
     s3s2s1s0.w += vrmpyz(w0.b, ptr_x1.ub )        //[0, 3]perform mac across 4 streams with same weights
     z = vmem(z_ptr+#0)                            //[0, 3]load 0-127 bytes into z buffer
     ptr_x1 = add(z_ptr, #4 )                      //[0, 3]reset ptr for next row of filter taps
   }
.L_filt_d16:
   { w0.tmp = vmem(ptr_w++#1)                      //[0, 0]load weights
     s3s2s1s0.w += vrmpyz(w0.b, ptr_x1.ub++)       //[0, 0]perform mac across 4 streams with same weights
     p3 = cmp.eq(filt_cnt, ptr_w)                  //[0, 0]ki is k1/32 - 0
     if(p3.new) ptr_x0 = add(ptr_x0, in_width_32)  //[0, 0]move to next line ptr_y keeps going
   } {
     if(p3) filt_cnt = add(filt_width, ptr_w)      //[0, 1]
     w0.tmp = vmem(ptr_w++#1)                      //[0, 1]load weights
     s3s2s1s0.w += vrmpyz(w0.b, ptr_x1.ub++)       //[0, 1]perform mac across 4 streams with same weights
     ptr_x1_ptr_x0_= vaddw(ptr_x1_ptr_x0,c8_c96)   //[0, 1]ptr_x1_=add(ptr_x1,#8)||/ptr_x0_=add(ptr_x0, #96)
   } {
     w0.tmp = vmem(ptr_w++#1)                      //[0, 2]load weights
     s3s2s1s0.w += vrmpyz(w0.b, ptr_x1.ub++)       //[0, 2]perform mac across 4 streams with same weights
     z_ptr = mux(p3, ptr_x0, ptr_x1_)              //[0, 2]
     ptr_x1_ = mux(p3, ptr_x0_, ptr_x1_)           //[0, 2]
   } {
     w0.tmp = vmem(ptr_w++M1)                      //[0, 3]load weights
     s3s2s1s0.w += vrmpyz(w0.b, ptr_x1.ub )        //[0, 3]perform mac across 4 streams with same weights
     z = vmem(z_ptr+#0)                            //[0, 3]load 0-127 bytes into z buffer
     ptr_x1 = add(ptr_x1_, #4 )                    //[0, 3]reset ptr for next row of filter taps
   }:endloop0:endloop1                             //
/*=============================================================================*/
/* Scale, clamp into max/min trackers, pack to u8 and store ([E*] = epilogue
 * of one 4-output group interleaved with the setup of the next).              */
   s0.w = vasl(s0.w, recip_sh_vec.w)               //
   s1.w = vasl(s1.w, recip_sh_vec.w)               //
   s2.w = vasl(s2.w, recip_sh_vec.w)               //
   s3.w = vasl(s3.w, recip_sh_vec.w)               //
   {
     x1.h = vpack(y1.w, y0.w):sat                  //[E1, 0]pack low 16bits together
     loop1(.L_filt_h, filt_height)                 //for(filt_y=0;filt_y<height*in_depth/32;filt_y++)
   } {
     PRED3_0 = lmask                               //load p0-p3 from the edge mask
     lmask = #-1                                   //
   } {
     ptr_x0 = sub(ptr_x0, next_outputs)            //[E0, 1]reset data ptr to next 4
     y0.w = vmpye(s0.w, recipvec.uh)               //[E0, 1](s2 * recip + rnd)>>31
     x3.h = vpack(y3.w, y2.w):sat                  //[E1, 1]pack low 16bits together
     col_count = add(col_count, #-1)               //
   } {
     y0.w+= vmpyo(s0.w, recipvec.h):RSS            //<<1:rnd:sat:shift //[E0, 2]
     ptr_w = memw(sp+#52)                          ////[E0, 5]ptr_w=ptr_wi init filter pointer
   } {
     if(!p0) y0 = maxe                             //masked lanes don't affect max/min
   } {
     maxe.w = vmax(maxe.w, y0.w)                   //[E0, 0]see if s0 is max
     if(!p0) y0 = mine                             //
   } {
     mine.w = vmin(mine.w, y0.w)                   //[E0, 0]see if s0 is min
   } {
     y1.w = vmpye(s1.w, recipvec.uh)               //[E0, 3](s2 * recip + rnd)>>31
     x3210.ub = vpack(x3.h, x1.h):sat              //[E1, 3]#sat8 <0, >255 and pack low 8bits
     ptr_x1 = add(ptr_x0, #100)                    //4 //setup initial pointer
   } {
     y1.w+= vmpyo(s1.w, recipvec.h):RSS            //<<1:rnd:sat:shift //[E0, 4](s2 * recip + rnd)>>31
   } {
     if(!p1) y1 = maxe                             //
   } {
     maxe.w = vmax(maxe.w, y1.w)                   //[E0, 3]
     if(!p1) y1 = mine                             //
   } {
     mine.w = vmin(mine.w, y1.w)                   //[E0, 4]see if z0 is max
     p1 = STP                                      //
   } {
     y2.w = vmpye(s2.w, recipvec.uh)               //[E0, 5](s2 * recip + rnd)>>31
     s1s0 = vcombine(wsum, wsum)                   //[E0, 5]initialize accumulator 0
     filt_cnt = add(ptr_w,filt_width512)           //using the ptr_w as a counter
   } {
     y2.w+= vmpyo(s2.w, recipvec.h):RSS            //<<1:rnd:sat:shift //[E0, 6](s2 * recip + rnd)>>31
     xout = vlalign(x3210,x3_prev,out_align)       //[E1, 6]
     if(p1)vmem(ptr_z++#1):nt = xout.new           //[E1, 6]store 2nd 32bytes
   } {
     if(!p2) y2 = maxe                             //
     p0 = cmp.eq(col_count, #1)                    //
   } {
     maxe.w = vmax(maxe.w, y2.w)                   //[E0, 4]
     if(!p2) y2 = mine                             //
   } {
     mine.w = vmin(mine.w, y2.w)                   //[E0, 5]see if z0 is max
     p1 = cmp.eq(col_count, #0)                    //
     if(p0) lmask = memw(sp+#68)                   //right mask
   } {
     y3.w = vmpye(s3.w, recipvec.uh)               //[E0, 7]#(s2 * recip + rnd)>>31
     x3_prev = x3210                               //[E1, 7]save data for next output align
     STP = AEQ0                                    //[E1, 7]update predicate pipeline
     AEQ0 = cmp.eq(r0, r0)                         //[E1, 7]set to true
   } {
     y3.w+= vmpyo(s3.w, recipvec.h):RSS            //<<1:rnd:sat:shift //[E0,8](s2 * recip + rnd)>>31
     s3s2 = vcombine(wsum, wsum)                   //[E0, 8]initialize accumulator 2,3
     z = vmem(ptr_x0+#0)                           //pre load 0-127 for next row of filter
   } {
     if(!p3) y3 = maxe                             //
   } {
     maxe.w = vmax(maxe.w, y3.w)                   //[E0, 2]
     if(!p3) y3 = mine                             //
   } {
     mine.w = vmin(mine.w, y3.w)                   //[E0, 2]see if z0 is max
     if(!p1) jump .L_width                         //
   }//cols per line kernel loop width
/*=============================================================================*/
   { x1.h = vpack(y1.w, y0.w):sat                  //[E1, 0]#>>16
     out_height = add(out_height, #-1)             //Prolog width
     STP = !cmp.eq(r0, r0)                         //[Pheight]force p2 off
   } {
     x3.h = vpack(y3.w, y2.w):sat                  //[E1, 1]#sat8 <0, >255
     p1 = !cmp.eq(out_height, #0)                  //EE
     loop1(.L_filt_h, filt_height)                 //for(filt_y=0;filt_y<height*in_depth/32;filt_y+=1)
   } {
     ptr_x0 = ptr_xi                               //Prolog width ptr_xi
     AEQ0 = cmp.eq(out_align, #0)                  //[Pheight]if no alignment enable store
     col_count = out_width                         //outer loop set up width
   } {
     ptr_xi = add(ptr_xi,in_width_stride_h_depth)  //[Pheight]ptr_x+=in_width*stride_h*in_depth)
     x3210.ub = vpack(x3.h, x1.h):sat              //[E1, 3]#sat8 <0, >255
     skip_col = memw(sp+#(BASE+36))                //
   } {
     p3 = tstbit(skip_col, #2)                     //[E1, 6] == 4
     ptr_zi = add(ptr_zi, out_width_stride_depth)  //EEnext out line for this depth segment
     ptr_x1 = add(ptr_x0, #100)                    //[Pheight]setup initial pointer
     if (p1) z = vmem(ptr_x0+#0)                   //[Pheight]load 0-127
   } {
     xout = vlalign(x3210, x3_prev, out_align)     //[E1, 6]
     vmem(ptr_z+#0):nt = xout.new                  //[E1, 6]store 2nd 32bytes
   } {
     xout = vlalign(x3210, x3210, out_align)       //[E1, 7]
     if(p3) vmem(ptr_z+#1):nt = xout.new           //[E1, 7]flush out last values
     ptr_z = add(ptr_zi, #0)                       //
     if (p1) jump:t .L_height                      //EE
   }//end lines per block//last cols per line
/*=============================================================================*/
/* Advance to the next weight slice (depth chunk); reload per-slice recip.     */
   {
     nslice = memw(sp+#(BASE+44))                  //
     weight_batch_size_ptr_zi = memd(sp+#56)       //
   } {
     nslice = add(nslice,#-1)                      //
     ptr_w = add(ptr_w,weight_batch_size)          //
   } {
     out_next_d32 = memw(sp+#(BASE+40))            //
     p1 = cmp.gt(nslice,#0)                        //
     recip_level = memw(sp+#(BASE+28))             //
   } {
     ptr_zi = add(ptr_zi,out_next_d32)             //
     if(p1) recipvec = vmem(recip_level++#1)       //
   } {
     memw(sp+#(BASE+28)) = recip_level             //
     memw(sp+#(BASE+44)) = nslice                  //update nslice (weight chunks)
     if p1 jump .L_depth                           //
   }
/*=============================================================================*/
   ptr_max = memw(sp+#(BASE+24))                   //
   { r17:16 = memd(sp+#0)                          //restore r16,r17 from stack
     r19:18 = memd(sp+#8)                          //restore r18,r19
   } {
     vmem(ptr_max+#0) = maxe                       //[E, 0]32max
     r23:22 = memd(sp+#24)                         //restore r22,r23
   } {
     r25:24 = memd(sp+#32)                         //restore r24,r25
     vmem(ptr_max+#1) = mine                       //[E, 0]32min
   } {
     r21:20 = memd(sp+#16)                         //restore r20,r21
     r27:26 = memd(sp+#40)                         //restore r26,r27
     sp = add(sp,#BASE)                            //
     jumpr r31                                     //
   }
.L_end:
/*=============================================================================*/
        .size gvconv2dbbbs1_d16_v66_asm, .L_end-gvconv2dbbbs1_d16_v66_asm
/*=============================================================================*/
|
/* ======================= hexagon/asm_src/gvmsuma_h.S ======================= */
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*======================================================================*/
/* FUNCTIONS : gvmsumimw_asm */
/* */
/* DESCRIPTION */
/* Perform vector sum on input stream, result at 32bits */
/* */
/* ARCHITECTURE : QDSP6V6 + HVX */
/*======================================================================*/
/* REVISION HISTORY: */
/* ================= */
/* */
/* Author Date Comments */
/* ------------------------------------------------------------- */
/* DJH 03/07/16 created */
/* DJH 05/10/16 added post add for x and y offset*/
/* DJH 07/10/16 rewrote pre-transpose */
/* DJH 09/16/16 fix over prefetch by 16 now 8 */
/*======================================================================*/
/* CYCLE-COUNT: */
/* -> 16*K*N/32+11*N/4+24 */
/* */
/* MEMORY */
/* CODESIZE = 1040 bytes */
/* STACK = 48 bytes */
/* ASSUMPTIONS */
/* y and z are 128 byte aligned */
/* x is 8byte aligned */
/* N%4=0 K%8=0 M%128=0 */
/* C MODEL */
/* N = Nlen */
/* K = Klen | Kstride */
/* M = Mlen | Mstride */
/*======================================================================*/
#if 0
void gvmsumimw_cn(uint8 * a, int * c, int N, int K, uchar y_offset, int z_offset) {
int i, j, k;
int32 sum;
uint8 a_val, b_val;
for (j=0; j < M; j++) {
for (i=0; i < N; i++) {
sum = 0;
for (k=0; k < K; k++) {
a_val = a[i*K+k];
sum += a_val ;
}
c[i*M+j] = sum*y_offset + z_offset;
}
}
return;
}
#endif
/*=============================================================================*/
        .text
        .file "gvmsumimw_h.S"
        .global gvmsumimw_asm
        .balign 32
        .type gvmsumimw_asm, @function
/*-----------------------------------------------------------------------------
 * gvmsumimw_asm -- scalar-unit input-sum kernel (see C model above)
 * In (registers): r0 ptr_x (u8 data), r1 ptr_xsum (i32 results),
 *   r2 out_width, r3 skip_back (bytes back to the next line, accounting for
 *   stride/pad/filter -- per the #define), r4 stride (stride*depth),
 *   r5 filt_width (filter length in bytes; consumed 16 bytes/iteration).
 * In (stack): sp+#0 out_height, sp+#4 filt_offset, sp+#8 z_offset
 *   (per the file header, z_offset = K*x_offset*y_offset).
 * For each of the out_height x out_width outputs: sums filt_width u8
 * bytes with vraddub, then stores sum*filt_offset + z_offset as an i32.
 * NOTE(review): r19:18 is saved/restored but never written in the body.
 * Clobbers: r6-r15; r16-r19 preserved via allocframe(#16).
 *---------------------------------------------------------------------------*/
gvmsumimw_asm:
/*=============================================================================*/
#define ptr_x r0 //data
#define ptr_xsum r1 //results
#define out_width r2 //out_width
#define skip_back r3 //skip back to next line wrt the stride, pad and filt_width and depth
#define stride r4 //stride*depth
#define filt_width r5 //filt_width*depth elements in the filter length
#define out_height r6 //number of vertical lines to perform
#define filt_offset r7 //8bit value to be subtracted
#define z_offset r8 //32bit value to be added K*xo*yo
/*=============================================================================*/
#define ki r9 //
#define ptr_x0 r10 //
#define sum r11 //
#define sum1_sum0 r17:16//
#define sum1 r17 //
#define sum0 r16 //
//
#define x07x04x03x00 r13:12//
#define x07x04 r13 //
#define x03x00 r12 //
#define x0fx0cx0bx08 r15:14//
#define x0fx0c r15 //
#define x0bx08 r14 //
/*=============================================================================*/
   {
     out_height = memw(sp+#0<<2)                   //stack args read at caller sp
     filt_offset = memw(sp+#1<<2)                  //
   } {
     z_offset = memw(sp+#2<<2)                     //z_offset = K*x_offset*y_offset
     allocframe(#16)                               //
     ki = lsr(filt_width, #4)                      //k / 16
   } {
     memd(sp+#0) = r17:16                          //
     memd(sp+#8) = r19:18                          //
     ki = add(ki, #-1)                             //one iteration handled in prolog/epilog
   }
/*============================================================================*/
        .balign 32
.L_height:
   {
     loop1(.L_width, out_width)                    //[ , P]for(i=0; i < n; i+=4){
     out_height = add(out_height, #-1)
   }
        .balign 32
.L_width:
   {
     ptr_x0 = ptr_x
     ptr_x = add(ptr_x, stride)                    //ptr_x += stride
     loop0(.L_filt_width, ki)                      //[P, 9]ki is k1/4 - 2
   } {
     sum1_sum0 = combine(#0, #0)                   //
     x0fx0cx0bx08 = memd(ptr_x0+#8)                //[0, 0]
     x07x04x03x00 = memd(ptr_x0++#16)              //[0, 0]
   }
        .balign 32
.L_filt_width:
   {
     sum1_sum0 += vraddub(x0fx0cx0bx08, x07x04x03x00) //[1,0] sum 16 u8 bytes
     x0fx0cx0bx08 = memd(ptr_x0+#8)                //[0, 1]
     x07x04x03x00 = memd(ptr_x0++#16)              //[0, 1]
   }:endloop0
   {
     sum1_sum0 += vraddub(x0fx0cx0bx08, x07x04x03x00) //[1,1] last 16 bytes
   } {
     sum0 = add(sum0, sum1)
     sum = z_offset
   } {
     sum += mpyi(sum0, filt_offset)                //sum*filt_offset + z_offset
   } {
     memw(ptr_xsum++#1<<2) = sum                   //[E, ]
   }:endloop1
   {
     ptr_x = add(ptr_x, skip_back)                 //[E, ]next line
     p1 = cmp.eq(out_height, #0)
     if(!p1.new) jump:t .L_height
   }
/*=============================================================================*/
   { r17:16 = memd(sp+#0)                          //restore stack
     r19:18 = memd(sp+#8)                          //Q
   } {
     dealloc_return                                //Q
   }
.L_end:
/*=============================================================================*/
        .size gvmsumimw_asm, .L_end-gvmsumimw_asm
|
XiaoMi/nnlib | 15,557 | hexagon/asm_src/vmemset_2d_h.S | /*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
.text
.file "vmemset_2d_h.S"
//
// HVX rectangle fill operation
//
// void vmemset_32_2d_asm(
// void * dst, // location
// int val, // 32_bit value to fill
// int width, // width of rectangle (bytes), >0
// int height, // height of rectangle; rows > 0
// int stride ); // stride of buffer; must be multiple of vector
//
// This operation does any rectangle fill using vector operations; it uses masked
// writes as needed to avoid over-writing portions of the output buffer outside the
// specified area.
// The row pitches must be vector aligned (so whatever strategy is used on one row,
// is used on all others). When the width is small - up to 6 vectors per row -
// there is no 'horizontal' loop, just the height loop.
//
//
// The row-pitch condition is not checked. The wid >0, ht >0 condition is checked, and
// the routine does nothing if this is not the case.
//
// This will be a little smaller/faster for __HEXAGON_ARCH__ >= 62
// (uses vsetq2 for masks)
//
// For a general byte fill, the 'val' should be the same in all 4 byte lanes. (An inline wrapper vmemset_2d_asm can do this)
// If you want a 16-bit fill, put the value in both halves of 'value', and use an even width;
// the l.s. byte of the value will always be stored at the start of each row of the array, even if the pointer
// is odd.
//// Likewise, for 32-bit fill, supply the value as 'val'; the lsb of the value will be aligned to the start
// of each row (and 'width' should be a multiple of 4).
//
// There is also vmemset_2d_32_general_asm
// .. which has exactly the same form but no restriction on dst pitch.
// It will make multiple calls to vmemset_32_2d_asm as needed; for instance
// if the dst pitch is a multiple of 1/4 vector (but not 1/2) it will
// make 4 calls, each of which fills every 4th row.
//
#if !( defined(__HVXDBL__) || __HVX_LENGTH__ == 128)
#warning Probably you don't really want HVX64
#define VECN 64
#define VECLOGN 6
#else
#define VECN 128
#define VECLOGN 7
#endif
// r0 -> dstp
// r1 -> fillval
// r2 -> width
// r3 -> height
// r4 -> dst_stride
#define dstp r0
#define fillval r1
#define width r2
#define height r3
#define dst_stride r4
#define dstp2 r5 // pointer to the second..last vectors of a row
#define startoff r7 // dst & (VECN-1): byte offset within the first vector
#define endpos r8 // startoff+width; later biased by -(VECN+1) for middle-count math
#define middlecount r9 // inner-loop trips: (actual middlecount - 1) / 4
#define tmp r10
#define qnLeft q0 // inverted 'start' mask (stores done where this is 0)
#define qRight q1 // 'end' mask (1s up to the last byte of the row)
#define vFill v0 // splatted fill value, rotated to match dst alignment
.globl vmemset_32_2d_asm
.balign 16
.type vmemset_32_2d_asm,@function
//
// void vmemset_32_2d_asm(void *dst, int val, int width, int height, int stride)
// Fill a width x height byte rectangle at dst (row pitch 'stride', which must
// be a multiple of the vector size) with the 32-bit pattern 'val', using
// masked vector stores so bytes outside the rectangle are untouched.
// Dispatches on vectors-per-row: 1, 2, or first+last plus a 'middlecount'
// of full middle vectors (special-cased for 1-2 and 3-4; 5+ uses an inner loop).
//
vmemset_32_2d_asm:
{
startoff = and(dstp,#VECN-1); // destination offset within a vector
qnLeft = vsetq(dstp); // the 'left' mask
vFill = vsplat(fillval);
}
// set qnLeft up to be 1 1 1 .. 0 0 0
// to be the 'start' mask for (inverted) the first output vector in each row;
// and qRight to 1 1 1 .. 0 0 0 to be the 'end' mask for the last vector.
// If there is only one output vector per row we'll and them later.
{
p0 = cmp.gt(width,#0) // protect against ht <=0, wid <= 0
p0 = cmp.gt(height,#0) // (ANDed with the previous p0: both must hold)
//dstp = sub(dstp,startoff); // align the dest address (not needed)
endpos = add(startoff,width); // start off + wid: if this > VECN, needs >1 vector write per row
} {
if(!p0) jumpr:nt r31; // done if ht <= 0 or wid <= 0
#if __HEXAGON_ARCH__ < 62
tmp = and(endpos,#VECN-1) // test if end falls on a boundary
qRight = or(qnLeft,!qnLeft); // the 'end' mask (last partial write - all 1's if falls on a boundary).
} {
p1 = cmp.eq(tmp,#0); // end exactly on a vector boundary?
if( p1.new) jump:nt .L_mskskip; // yes: keep the all-ones mask
m0 = dst_stride; // stride
} {
qRight = vsetq(endpos); // this the 'end' mask in the general case.
}
.L_mskskip:
#else
qRight = vsetq2(endpos); // v62+: vsetq2 handles the on-boundary case directly
m0 = dst_stride;
}
#endif
{
vFill = vlalign( vFill,vFill,startoff); // adjust 16- or 32-bit value for pointer addr.
// test if > 1 output vector per row is needed.
endpos = add(endpos,#-(VECN+1)); // for finding middlecount
P0=!cmp.gt(r8,#VECN); // true if single vector per row (same-packet read: r8 is the PRE-bias endpos)
if( P0.new) jump:nt .L_row_onevec; // r8 >= 0 if not taken.
} {
p0 = !cmp.gt(endpos,#VECN-1) // true if 2 vectors per row
loop0(.L_loop_twovec,height);
dstp2 = add(dstp,#VECN) // needed in the loop
if( p0.new ) jump:nt .L_row_twovec;
} {
p0 = tstbit(endpos,#VECLOGN) // p0 is true if middlecount is odd #
p2 = cmp.gt( endpos, #VECN*5-1) // true if middlecount >= 5
p3 = cmp.gt( endpos, #VECN*3-1) // true if middlecount >= 3
} {
if(p2) jump:nt .L_handle_general_outer; // go handle the middlecount >=5 situation
if(p3) jump:nt .L_handle_general_outer_34; // middlecount = 3 or 4
} {
loop0(.L_loop_general_outer_12,height);
}
// this loop is used when the middlecount is 1 or 2:
// masked first store, 1 or 2 full middle stores, masked last store per row.
//
.balign 16
.L_loop_general_outer_12:
{
if( !qnLeft ) vmem(dstp++m0) = vFill; // (possibly partial) first vector of the row
dstp2 = add(dstp,#VECN); // same-packet read: based on pre-increment dstp
} {
if(!p0)vmem(dstp2++#1) = vFill; // do this if middlecount is 2.
} {
vmem(dstp2++#1) = vFill;
} {
if( qRight ) vmem(dstp2+#0) = vFill; // (possibly partial) last vector of the row
}:endloop0;
{ jumpr r31; } // !! all done
// this loop is used when the middlecount is 3 or 4
//
.balign 16
.L_handle_general_outer_34:
{
loop0(.L_loop_general_outer_34,height);
}
.balign 16
.L_loop_general_outer_34:
{
if( !qnLeft ) vmem(dstp++m0) = vFill;
dstp2 = add(dstp,#VECN);
} {
if(!p0)vmem(dstp2++#1) = vFill; // only when 4.
} {
vmem(dstp2++#1) = vFill;
} {
vmem(dstp2++#1) = vFill;
} {
vmem(dstp2++#1) = vFill;
}
{
if( qRight ) vmem(dstp2+#0) = vFill;
}:endloop0;
{ jumpr r31; } // !! all done
// This loop is used when the 'inner' vectors are at least 5.
// A lot of strangeness here to avoid having a short inner loop with a low loop count
// inside an outer loop with a large loop count...
//
// inner part consists of:
// - unconditional store
// - conditional store #1
// - conditional store pair #2
// - loop of 4 stores
// So the plan is like this (mc = actual middle count)
//
// mc cond#1 cond#2 innerloops
// 5 0 0 1
// 6 1 0 1
// 7 0 1 1
// 8 1 1 1
// 9 0 0 2
// 10 1 0 2
// etc.
// So the "conditional store#1 is done when p0 is false, we already have that.
// To get the rest:
// (1) subtract VECN from endpos; bit VECLOGN+1 will give the condition for #2 in p1
// (2) >> that result by VECLOGN+2 to get the innerloop count.
//
//
.L_handle_general_outer:
{
tmp = add(endpos, #-VECN );
loop1(.L_loop_general_outer,height );
} {
middlecount = lsr(tmp,#VECLOGN+2) // # actually (middlecount-1) /4
p1 = tstbit(tmp,#VECLOGN+1) // p1 is true if actual middlecount is 4*k or 4*k+3
} {
loop0(.L_loop_general_inner,middlecount );
}
.balign 16
.L_loop_general_outer:
{
if( !qnLeft ) vmem(dstp++m0) = vFill; // (possibly partial) first vector
dstp2 = add(dstp,#VECN);
} {
vmem(dstp2++#1) = vFill; // unconditional middle store
} {
if(!p0)vmem(dstp2++#1) = vFill; // conditional store #1
} {
if(p1)vmem(dstp2++#1) = vFill; // conditional store pair #2
} {
if(p1)vmem(dstp2++#1) = vFill;
}
.balign 16
.L_loop_general_inner: // 4 full stores per trip, 'middlecount' trips
{
vmem(dstp2++#1) = vFill;
} {
vmem(dstp2++#1) = vFill;
} {
vmem(dstp2++#1) = vFill;
} {
vmem(dstp2++#1) = vFill;
}:endloop0;
{
loop0(.L_loop_general_inner,middlecount ); // re-arm inner loop for next row
if( qRight ) vmem(dstp2+#0) = vFill; // (possibly partial) last vector
}:endloop1;
{ jumpr r31; } // !! all done
////////////////////////////////////////////////////////////////////////////////////////////
// two vectors per row
//
.balign 16
.L_row_twovec: // only two output vector per row...
.L_loop_twovec:
{
if( !qnLeft ) vmem(dstp++m0) = vFill;
} {
if( qRight ) vmem(dstp2++m0) = vFill;
}:endloop0;
{ jumpr r31; } // !! all done
////////////////////////////////////////////////////////////////////////////////////////////
// only one vector per row
//
.balign 16
.L_row_onevec: // only one output vector per row...
{
loop0(.L_loop_onevec,height); // set up 1-vector loop
qnLeft = or(qnLeft,!qRight); // make combined mask (left OR not-right)
}
.balign 16
.L_loop_onevec:
{
if( !qnLeft ) vmem(dstp++m0) = vFill;
}:endloop0;
{ jumpr r31; } // !! all done
.LtmpX:
.size vmemset_32_2d_asm, .LtmpX-vmemset_32_2d_asm
#undef dstp
#undef fillval
#undef width
#undef height
#undef dst_stride
#undef dstp2
#undef startoff
#undef endpos
#undef middlecount
#undef tmp
#undef qnLeft
#undef qRight
#undef vFill
////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////
// General vmemset_2D, which allows any rowpitch,
// by calling the other copy multiple times as needed.
// The better the alignment of the supplied row pitches, the fewer
// calls are needed.
//
// Note that this does *not* check for dst_pitch == wid
// (which could be done in a single fill much more efficiently).
//
// r0 -> dstp
// r1 -> fillval
// r2 -> width
// r3 -> height
// r4 -> dst_stride
//
#define dstp r0
#define fillval r1
#define width r2
#define height r3
#define dst_stride r4
#define alignlog r6 // ct0(stride): # of trailing zero bits in the row pitch
#define itercount r8
#define iter_dest r20 // per-call dst (callee-saved across the calls below)
#define iter_fillval r21
#define iter_wid r22
#define iter_height r23
#define iter_stride r24 // stride << alignlog: vector-aligned pitch for each call
#define keep_stride r25 // original (unaligned) row pitch
#define iter_counter r26 // remaining calls
#define iter_dec_at r27 // counter value at which per-call height drops by 1
.globl vmemset_32_2d_general_asm
.balign 16
.type vmemset_32_2d_general_asm,@function
//
// Same interface as vmemset_32_2d_asm but with no alignment restriction on
// dst_stride: makes K = (vector size / alignment of stride) interleaved calls
// to vmemset_32_2d_asm, each filling every K-th row.
//
vmemset_32_2d_general_asm:
////
// if ht <= 1, or if row pitch is multiples of VECN, just go do
// the regular vmemset.
// @@ could also check width=1, which could be just a scalar store loop.
//
{
r7= fp; // stack frame is speculative...
memd(sp+#-40)=r21:20 // since we might just jump to the other function.
// (old sp - 40 == new sp + 0 after allocframe(#32))
alignlog = ct0(dst_stride);
allocframe(#32)
} {
p1 = cmp.gt( height,#1); // false if only one row
p1 = !cmp.gt( alignlog,#VECLOGN-1 ); // false if aligned (ANDed with previous p1)
if( !p1.new ) jump:nt .L_jjtoit // jump if either condition false
if( p1.new) memd(sp+#8)=r23:22
} {
itercount = #1;
alignlog = sub( #VECLOGN, alignlog ); // will be >= 1
memd(sp+#16)=r25:24
}
// K = (1<<alignlog) is the number of loops we need.
// at least 2; at most VECN.
// (if ht is less, we only do that many, each will be 1 row).
// Note that e.g. if K=4 and ht = 33,
// Four calls are done, and the 'ht' values to the calls will be
// 9, 8, 8, 8
// this is done by finding the first height (ht/K, rounded up)
// and then figuring out when the height should be reduced by 1
// A value is placed in iter_dec_at, when it matches the downcount (itercount), the
// remaining operations will be smaller in height by 1.
//
{
itercount = asl( itercount, alignlog); // 'K'
iter_stride = asl( dst_stride, alignlog); // dst row pitch for each iter (aligned)
iter_dest = dstp;
memd(sp+#24)=r27:26
} {
iter_dec_at = add(itercount,#-1) // K-1
iter_counter = min( itercount,height); // # of loops to do.
}
// iter_height is (height + K-1) >> alignlog
// iter_dec_at is iter_counter - ( height&(K-1))
// which is 0 when ht < K, and K-(height%K) otherwise.
{
iter_wid = width; // width
iter_height = add(height,iter_dec_at); // height + K-1
iter_dec_at = and(height,iter_dec_at); // height & (K-1)
} {
iter_dec_at = sub(iter_counter,iter_dec_at); // K - (ht&(K-1)) [->zero when ht < K]
iter_fillval = fillval
keep_stride = dst_stride
iter_height = asr( iter_height, alignlog) // height for the 1st operation
}
// iter_counter = # of loops to do (>= 1)
// r20..r24 = values for r0..4 for next call
// iter_dec_at = loop count at which 'iter_height' needs to be 1 less
// keep_stride = original row stride (for bumping address between call).
//
.balign 16
.L_rcgloop:
{
r1:0 = combine(iter_fillval,iter_dest) // ptr, fillval
r3:2 = combine(iter_height,iter_wid) // width, height
r4 = iter_stride // stride
call vmemset_32_2d_asm
} {
iter_counter = add(iter_counter,#-1) // count the loop...
iter_dest = add(iter_dest,keep_stride) // bump dest pointer by original dest pitch
} {
p1 = cmp.eq(iter_counter,iter_dec_at) // time to dec 'ht'?
if( p1.new ) iter_height = add(iter_height,#-1)
p0 = cmp.gt( iter_counter,#0)
if( p0.new ) jump:t .L_rcgloop // loop till done
}
// restore registers, and done...
{
r21:20 = memd(sp+#0 )
r23:22 = memd(sp+#8 )
} {
r25:24 = memd(sp+#16 )
r27:26 = memd(sp+#24 )
} {
dealloc_return
}
.L_jjtoit:
/// call becomes a single call to vmemset_32_2d_asm if the
// row pitch is aligned, or if ht <= 1.
// undo the 'allocframe' we started...
// and just go to vmemset_2d_asm
// no regs have changed except fp and sp (and r6,r7 clobbered, which is ok)
{ sp = add(fp,#8);
fp = r7;
jump vmemset_32_2d_asm // tail-call: its ret returns to our caller
}
.LtmpY:
.size vmemset_32_2d_general_asm, .LtmpY-vmemset_32_2d_general_asm
|
XiaoMi/nnlib | 4,939 | hexagon/asm_src/maxpool_nonaligned_hvx.S | /*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
*/
#if 0
for (z = 0; z < out_depth; z++) {
/* foreach window y * foreach window x */
sum = 0;
for (in_y = start_y; in_y < end_y; in_y++) {
for (in_x = start_x; in_x < end_x; in_x++) {
uint32_t data = in0[z + in_depth * in_x + in_depth * in_width * in_y];
sum = max(sum,data);
}
}
out0[z] = sum;
}
#endif
.global maxpool_nonaligned_hvx
.type maxpool_nonaligned_hvx, @function
.balign 32
maxpool_nonaligned_hvx:
/* ============================================================================ */
/* Byte-wise max pooling over one win_width x win_height window. */
/* out[z] = max over (x,y) in window of in[z + depth*x + depth*width*y], */
/* computed 128 depth bytes per outer pass with unaligned vector loads; */
/* the (possibly unaligned) destination is written with masked stores. */
/* NOTE(review): srco is advanced by 128 per pass, so this presumably */
/* assumes depth-major layout with in_depth the innermost dimension -- */
/* confirm against the C model above. */
#define dsto r0 //dest ptr
#define srco r1 //src ptr
#define image_depth r2 //num bytes (remaining depth; counted down by 128 per pass)
#define win_width r3
#define win_height r4
#define image_width r5
#define stride r7 //byte skip from end of one window row to start of next
#define stride0 r8
#define c0101 r9
#define src r10 //roving source pointer within the window
#define width r11 //write width: bytes stored this pass (128 or the tail)
#define dalign r12 //dsto & 127: destination misalignment
#define mdsto r13 //-dsto: rotate amount for the masked stores
#define z1z0 v1:0
#define z0 v0 //running byte-wise max
#define z1 v1
#define x0 v2 //current (unaligned) input vector
#define y0 v3
#define z2 v4
#define z3 v5
#define vzero v6
#define d0 v7 //z0 rotated into destination alignment
#define qprolog q0 //mask: bytes BEFORE dsto (suppressed in first store)
#define qepilog q1 //mask: bytes of the wrapped tail (second store)
/* ============================================================================ */
{
M0 = image_depth // window x-step: in_depth bytes
stride = sub(image_width, win_width)
src = srco
} {
loop0(.L_horz, win_width)
z0 = #0 // max accumulator starts at 0 (inputs are unsigned)
} {
stride = mpyi(stride, image_depth) // bytes to skip to the next window row
loop1(.L_vert, win_height)
srco = add(srco, #128) // next 128-byte depth chunk
}
/* ============================================================================ */
.balign 32
.L_vert:
.L_horz:
{
x0 = vmemu(src++M0) //+in_depth* in_x (unaligned load)
} {
z0.ub = vmax(x0.ub, z0.ub) //running byte-wise max
}:endloop0
{
src = add(src, stride) //advance to next window row
loop0(.L_horz, win_width)
}:endloop1
{
image_depth = add(image_depth, #-128) //160
loop1(.L_vert, win_height) //re-arm for the next depth chunk
dalign = and(dsto, #127) //0
width =image_depth //same-packet read: pre-decrement value (bytes left incl. this chunk)
} {
p0 = cmp.gt(image_depth, #0) //0: more chunks after this one?
if(p0.new) width = #128 //128: full store unless this is the tail
mdsto = sub(#0, dsto)
qprolog = vsetq(dsto) //vmem(dsto++#1) = y0 onaligned //76543210
} {
d0 = vror(z0, mdsto) //54321076: rotate result to destination alignment
dalign = add(dalign, width) //128: end position within the vector pair
} {
qepilog = vsetq(dalign) //________: mask for the wrapped tail
p1 = cmp.gt(dalign, #127) //0 is block not less than 128 bytes
if(p1.new) jump:nt .L_gt
}
{ // output fits in a single vector:
qepilog = and(qepilog, !qepilog) //________: clear epilog (no second store)
qprolog = or(qprolog, !qepilog) //________: also mask off bytes past the end
}
.L_gt:
{
z0 = #0 //reset max accumulator for next chunk
if( qepilog) vmem(dsto+#1) = d0 //________: wrapped tail into the next vector
src = srco
} {
srco = add(srco, #128)
if(!qprolog) vmem(dsto++#1) = d0 //76543210: main (masked) store
if(p0) jump .L_vert //more depth chunks to do
}
jumpr r31
.L_end:
/*==============================================================================*/
.size maxpool_nonaligned_hvx, .L_end-maxpool_nonaligned_hvx
|
XiaoMi/nnlib | 25,406 | hexagon/asm_src/gvconv2dbbb_h.S | /*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*======================================================================*/
/* FUNCTIONS : gvmmpybbw_asm */
/* */
/* DESCRIPTION */
/* Perform gvm vector matrix multiply, result left at */
/* 32bits */
/* */
/* ARCHITECTURE : QDSP6V6 + HVX */
/*======================================================================*/
/* REVISION HISTORY: */
/* ================= */
/* */
/* Author Date Comments */
/* ------------------------------------------------------------- */
/* DJH 03/07/16 created */
/* DJH 05/10/16 added post add for x and y offset*/
/* DJH 07/10/16 rewrote pre-transpose */
/* DJH 09/16/16 fix over prefetch by 16 now 8 */
/*======================================================================*/
/* CYCLE-COUNT: */
/* -> K*N/256+11*N/4+24 */
/* */
/* MEMORY */
/* CODESIZE = 960 bytes */
/* STACK = 48 bytes */
/* ASSUMPTIONS */
/* y and z are 128 byte aligned */
/* x is 8-byte aligned */
/* N%4=0 K%16=0 M%32=0 */
/* C MODEL */
/*======================================================================*/
#if 0
void gvmmpybbw_cn(uint8 * a, uint8 * b, int * c, int N, int M, int K) {
int i, j, k;
int32 sum;
uint8 a_val, b_val;
for (j=0; j < M; j++) {
for (i=0; i < N; i++) {
sum = 0;
for (k=0; k < K; k++) {
a_val = a[i*K+k];
b_val = b[k*M+j];
sum += a_val * b_val ;
}
c[i*M+j] = sum;
}
}
return;
}
#endif
/*=============================================================================*/
.text
.file "gvconv2dbbb_h.S"
.global gvconv2dbbb_asm
.balign 32
.type gvconv2dbbb_asm, @function
gvconv2dbbb_asm:
/*=============================================================================*/
#define ptr_x r0 //data
#define ptr_yi r1 //weights
#define ptr_z r2 //results
#define in_width r3 //(pad_x+in_width) * depth
#define out_width r4 //out_width
#define m r5 //is stride of the output matrix always mult of 32
#define stride_depth r6 //0 stride|depth between computations
#define filt_width r7 //1 depth*filt_width
#define filt_height r8 //2 filt_height lines per filter
#define out_height r9 //3 number of vertical lines to perform
#define ptr_datasum r10 //4
#define ptr_weightsum r11 //5
#define ptr_max r12 //6
#define ptr_biasbuf r14 //7 sat8 ((0x8000 + (x + biass)*recip_level)>>16)
#define recip_level r15 //8
#define PREFETCH r28 //64
/*=============================================================================*/
#define sel r8
#define len r9
#define filt_skip r13 //the skip back after the fot_width is done for next filt_y
#define stride3_1 r1
#define ptr_x0 r11
#define stride4 r13 //
#define stride r25
#define next_outputs r23 //jump to input ptr for next set of outputs
#define ptr_y r24 //
#define col_count r22
#define xsum r0 //kernel sum * filt_offset computed externally
#define pre_x r26
#define fetch_count p2 //r27 is free !
#define c4 r6
#define round_amt r6 //amount to add to bias buf offset computation
#define sel0 r16
#define sel1 r18
#define sel2 r19
#define sel3 r20
#define one r17
#define tmp_ptr_z r21
#define MSTRIDE M0 //stride*depth
#define M4STRIDE_1 M1 //3*stride*depth-16 0-1-2-3
//01234567
#define x07x04x03x00 r21:20 //11-----1
#define x07x04 r21 //11-----1
#define x03x00 r20 //1------1
#define x0fx0cx0bx08 r15:14 //1111---1
#define x0fx0c r15 //1111---1
#define x0bx08 r14 //111----1
#define x17x14x13x10 r19:18 //11------
#define x17x14 r19 //11------
#define x13x10 r18 //1-------
#define x1fx1cx1bx18 r17:16 //1111----
#define x1fx1c r17 //1111----
#define x1bx18 r16 //111-----
#define x27x24x23x20 r21:20 //---111--
#define x27x24 r21 //---111--
#define x23x20 r20 //---11---
#define x2fx2cx2bx28 r19:18 //---1111-
#define x2fx2c r19 //---11111
#define x2bx28 r18 //---1111-
#define x37x34x33x30 r15:14 //----11--
#define x37x34 r15 //----11--
#define x33x30 r14 //----1---
#define x3fx3cx3bx38 r17:16 //----1111
#define x3fx3c r17 //----1111
#define x3bx38 r16 //----111-
/*=============================================================================*/
#define z0 v0 //
#define z1 v1 //
#define z1z0 v1:0 //
#define z2 v2 //
#define z3 v3 //
#define z3z2 v3:2 //
#define x0 v4 //
#define x1 v5 //
#define x2 v6 //
#define x3 v7 //
#define y0 v8 //
#define y1 v9 //
#define y2 v10 //
#define y3 v11 //
#define vwsum v15 //
#define maxomaxe v13:12 //
#define maxe v12 //
#define maxo v13 //
#define vc8000 v14 //
#define biasvec v18 //
#define recipvec v16 //
#define rndvec v17 //
#define vpreds v19 //
/*=============================================================================*/
{
sel = ##0x01010101 // entry 0
len = #32 //
dcfetch(ptr_x)
} {
q0 = vsetq(len); // 1000
len = #64 //
round_amt = ##0x00008000 //
} {
vpreds = vand(q0, sel) //
q2 = vsetq(len); // 1100
len = #96 //
rndvec = vsplat(round_amt) //
} {
q1 = and(q2, !q0) // 0100
q3 = vsetq(len) // 1110
sel = add(sel, sel) //02020202
dcfetch(ptr_x+#32)
} {
vpreds|= vand(q1, sel) //
q2 = and(q3, !q2) // 0010
q3 = not(q3) // 0001
sel = add(sel, sel) //04040404
} {
vpreds|= vand(q2, sel) //
sel = add(sel, sel) //08080808
dcfetch(ptr_x+#64)
} {
vpreds|= vand(q3, sel) // entry 3 10101010 selects all zero
stride_depth = memw(sp+#0<<2) //extract stride*depth
filt_width = memw(sp+#1<<2) //extract filt_width*depth
} {
filt_height = memw(sp+#2<<2) //extract filt_height
out_height = memw(sp+#3<<2) //number of output lines
p0 = cmp.eq(filt_width, #1)
} {
ptr_datasum = memw(sp+#4<<2) //data sum ptr
ptr_weightsum = memw(sp+#5<<2) //ptr pre computed weight sum
filt_width = mpy(filt_width.L, stride_depth.L)
} {
ptr_max = memw(sp+#6<<2) //ptr pre computed max value in output
ptr_biasbuf = memw(sp+#7<<2) //read in the ptr to the bias buffer value
} {
biasvec = vmem(ptr_biasbuf+#0) //
recip_level = memw(sp+#8<<2) //
p3 = cmp.gt(filt_width, #192)
} {
recipvec = vsplat(recip_level) //
allocframe(#72) //
PREFETCH = #96
} {
memw(sp+#68) = r28
if(p0) PREFETCH = add(PREFETCH, #-32)
} {
memd(sp+#32) = r25:24 //
memd(sp+#0) = r17:16 //
stride = lsr(stride_depth, #16) //
} {
memd(sp+#16) = r21:20 //
memd(sp+#24) = r23:22 //
stride_depth = mpy(stride_depth.H, stride_depth.L)
} {
M0 = stride_depth //
memd(sp+#8) = r19:18 //
memd(sp+#40) = r27:26 //
} {
memw(sp+#48) = ptr_x //
memw(sp+#52) = ptr_yi //
r17 = asl(stride_depth, #2)
} {
if(!p3) PREFETCH = r17 //skip full block
} {
vwsum = vmem(ptr_weightsum+#0) //
stride3_1 = addasl(stride_depth, stride_depth,#1) //3*stride
r16 = ##0x80000001 //max negative
} {
stride3_1 = sub(#16, stride3_1) //
next_outputs = mpyi(filt_height, in_width)
vc8000 = vsplat(r16) //
memw(sp+#56) = out_width //
} {
M1 = stride3_1 // add to
stride4= asl(stride_depth, #1) //4-2*stride to correct for outer pipeline
stride3_1 = add(stride3_1, #16) //used for dc prefetch
//p3 = cmp.gt(stride_depth, #96) //is !(D <= 96) heuristic to fix prefetch
} {
memw(sp+#60) = m //
next_outputs = sub(next_outputs, stride4)
filt_skip = sub(filt_width, in_width)
filt_width = lsr(filt_width, #4) //filt_width / 16
} {
maxe= vmem(ptr_max+#0)
in_width = mpyi(in_width, stride) //
filt_width = add(filt_width, #-1)
stride3_1 = sub(stride3_1, stride_depth) //32 - 4*stride_depth used for dc prefetch
}
/*============================================================================*/
.balign 32
.L_height:
{
ptr_x0 = memw(sp+#48) //ptr_x
out_height = add(out_height, #-1) //
} {
col_count = memw(sp+#56) //out_width
memw(sp+#48) += in_width //ptr_x=add(ptr_x,in_width) //ptr_x+=in_width
pre_x = add(ptr_x0, PREFETCH)
}
.balign 32
.L_width:
{
ptr_y = memw(sp+#52) //ptr_y = ptr_yi initialize filter pointer
fetch_count = !cmp.eq(r2, r2) //#0
} {
loop1(.L_filt_height, filt_height) //[P, 0]for(filt_y=0; filt_y < n; filt_y+=1){
y0 = vmem(ptr_y++#2) //[0, 0]32x4
dcfetch(pre_x)
} {
loop0(.L_filt_width, filt_width) //[P, 0]ki is k1/16 - 1
y1 = vmem(ptr_y+#-1) //[0, 1]32x4
pre_x = add(pre_x, stride_depth)
} {
z1z0 = vcombine(vwsum, vwsum) //[P, 0]
x0fx0cx0bx08 = memd(ptr_x0+#8) //[0, 2]
x07x04x03x00 = memd(ptr_x0++MSTRIDE) //[0, 2]
} {
z3z2 = vcombine(vwsum, vwsum) //[P, 0]
x1fx1cx1bx18 = memd(ptr_x0+#8) //[0, 3]
x17x14x13x10 = memd(ptr_x0++MSTRIDE) //[0, 3]
}
.balign 32
.L_filt_height:
{
z0.uw += vrmpy(y0.ub, x03x00.ub) //[0, 4]
z1.uw += vrmpy(y0.ub, x13x10.ub) //[0, 4]
y2 = vmem(ptr_y++#2) //[0, 4]32x4
dcfetch(pre_x)
} {
z0.uw += vrmpy(y1.ub, x07x04.ub) //[0, 5]
z1.uw += vrmpy(y1.ub, x17x14.ub) //[0, 5]
y3 = vmem(ptr_y+#-1) //[0, 5]32x4
pre_x = add(pre_x, stride_depth)
} {
z0.uw += vrmpy(y2.ub, x0bx08.ub) //[0, 6]
z1.uw += vrmpy(y2.ub, x1bx18.ub) //[0, 6]
} {
fetch_count = not(fetch_count) //[0, 6]
if(fetch_count) pre_x = add(pre_x, stride3_1) //[0, 6.5]
x2fx2cx2bx28 = memd(ptr_x0+#8) //[0, 6.5]
x27x24x23x20 = memd(ptr_x0++MSTRIDE) //[0, 6.5]
} {
z0.uw += vrmpy(y3.ub, x0fx0c.ub) //[0, 7]
z1.uw += vrmpy(y3.ub, x1fx1c.ub) //[0, 7]
} {
p0 = cmp.eq(filt_width,#0)
if (p0.new) jump:nt .L_skip
x3fx3cx3bx38 = memd(ptr_x0+#8) //[0, 7]
x37x34x33x30 = memd(ptr_x0++M4STRIDE_1)//[0, 7]
}
.balign 32
.L_filt_width:
{
z2.uw += vrmpy(y0.ub, x23x20.ub) //[0, 8]
z3.uw += vrmpy(y0.ub, x33x30.ub) //[0, 8]
y0 = vmem(ptr_y++#2) //[1, 0]32x4
dcfetch(pre_x)
} {
z2.uw += vrmpy(y1.ub, x27x24.ub) //[0, 9]
z3.uw += vrmpy(y1.ub, x37x34.ub) //[0, 9]
y1 = vmem(ptr_y+#-1) //[1, 1]32x4
pre_x = add(pre_x, stride_depth)
} {
z2.uw += vrmpy(y2.ub, x2bx28.ub) //[0,10]
z3.uw += vrmpy(y2.ub, x3bx38.ub) //[0,10]
x0fx0cx0bx08 = memd(ptr_x0+#8) //[1, 2]
x07x04x03x00 = memd(ptr_x0++MSTRIDE) //[1, 2]
} {
z2.uw += vrmpy(y3.ub, x2fx2c.ub) //[0,11]
z3.uw += vrmpy(y3.ub, x3fx3c.ub) //[0,11]
x1fx1cx1bx18 = memd(ptr_x0+#8) //[1, 3]
x17x14x13x10 = memd(ptr_x0++MSTRIDE) //[1, 3]
} {
z0.uw += vrmpy(y0.ub, x03x00.ub) //[1, 4]
z1.uw += vrmpy(y0.ub, x13x10.ub) //[1, 4]
y2 = vmem(ptr_y++#2) //[1, 4]32x4
dcfetch(pre_x)
} {
z0.uw += vrmpy(y1.ub, x07x04.ub) //[1, 5]
z1.uw += vrmpy(y1.ub, x17x14.ub) //[1, 5]
y3 = vmem(ptr_y+#-1) //[1, 5]32x4
pre_x = add(pre_x, stride_depth)
} {
z0.uw += vrmpy(y2.ub, x0bx08.ub) //[1, 6]
x2fx2cx2bx28 = memd(ptr_x0+#8) //[1, 6.5]
dcfetch(pre_x) //extra dcfetch
} {
if(fetch_count) pre_x = add(pre_x, stride3_1) //[0, 6.5]
fetch_count = not(fetch_count)
z1.uw += vrmpy(y2.ub, x1bx18.ub) //[1, 6]
x27x24x23x20 = memd(ptr_x0++MSTRIDE) //[1, 6.5]
} {
z0.uw += vrmpy(y3.ub, x0fx0c.ub) //[1, 7]
z1.uw += vrmpy(y3.ub, x1fx1c.ub) //[1, 7]
x3fx3cx3bx38 = memd(ptr_x0+#8) //[1, 7]
x37x34x33x30 = memd(ptr_x0++M4STRIDE_1)//[1, 7]
}:endloop0
.L_skip:
{
z2.uw += vrmpy(y0.ub, x23x20.ub) //[1, 8]
z3.uw += vrmpy(y0.ub, x33x30.ub) //[1, 8]
ptr_x0 = sub(ptr_x0, filt_skip) //[E, 0]move to next line ptr_y keeps going
y0 = vmem(ptr_y++#2) //[0, 0]32x4
} {
fetch_count = !cmp.eq(r2, r2) //#0
pre_x = add(ptr_x0, PREFETCH) //
z2.uw += vrmpy(y1.ub, x27x24.ub) //[1, 9]
z3.uw += vrmpy(y1.ub, x37x34.ub) //[1, 9]
} {
loop0(.L_filt_width, filt_width) //[P, 0]ki is k1/16 - 1
y1 = vmem(ptr_y+#-1) //[0, 1]32x4
dcfetch(pre_x)
pre_x = add(pre_x, stride_depth)
} {
z2.uw += vrmpy(y2.ub, x2bx28.ub) //[1,10]
z3.uw += vrmpy(y2.ub, x3bx38.ub) //[1,10]
x0fx0cx0bx08 = memd(ptr_x0+#8) //[0, 2]
x07x04x03x00 = memd(ptr_x0++MSTRIDE) //[0, 2]
} {
z2.uw += vrmpy(y3.ub, x2fx2c.ub) //[1,11]
z3.uw += vrmpy(y3.ub, x3fx3c.ub) //[1,11]
x1fx1cx1bx18 = memd(ptr_x0+#8) //[0, 3]
x17x14x13x10 = memd(ptr_x0++MSTRIDE) //[0, 3]
}:endloop1
{
ptr_x0 = sub(ptr_x0, next_outputs) //#0reset data ptr to next set of 4
xsum = memw(ptr_datasum++#1<<2) //
y0 = rndvec //out0 = 0x8000
sel0 = extractu(ptr_z, #2, #5) //xx00000
} {
x0 = vsplat(xsum) //
m = memw(sp+#60) //
pre_x = add(ptr_x0, PREFETCH) //generate l1 prefetch ptr
one = #1 //
} {
sel0 = asl(one, sel0) //which predicate for out 0
z0.w = vadd(z0.w, x0.w) //add data sum
p0 = cmp.gt(col_count, #1) // are there at least 2 levt?
y1 = rndvec //#1out1 = 0x8000
} {
maxe.w = vmax(maxe.w, z0.w) //see if z0 is max
z0.w = vadd(z0.w, biasvec.w) //add data sum
sel0 = vsplatb(sel0) // 01 -> 01010101
if(p0) xsum = memw(ptr_datasum++#1<<2) // #1
} {
dcfetch(ptr_x0) //
q0 = vand(vpreds, sel0) //
x1 = vsplat(xsum) //#1
p1 = cmp.gt(col_count, #2) //#2
} {
y0.w += vmpyie(z0.w, recipvec.uh) //
z1.w = vadd(z1.w, x1.w) //#1
if(p1) xsum = memw(ptr_datasum++#1<<2) //#2
y2 = rndvec //#2out2 = 0x8000
} {
x1.w = vadd(z1.w, biasvec.w) //#1add data sum
if(!p0) z1 = vc8000 //#1
x2 = vsplat(xsum) //#2
p2 = cmp.gt(col_count, #3) //#3
} {
y0.h = vpacko(y0.w, y0.w) //>>16
maxe.w = vmax(maxe.w, z1.w) //#1
z2.w = vadd(z2.w, x2.w) //#2
if(p2) xsum = memw(ptr_datasum++#1<<2) //#3
} {
y1.w += vmpyie(x1.w, recipvec.uh) //#1
x2.w = vadd(z2.w, biasvec.w) //#2add data sum
if(!p1) z2 = vc8000 //#2
dcfetch(ptr_x0+#32) //
} {
y3 = rndvec //#3out3 = 0x8000
y0.ub = vpack(y0.h, y0.h):sat //sat8 <0, >255
tmp_ptr_z = add(ptr_z, m) //
x3 = vsplat(xsum) //#3
} {
sel1 = extractu(tmp_ptr_z, #2, #5) //#1
one = mux(p0, #1, #16) //#1
y2.w += vmpyie(x2.w, recipvec.uh) //#2
} {
sel1 = asl(one, sel1) //#1
y1.h = vpacko(y1.w, y1.w) //#1>>16
maxe.w = vmax(maxe.w, z2.w) //#2
z3.w = vadd(z3.w, x3.w) //#3
} {
if(q0) vmem(ptr_z+#0):nt = y0 //[E, ]store first 32bytes
sel1 = vsplatb(sel1) //#1
y2.h = vpacko(y2.w, y2.w) //#2>>16
x3.w = vadd(z3.w, biasvec.w) //#3add data sum
} {
dcfetch(ptr_x0+#64) //
q1 = vand(vpreds, sel1) //#1
y1.ub = vpack(y1.h, y1.h):sat //#1sat8 <0, >255
ptr_z = add(ptr_z, m) //
} {
col_count = add(col_count, #-4) //
if(p0)tmp_ptr_z = add(tmp_ptr_z, m) //#1
y3.w += vmpyie(x3.w, recipvec.uh) //#3
if(!p2) z3 = vc8000 //#3
} {
sel2 = extractu(tmp_ptr_z, #2, #5) //#2
one = mux(p1, #1, #16) //#2
if(p1)tmp_ptr_z = add(tmp_ptr_z, m) //#2
maxe.w = vmax(maxe.w, z3.w) //#3
} {
sel2 = asl(one, sel2) //#2
y3.h = vpacko(y3.w, y3.w) //#3>>16
sel3 = extractu(tmp_ptr_z, #2, #5) //#3
one = mux(p2, #1, #16) //#3
} {
if(q1) vmem(ptr_z+#0):nt = y1 //#1[E, ]store 2nd 32bytes
sel2 = vsplatb(sel2) //#2
y2.ub = vpack(y2.h, y2.h):sat //#2sat8 <0, >255
sel3 = asl(one, sel3) //#3
} {
if(p0)ptr_z = add(ptr_z, m) //#1
q2 = vand(vpreds, sel2) //#2
y3.ub = vpack(y3.h, y3.h):sat //#3sat8 <0, >255
sel3 = vsplatb(sel3) //#3 08 -> 08080808
} {
if(q2) vmem(ptr_z+#0):nt = y2 //#2[E, ]store 2nd 32bytes
if(p1)ptr_z = add(ptr_z, m) //#2
q3 = vand(vpreds, sel3) //#3
} {
if(q3) vmem(ptr_z+#0):nt = y3 //#3[E, ]store 2nd 32bytes
if(p2)ptr_z = add(ptr_z, m) //#3
p3 = cmp.gt(col_count, #0) //
if(p3.new) jump:t .L_width //
}//end cols per line
{
p1 = cmp.eq(out_height, #0) //
if(!p1.new) jump:t .L_height //
}//end lines per block
{
loop0(.L_peak, #5) //[P, 0]
c4 = #4 //
}
.L_peak:
{
maxomaxe=vshuff(maxe,maxe,c4) //[0, 0]
} {
maxe.w = vmax(maxo.w, maxe.w) //[0, 1]
c4 = add(c4, c4) //[0, 1]
}:endloop0
{ vmem(ptr_max+#0) = maxe //[E, 0]
}
/*=============================================================================*/
{ r17:16 = memd(sp+#0) //restore stack
r19:18 = memd(sp+#8) //Q
} {
r21:20 = memd(sp+#16) //Q
r23:22 = memd(sp+#24) //Q
} {
r25:24 = memd(sp+#32) //Q
r27:26 = memd(sp+#40) //Q
} {
r28 = memw(sp+#68) //Q
} {
dealloc_return //Q
}
.L_end:
/*=============================================================================*/
.size gvconv2dbbb_asm, .L_end-gvconv2dbbb_asm
|
XiaoMi/nnlib | 4,304 | hexagon/asm_src/qlrn_acc_h.S | /*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
*/
/*======================================================================*/
/* FUNCTIONS : qlrn_acc_asm */
/* */
/* DESCRIPTION */
/* X matrix to be squared horizontally and accumulated. */
/* */
/* */
/* ARCHITECTURE : QDSP6V6 + HVX */
/*======================================================================*/
/*======================================================================*/
#if 0
int32_t qlrn_acc_cn(int32_t numelem, const int16_t *ptr_xvec)
{
int32_t xvec;
int32_t xacc = 0;
for (int32_t i = 0; i < numelem; i++) {
xvec = *ptr_xvec;
ptr_xvec += 2;
xacc += (int32_t)(xvec * xvec);
}
return xacc;
}
#endif
/*=============================================================================*/
.text
.file "qlrn_acc_h.S"
.global qlrn_acc_asm
.type qlrn_acc_asm, @function
/*=============================================================================*/
#define xacc r0 //accumulation summation value
#define numelem r0 //numelem is number of elements to be squared+summed
#define ptr_xvec r1 //ptr to vector of elements
#define xvec r6 //
#define retreg r31 //
/*=============================================================================*/
qlrn_acc_asm:
//------------------------------------------------------------------------------
// int32_t qlrn_acc_asm(int32_t numelem, const int16_t *ptr_xvec)
//   Squares every other int16 element (stride of 2 halfwords = 4 bytes) and
//   accumulates the squares into a 32-bit sum; see the C reference above.
// In:   r0 = numelem (number of elements; NOTE(review): presumably must be
//            >= 1 — LC0 configured with 0 is not handled here, confirm callers)
//       r1 = ptr_xvec (ptr to int16 data, read with post-increment of 4 bytes)
// Out:  r0 = xacc (sum of squares)
// Clobbers: r6, LC0/SA0. Leaf routine; no stack use.
// NOTE: r0 is aliased as both numelem and xacc. This is safe because, within
//       a Hexagon packet, all reads observe pre-packet register values: the
//       loop0 setup reads numelem before the xor writes xacc (= r0) to zero.
//------------------------------------------------------------------------------
	{
	   loop0(.L_loop0,numelem)                     //loop-config (reads r0 before the xor below commits)
	   dcfetch(ptr_xvec+#0)                        //prefetch first cache line
	   xacc = xor(xacc,xacc)                       //xacc=0; (idiomatic register clear)
	   xvec = memh(ptr_xvec++#4)                   //xvec=*ptr_xvec; ptr_xvec+=2 (elements)
	}
	.L_loop0:
	{
	   xvec = mpyi(xvec,xvec)                      //(int32_t)(xvec*xvec);
	}
	{
	   xacc = add(xacc,xvec)                       //xacc+=(int32_t)(xvec*xvec)
	   xvec = memh(ptr_xvec++#4)                   //load next element (software-pipelined:
	                                               // the final iteration reads 2 bytes past the
	                                               // last element; value is discarded.
	                                               // NOTE(review): benign over-read — confirm
	                                               // input buffers are padded)
	}:endloop0
	{
	   jumpr retreg                                //return; (xacc computation done, result in r0)
	}
.L_end:
/*=============================================================================*/
.size qlrn_acc_asm, .L_end-qlrn_acc_asm
|
XiaoMi/nnlib | 31,128 | hexagon/asm_src/gvconv2dbbb_circ6_d32_v65_h.S | /*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
Memory
CODESIZE 1120 bytes
STACK 112 bytes
Description
Utilize the v65 vrmpy instructions. Common wiehgts with 2 inputs and 2 outputs. 2 data inputs
are in a pair. Key is to feed each input with a different stream. Solution is to shuffle the
stream with a delayed version of itself. This doubles the size of the activations so a
smaller circular buffer of size filt_height*input depth*width*2.
Example depth = 16 shuffle blocks of 4 bytes together e.g. x00 =[x00.0,x00.1,x00.2,x00.3]
x00 x01 x02 x03|x10 x11 x12 x13|x20 x21 x22 x23|x30 x31 x32 x33
x40 x41 x42 x43|x50 x51 x52 x53|x60 x61 x62 x63|x70 x71 x72 x73
x80 x81 x82 x83|x90 x91 x92 x93|xa0 xa1 xa2 xa3|xb0 xb1 xb2 xb3
xc0 xc1 xc2 xc3|xd0 xd1 xd2 xd3|xe0 xe1 xe2 xe3|xf0 xf1 xf2 xf3
to
x00 x40 x01 x41 x02 x42 x03 x43|x10 x50 x11 x51 x12 x52 x13 x53|
x20 x60 x21 x61 x22 x62 x23 x63|x30 x70 x31 x71 x32 x72 x33 x73|
x40 x80 x41 x81 x42 x82 x43 x83|x50 x90 x51 x91 x52 x92 x53 x93|
x60 xa0 x61 xa1 x62 xa2 x63 xa3|x70 xb0 x71 xb1 x72 xb2 x73 xb3|
x80 xc0 x81 xc1 x82 xc2 x83 xc3|x90 xd0 x91 xd1 x92 xd2 x93 xd3|
xa0 xe0 xa1 xe1 xa2 xe2 xa3 xe3|xb0 xf0 xb1 xf1 xb2 xf2 xb3 xf3|
xc0 xc1 xc2 xc3 |xd0 xd1 xd2 xd3 |
xe0 xe1 xe2 xe3 |xf0 xf1 xf2 xf3 |
So each memd access into the buffer access two streams which are delayed from each other.
While this is occuring the sequence can be aligned so that the extra computation on the
ends can be minimized.
To further minimize memory the circular buffer is updated inside the kernel each
line.
The code only processes 32 sets of weights at once inner loop is optimized
to 6 packets.
*/
/*===============================================================================*/
.text
.file "gvconv2dbbb_circ6_d32_v65_h.S"
.global gvconv2dbbb_circ6_d32_v65_asm
.balign 32
.type gvconv2dbbb_circ6_d32_v65_asm, @function
gvconv2dbbb_circ6_d32_v65_asm:
/*===============================================================================*/
#define PV(VSRC) .word (0x1DFFE020+VSRC) //debug: dump vector reg VSRC
#define PS(SSRC) .word (0x1DFFE100+SSRC) //debug: dump scalar reg SSRC
/*===============================================================================*/
/* ---------------------------------- CALL REGS -------------------------------- */
/* Numbers in comments are word offsets into the caller frame (sp+#n<<2).       */
#define ptr_xi                 r0     //12 activation data
#define ptr_wi                 r1     //13 weights
#define ptr_zi                 r2     //14 results
#define next_inbuf_width       r3     //(pad_l+in_width+pad_r)
#define out_width_depth        r4     //next line amount
#define out_width              r5     //15 amount of work to be done
#define stride_h_w             r6     //30 stride_height, stride_width
#define in_depth               r22    //31 input depth multiples of 32
#define filt_width             r23    //32 horizontal filter width
#define filt_height            r8     //33 filt_height lines per filter
#define out_height             r9     //34 number of vertical lines to perform
#define ptr_filtsum            r24    //35 includes the computation filt_sum * in_offset + biasvec
#define ptr_max                r28    //36 maximum and minimum buffer
#define recip_level            r26    //37 255 / (MAX - MIN) - used to scale to bytes
#define out_width_32           r7     //38 actual out_width in depth32
#define ptr_cbufi              r16    //39 read buffer pointer
#define zshift                 r21    //40 extra shift on output before quantization
#define in_zero                r25    //41
#define store_cntrl            r11    //42
//#define ptr_equalize           r17    //43
#define cbuf_eob               r18    //18 end of circular buffer
#define cbuf_size              r19    //19 size in bytes of circ buf -1
/* --------------------------------- SCALER REGS ------------------------------- */
/* NOTE: these aliases deliberately reuse registers whose call-time value is     */
/* dead by the time the alias is live (e.g. r7, r2, r26).                        */
#define delta                  r7     //difference between stride height and filt_height
#define cm4                    r2     //shuffle/deal ints
#define col_count              r2     //horizontal counter
#define in_width_32            r3     //total input width in bytes in buffer
#define x41_x11                r15:14 //4n+1 inputs
#define x40_x10                r17:16 //4n+0 inputs
#define x30_x00                r15:14 //4n+0 inputs
#define x31_x01                r17:16 //4n+1 inputs
#define x50_x20                r15:14 //4n+0 inputs
#define x51_x21                r17:16 //4n+1 inputs
#define ptr_wi_ptr_xi          r1:0   //
#define fetch_ptr_base         r1     //base pointer for l1 prefetch
#define fetch_ptr              r10    //current pointer for l1 prefetch
#define ones                   r11    //3*stride
#define vplut                  r14    //lookup table for store control
#define align                  r15
#define stride                 r12    //current to next input
#define ptr_x0                 r26    //base input pointer
#define ptr_x10                r13    //current even input ptr
#define ptr_x11                r27    //current odd input ptr
#define ptr_w0                 r20    //even output depth 32 weights
#define ptr_z0                 r0     //even output depth 32 outputs
#define adjust                 r10    //
/* ---------------------------------- VEC REGS -------------------------------- */
#define wscale                 v11    //
#define vin_zero               v9     //
#define s05_s02                v15:14 //even output accs 2,5
#define s04_s01                v13:12 //even output accs 1,4
#define s03_s00                v11:10 //even output accs 0,3
#define s05                    v15    //even acc 5
#define s02                    v14    //even acc 2
#define s04                    v13    //even acc 4
#define s01                    v12    //even acc 1
#define s03                    v11    //even acc 3
#define s00                    v10    //even acc 0
#define s15_s12                v23:22 //even output accs 2,5
#define s14_s11                v21:20 //even output accs 1,4
#define s13_s10                v19:18 //even output accs 0,3
#define s13                    v19    //even output accs 3
#define s10                    v18    //even output accs 0
#define s14                    v21    //even output accs 4
#define s11                    v20    //even output accs 1
#define s15                    v23    //even output accs 5
#define s12                    v22    //even output accs 2
#define vpred0                 v24    //
#define vpred1                 v25    //
#define vpred2                 v16    //
#define vzero                  v30    //
#define w00                    v0     //weights even 0-31
#define w01                    v3     //weights even 32-63
#define vrecip                 v1     //reciprocal 255/MAx replicated
#define s0_sh                  v8     //round value
#define s1_sh                  v26    //round value
#define wsum0                  v2     //sum of weights column + bias add 0-31
#define d010                   v27    //even lines upper 16bit packed accs 0,1
#define d032                   v28    //even lines upper 16bit packed accs 2,3
#define d03210                 v28    //8bit shifted, packed saturated 0-3
#define d054                   v29    //even lines upper 16bit packed accs 4,5
#define maxo_maxe              v5:4   //packed maxes
#define maxo                   v5     //odd maxes
#define maxe                   v4     //even maxes
//#define mino_mine              v7:6   //packed mins
#define mino                   v18    //odd mins
#define mine                   v17    //even mins
#define gmax                   v7     //packed maxes (global, across whole call)
#define gmin                   v6     //packed mins  (global, across whole call)
#define SSR <<1:rnd:sat:shift         //simplify mpy instruction
/* --------------------------------------------------------------------------- */
/*-----------------------------------------------------------------------------
 * Body of gvconv2dbbb_circ6_d32_v65_asm.
 * 2-d convolution over d32-format activations using V65 vrmpy; each pass of
 * .L_width produces 6 output pixels x 32 output depths (two interleaved
 * accumulator streams s0x / s1x that are summed at the end, see file header).
 * Activations are read from a circular buffer [ptr_cbufi, ptr_cbufi+cbuf_size)
 * with independent wrap-around of the data pointer (ptr_x0) and the L1
 * prefetch pointer (fetch_ptr_base).  Accumulators are range-tracked into
 * gmax/gmin (written to ptr_max[0]/ptr_max[1] on exit), scaled by
 * vrecip = 255/max, shifted left by zshift, packed to u8 and stored under the
 * vpred1/vpred2 store masks (vplut selects the mask byte from the output
 * pointer's alignment bits).
 * Stack frame: 112 bytes; callee-saved r16-r27 preserved below.
 *---------------------------------------------------------------------------*/
{   allocframe(#112)                              //0th entry on stack is (112+8)/4=30 ints
    stride_h_w = memw(sp+#0<<2)                   //stride horizontal and vertical
} {
    memd(sp+#4<<2)  = r21:20                      //save 20,21
    memd(sp+#6<<2)  = r23:22                      //save 22,23
    r23 = #0x80000001
} {
    memd(sp+#0<<2)  = r17:16                      //save 16,17
    memd(sp+#2<<2)  = r19:18                      //save 18,19
    maxe = vsplat(r23)                            // maxe <- -0x7fffffff
} {
    memd(sp+#8<<2)  = r25:24                      //save 24,25
    memd(sp+#10<<2) = r27:26                      //save 26,27
    mine.w = vabs(maxe.w)                         // mine <- +0x7fffffff
} {
    memd(sp+#12<<2) = ptr_wi_ptr_xi               //save weights:activation
    memw(sp+#14<<2) = ptr_zi                      //save output ptr
} {
    memw(sp+#15<<2) = out_width                   //save output width
    filt_height = memw(sp+#33<<2)                 //filter height
    stride = zxth(stride_h_w)                     //horizontal stride (low halfword)
} {
    ptr_filtsum = memw(sp+#35<<2)                 //ptr to the sum of filters+offset
    filt_width = memw(sp+#32<<2)                  //filter width
} {
    in_depth = memw(sp+#31<<2)                    //input depth
    wsum0 = vmem(ptr_filtsum+#0)                  //set 1st weight offset
} {
    in_zero = memw(sp+#41<<2)                     //
    cbuf_size = mpyi(filt_height, in_depth)       //circular buffer size
    dcfetch(ptr_xi+#0<<6)                         //
} {
    ptr_max = memw(sp+#36<<2)                     //get max/min ptr
    in_zero = vsplatb(in_zero)                    //
    stride = asl(stride, #5)                      //32 * stride_w (d32: 32 bytes per pixel)
    dcfetch(ptr_xi+#1<<6)                         //
} {
    zshift = memw(sp+#40<<2)                      //final shift 7 + 16
    store_cntrl = memw(sp+#42<<2)                 //
    vin_zero = vsplat(in_zero)                    //
    cbuf_size = mpyi(cbuf_size, next_inbuf_width) //circular buffer size
} {
    out_width_32 = memw(sp+#38<<2)                //total width of output
    ptr_cbufi = memw(sp+#39<<2)                   //circular buffer
    cbuf_size = add(cbuf_size, cbuf_size)         //x2 (buffer holds shuffled+delayed copy)
    filt_width = asl(filt_width, #2)              //*32/8
} {
    cbuf_eob = add(ptr_cbufi, cbuf_size)          //end of circ buffer marker
    filt_height = mpyi(filt_height, in_depth)     //total number of depth32 filter rows
    recip_level = memw(sp+#37<<2)                 //255/max
    gmax = vmem(ptr_max+#0)                       //
} {
    cbuf_eob = add(cbuf_eob, #-4)                 //make so comparison is >= eob
    gmin = vmem(ptr_max+#1)                       //
    dcfetch(ptr_xi+#2<<6)                         //
} {
    filt_width = add(filt_width, #-1)             //account for epilog
    filt_height = lsr(filt_height, #5)            //num d32 rows in filter
    vpred1 = vmem(store_cntrl+#1)                 //
    dcfetch(ptr_xi+#3<<6)                         //
} {
    filt_height = add(filt_height, #-1)
    in_width_32 = asl(next_inbuf_width, #6)       //next d32 line x 2
    out_height = memw(sp+#34<<2)                  //height of output
    vrecip = vmem(recip_level++#1)                //used to compress to 8bits 255/max
} {
    vpred2 = vmem(store_cntrl+#2)                 //
    memw(sp+#37<<2) = recip_level                 //255/max
}
/* -------------------------------------------------------------------------- */
        .balign 32
.L_height:
{   ptr_z0 = memw(sp+#14<<2)                      //output ptr for even lines
    ptr_x0 = memw(sp+#12<<2)                      //ptr_x0=ptr_cbufi read circ buffer
    s03_s00 = vcombine(wsum0,wsum0)               //init sum0 and 4
    s13_s10 = #0                                  //
} {                                               //buffer read ptr if ptr_xi >= buf_size-=size
    memw(sp+#14<<2) += out_width_depth            //update output ptr
    fetch_ptr_base = add(ptr_x0, in_width_32)     //fetch is next row ahead
    s05_s02 = vcombine(wsum0,wsum0)               //init sum2 and 6
    out_height = add(out_height, #-1)             //decrement height count
} {
    p1 = cmp.gt(fetch_ptr_base, cbuf_eob)         //if prefetch >= circ buffer wrap around
    s04_s01 = vcombine(wsum0,wsum0)               //init sum1 and 5
    s14_s11 = #0                                  //
    ptr_w0 = memw(sp+#13<<2)                      //access ptr weight
} {
    s15_s12 = #0                                  //
    if(p1)fetch_ptr_base=sub(fetch_ptr_base,cbuf_size) //wrap fetch ptr around independently
    col_count = memw(sp+#15<<2)                   //initialize width count
    nop                                           //
}
.L_width:
{   p2 = cmp.eq(filt_height, #0)
    loop1(.L_filt_height, filt_height)            //setup vertical filter loop
    nop; nop;                                     //
}{
    if(p2) jump .L_last1                          //filt_height was 1 row: go straight to epilog row
    nop; nop; nop                                 //
}
        .balign 32
.L_filt_height:
{   w00 = vmem(ptr_w0++#1)                        //[0, 0]1st 32 weights of out depth
    x50_x20 = memd(ptr_x0+stride<<#2)             //[0, 0]load pt 3 and 7
    p3 = sp1loop0(.L_filt_width, filt_width)      //set up inner loop for next time
    ptr_x11 = add(ptr_x0, #8)                     //set up current input ptr (odd stream)
} {
    fetch_ptr = add(fetch_ptr_base, #0)           //initial fetch ptr
    w01 = vmem(ptr_w0++#1)                        //[0, 1]2nd 32weights stream 0
    x51_x21 = memd(ptr_x11+stride<<#2)            //[0, 1]
    ptr_x10 = ptr_x0                              //set up current input ptr (even stream)
}
        .balign 32
.L_filt_width:
{   dcfetch(fetch_ptr+#0<<6)                      //[0, 2]fetch 64bytes-2 lots 8 x 4 bytes
    fetch_ptr = add(fetch_ptr, #64)               //[0. 2]
    s05_s02.w += vrmpy(w00.b, x50_x20.ub)         //[0, 2]macc 2,6 out 0
    nop
} {
    s15_s12.w += vrmpy(w01.b, x51_x21.ub)         //[0, 3]acc 2,3,6,7
    x40_x10 = memd(ptr_x10+stride<<#1)            //[0, 3]load pt 1 5
    x41_x11 = memd(ptr_x11+stride<<#1)            //[0, 3]
    nop
} {
    s04_s01.w += vrmpy(w00.b, x40_x10.ub)         //[0, 4]
    s14_s11.w += vrmpy(w01.b, x41_x11.ub)         //[0, 4]
    x30_x00 = memd(ptr_x10++#2<<3)                //[0, 4]load pts 0, 4
    x31_x01 = memd(ptr_x11++#2<<3)                //[0, 4]
} {
    s03_s00.w += vrmpy(w00.b, x30_x00.ub)         //[0, 5]acc 0,4,1,5 out 0
    w00 = vmem(ptr_w0++#1)                        //[1, 0]1st 32 weights of out depth
    x50_x20 = memd(ptr_x10+stride<<#2)            //[1, 0]load pt 3 and 7
    if(!p3) ptr_x0 = add(ptr_x0, in_width_32)     //first pass only: advance to next d32 row
} {
    if(!p3) fetch_ptr_base=add(fetch_ptr_base,in_width_32) //first pass only: advance fetch base
    s13_s10.w += vrmpy(w01.b, x31_x01.ub)         //[0, 6]
    w01 = vmem(ptr_w0++#1)                        //[1, 1]2nd 32weights stream 0
    x51_x21 = memd(ptr_x11+stride<<#2)            //[1, 1]
}:endloop0
{   dcfetch(fetch_ptr+#0<<6)                      //[1, 2]fetch 64bytes-2 lots 8 x 4 bytes
    s05_s02.w += vrmpy(w00.b, x50_x20.ub)         //[1, 2]macc 2,6 out 0
    p1 = cmp.gt(fetch_ptr_base, cbuf_eob)         //[E,10]
    if(p1.new)fetch_ptr_base=sub(fetch_ptr_base,cbuf_size) //[E,10]wrap around end fetch ptr
} {
    s15_s12.w += vrmpy(w01.b, x51_x21.ub)         //[1, 3]acc 2,3,6,7
    x40_x10 = memd(ptr_x10+stride<<#1)            //[1, 3]load pt 1 5
    x41_x11 = memd(ptr_x11+stride<<#1)            //[1, 3]
    nop
} {
    s04_s01.w += vrmpy(w00.b, x40_x10.ub)         //[1, 4]
    s14_s11.w += vrmpy(w01.b, x41_x11.ub)         //[1, 4]
    x30_x00 = memd(ptr_x10++#2<<3)                //[1, 4]load pts 0, 4
    x31_x01 = memd(ptr_x11++#2<<3)                //[1, 4]
} {
    p0 = cmp.gt(ptr_x0, cbuf_eob)                 //[E,10]
    if(p0.new)ptr_x0 = sub(ptr_x0, cbuf_size)     //[E,10]wrap around end of buffer
    s03_s00.w += vrmpy(w00.b, x30_x00.ub)         //[1, 5]acc 0,4,1,5 out 0
    s13_s10.w += vrmpy(w01.b, x31_x01.ub)         //[1, 5]
}:endloop1
.L_last1:                                         //final filter row (no next-row prefetch setup)
{   ptr_x11 = add(ptr_x0, #8)                     //set up current input ptr (odd stream)
    ptr_x10 = ptr_x0                              //set up current input ptr (even stream)
    ptr_x0 = add(ptr_x0, in_width_32)             //advance to next d32 row
    w00 = vmem(ptr_w0++#1)                        //[0, 0]1st 32 weights of out depth
} {
    x50_x20 = memd(ptr_x10+stride<<#2)            //[0, 0]load pt 3 and 7
    p0 = cmp.gt(ptr_x0, cbuf_eob)                 //[E,10]
    if(p0.new)ptr_x0 = sub(ptr_x0, cbuf_size)     //[E,10]wrap around end of buffer
    loop0(.L_filt_width1, filt_width)             //set up inner loop for next time
} {
    w01 = vmem(ptr_w0++#1)                        //[0, 1]2nd 32weights stream 0
    x51_x21 = memd(ptr_x11+stride<<#2)            //[0, 1]
    fetch_ptr = add(ptr_x0, mpyi(stride, #12))    //stride*2*6: advance buffer by 6 outputs
    nop
}
        .balign 32
.L_filt_width1:
{   dcfetch(fetch_ptr+#0<<6)                      //[0, 2]fetch 64bytes-2 lots 8 x 4 bytes
    fetch_ptr = add(fetch_ptr, #64)               //[0. 2]
    s05_s02.w += vrmpy(w00.b, x50_x20.ub)         //[0, 2]macc 2,6 out 0
} {
    s15_s12.w += vrmpy(w01.b, x51_x21.ub)         //[0, 3]acc 2,3,6,7
    x40_x10 = memd(ptr_x10+stride<<#1)            //[0, 3]load pt 1 5
    x41_x11 = memd(ptr_x11+stride<<#1)            //[0, 3]
} {
    s04_s01.w += vrmpy(w00.b, x40_x10.ub)         //[0, 4]
    s14_s11.w += vrmpy(w01.b, x41_x11.ub)         //[0, 4]
    x30_x00 = memd(ptr_x10++#2<<3)                //[0, 4]load pts 0, 4
    x31_x01 = memd(ptr_x11++#2<<3)                //[0, 4]
} {
    s03_s00.w += vrmpy(w00.b, x30_x00.ub)         //[0, 5]acc 0,4,1,5 out 0
    w00 = vmem(ptr_w0++#1)                        //[1, 0]1st 32 weights of out depth
    x50_x20 = memd(ptr_x10+stride<<#2)            //[1, 0]load pt 3 and 7
} {
    s13_s10.w += vrmpy(w01.b, x31_x01.ub)         //[0, 6]
    w01 = vmem(ptr_w0++#1)                        //[1, 1]2nd 32weights stream 0
    x51_x21 = memd(ptr_x11+stride<<#2)            //[1, 1]
}:endloop0
{   dcfetch(fetch_ptr+#0<<6)                      //[1, 2]fetch 64bytes-2 lots 8 x 4 bytes
    s05_s02.w += vrmpy(w00.b, x50_x20.ub)         //[1, 2]macc 2,6 out 0
//  adjust = memw(sp+#23<<2)
    col_count = add(col_count, #-6)               //decrement width count by 6
} {
    s15_s12.w += vrmpy(w01.b, x51_x21.ub)         //[1, 3]acc 2,3,6,7
    x40_x10 = memd(ptr_x10+stride<<#1)            //[1, 3]load pt 1 5
    x41_x11 = memd(ptr_x11+stride<<#1)            //[1, 3]
    p2 = cmp.ge(col_count,#0)                     //
} {
    s04_s01.w += vrmpy(w00.b, x40_x10.ub)         //[1, 4]
    s14_s11.w += vrmpy(w01.b, x41_x11.ub)         //[1, 4]
    x30_x00 = memd(ptr_x10+#0<<3)                 //[1, 4]load pts 0, 4
    x31_x01 = memd(ptr_x11+#0<<3)                 //[1, 4]
} {
    s03_s00.w += vrmpy(w00.b, x30_x00.ub)         //[1, 5]acc 0,4,1,5 out 0
    s13_s10.w += vrmpy(w01.b, x31_x01.ub)         //[1, 5]
    ones = ##0x01010101                           //
}
/* ------------------------------------------------------------------------ */
/* Epilog for this group of 6 outputs: merge the two accumulator streams,   */
/* track max/min, scale by vrecip, shift by zshift, pack to u8, store.      */
{   s00.w = vadd(s00.w, s10.w)                    //combine redundant accs
    s01.w = vadd(s01.w, s11.w)                    //combine redundant accs
    s02.w = vadd(s02.w, s12.w)                    //combine redundant accs
    s03.w = vadd(s03.w, s13.w)                    //combine redundant accs
} {
    s04.w = vadd(s04.w, s14.w)                    //combine redundant accs
    s05.w = vadd(s05.w, s15.w)                    //combine redundant accs
    mine.w = vmin(mine.w, s00.w)                  //min accumulation
    s0_sh.w = vasl(s00.w, zshift)                 //
} {
    maxe.w = vmax(maxe.w, s00.w)                  //max accumulation
    s1_sh.w = vasl(s01.w, zshift)                 //
//  ptr_x0 = sub(ptr_x0, adjust)                  //-=filt_height if stride_height > filt_height
    if (!p2) s05 = s00                            //last (partial) group: keep min/max valid
} {
    s00.w = vmpye(s0_sh.w, vrecip.uh)             //
    maxe.w = vmax(maxe.w, s01.w)                  //max accumulation
    mine.w = vmin(mine.w, s01.w)                  //min accumulation
} {
    s00.w += vmpyo(s0_sh.w, vrecip.h):SSR         //
    ptr_x0 += mpyi(stride, #12)                   //stride*2*6: advance buffer by 6 outputs
} {
    s0_sh.w = vasl(s02.w, zshift)                 //o
    s01.w = vmpye(s1_sh.w, vrecip.uh)             //
    align = sub(#128, ptr_z0)                     //vror amount to align d054 with ptr_z0
} {
    maxe.w = vmax(maxe.w, s02.w)                  //max accumulation
    mine.w = vmin(mine.w, s02.w)                  //min accumulation
    s01.w += vmpyo(s1_sh.w, vrecip.h):SSR         //
    p0 = cmp.gt(col_count, #0)                    //will be back at same place just advance
} {
    maxe.w = vmax(maxe.w, s03.w)                  //max accumulation
    s1_sh.w = vasl(s03.w, zshift)                 //o
    s02.w = vmpye(s0_sh.w, vrecip.uh)             //
} {
    mine.w = vmin(mine.w, s03.w)                  //min accumulation
    d010.h = vpack(s01.w, s00.w):sat              //pack high 16bits of accs
    s02.w += vmpyo(s0_sh.w, vrecip.h):SSR         //
    vplut = extractu(ptr_z0, #2, #5)              //output alignment bits select store mask
} {
    s0_sh.w = vasl(s04.w, zshift)                 //o
    s03.w = vmpye(s1_sh.w, vrecip.uh)             //
    mine.w = vmin(mine.w, s04.w)                  //min accumulation
} {
    s03.w += vmpyo(s1_sh.w, vrecip.h):SSR         //
    vplut = asl(ones, vplut)                      //
    maxe.w = vmax(maxe.w, s04.w)                  //max accumulation
} {
    mine.w = vmin(mine.w, s05.w)                  //min accumulation
    s1_sh.w = vasl(s05.w, zshift)                 //o
    s04.w = vmpye(s0_sh.w, vrecip.uh)             //
} {
    maxe.w = vmax(maxe.w, s05.w)                  //max accumulation
    d032.h = vpack(s03.w, s02.w):sat              //pack high 16bits of accs
    s04.w += vmpyo(s0_sh.w, vrecip.h):SSR         //
} {
    s05.w = vmpye(s1_sh.w, vrecip.uh)             //
    fetch_ptr_base = add(ptr_x0, in_width_32)     //fetch is next row ahead
    wsum0 = vmem(ptr_filtsum+#0)                  //set 1st weight offset
} {
    d03210.ub = vpack(d032.h, d010.h):sat         //pack accs 0-3 to saturated u8
    s05.w += vmpyo(s1_sh.w, vrecip.h):SSR         //
} {
    p2 = cmp.gt(fetch_ptr_base, cbuf_eob)         //if prefetch >= circ buffer wrap around
    if(p2.new)fetch_ptr_base=sub(fetch_ptr_base,cbuf_size) //wrap fetch ptr around independently
    s03_s00 = vcombine(wsum0,wsum0)               //init sum0 and 4
} {
    q1 = vand(vpred1, vplut)                      //
    d054.h = vpack(s05.w, s04.w):sat              //pack high 16bits of accs
} {
    s04_s01 = vcombine(wsum0,wsum0)               //init sum1 and 5
    s05_s02 = vcombine(wsum0,wsum0)               //init sum1 and 5
} {
    d054.ub = vpack(vin_zero.h, d054.h):sat       //pack accs 4-5 to saturated u8
    ptr_w0 = memw(sp+#13<<2)                      //access ptr weight
//  ptr_equalize = memw(sp+#43<<2)                //
} {
    vmemu(ptr_z0+#0) = d03210
    s13_s10 = #0                                  //init sum0 and 4
//  loop1(.L_filt_height, filt_height)            //setup vertical filter loop
//lc1 = filt_height
    maxo.w = vasl(maxe.w,zshift)                  // speculative maxe <<= zshift
} {
    d054 = vror(d054, align)                      //
    s14_s11 = #0                                  //init sum1 and 5
    if(!p0)wscale = vrecip                        //
} {
    s15_s12 = #0                                  //init sum1 and 5
    if(q1) vmem(ptr_z0+#1):nt = d054              //store 0-3 even row
    q2 = vand(vpred2, vplut)                      //
} {
    if(q2) vmem(ptr_z0+#2):nt = d054              //store 4-7 even row
    ptr_z0 = add(ptr_z0, #192)                    //advance output: 6 pixels * 32 bytes
    if (p0) jump .L_width                         //next group of 6 outputs on this row
}//endloop width
/*
 ----|--54|0000 0011
 ----|-54-|0000 0110
 ----|54--|0000 1100
 ---5|4---|0001 1000
*/
/* --------------------------------------------------------------------------- */
/* End of output row: equalize running max/min by the row's scale, fold into   */
/* the global gmax/gmin, and reset the per-row trackers.                       */
{   recip_level = memw(sp+#37<<2)                 //255/max
    mine.w = vasl(mine.w,zshift)
    maxe.w = vmpye(maxo.w, wscale.uh)             //[1, 0]equalize max's
    p0 = cmp.eq(out_height, #0)                   //are vertical lines done?
} {
    col_count = #0x80000001                       // init for maxe
    mino.w = vmpye(mine.w, wscale.uh)             //[1, 1]equalize min's
    if(!p0)vrecip = vmem(recip_level++#1)         //used to compress to 8bits 255/max
} {
    ptr_z0 = abs(col_count)                       // init for mine (= 0x7fffffff)
    memw(sp+#37<<2) = recip_level                 //255/max
    maxe.w+= vmpyo(maxo.w, wscale.h):SSR          //[1, 2]equalize max's
} {
    gmax.w = vmax(gmax.w, maxe.w)
    mino.w+= vmpyo(mine.w,wscale.h):SSR           //[1, 3]equalize min's
    ptr_filtsum = add(ptr_filtsum, #128)          //
} {
    mine = vsplat(ptr_z0)
    maxe = vsplat(col_count)
    gmin.w = vmin(gmin.w, mino.w)
    if(!p0) jump:nt .L_height                     //then go again
}
/* ------------------------------------------------------------------------ */
#if 0
.L_domax:                                         //(dead code: old in-register max/min reduce)
{   ptr_max = memw(sp+#36<<2)                     //get max/min ptr
    cm4 = #-4                                     //define int based deal
} {
    loop0(.L_peak, #4)                            //set up vec reduce
    maxo_maxe = vdeal(maxe, maxe, cm4)            //deal out odd and even
}
.L_peak:
{   maxe.w = vmax(maxe.w, maxo.w)                 //reduce
    mino_mine = vdeal(mine, mine, cm4)            //split out odd and even min
} {
    mine.w = vmin(mine.w, mino.w)                 //reduce mins by 2
} {
    maxo_maxe = vdeal(maxe, maxe, cm4)            //split out odd and even max
}:endloop0
{   maxe.w = vmax(maxo.w, maxe.w)                 //reduce max
    vmem(ptr_max+#0) = maxe.new                   //store max
    mino_mine = vdeal(mine, mine, cm4)            //split out mins
} {
    mine.w = vmin(mino.w, mine.w)                 //reduce mins to final 1
    vmem(ptr_max+#1) = mine.new                   //store min
}
/* ------------------------------------------------------------------------ */
{   r17:16 = memd(sp+#0)                          //restore stack
    r19:18 = memd(sp+#2<<2)                       //18,19
#else
{   vmem(ptr_max+#0) = gmax                       //store max
    r17:16 = memd(sp+#0)                          //restore stack
} {
    vmem(ptr_max+#1) = gmin                       //store min
    r19:18 = memd(sp+#2<<2)                       //18,19
#endif
} {
    r21:20 = memd(sp+#4<<2)                       //20,21
    r23:22 = memd(sp+#6<<2)                       //22,23
} {
    r25:24 = memd(sp+#8<<2)                       //24,25
    r27:26 = memd(sp+#10<<2)                      //26,27
} {
    dealloc_return                                //
}
/* ------------------------------------------------------------------------ */
.L_end:
/* ======================================================================== */
.size gvconv2dbbb_circ6_d32_v65_asm, .L_end-gvconv2dbbb_circ6_d32_v65_asm
|
XiaoMi/nnlib | 9,526 | hexagon/asm_src/to_d32_h.S | /*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
* FUNCTION
* to_d32 convert image of width,height,depth to d32 format each slice of depth
* for each point on own line.
*
* input 0-6 0123456012345601234560123456 <-how depth is transformed
* output 0000
* 1111
* 2222
* 3333
* 4444
* 5555
* 6666
*
* *depth multiple of 32
* *output width multiple of 4 and output aligned to 128
*
* CODESIZE
* 240 bytes
*
* C MODEL
*/
#if 0
void to_d32(
uint8_t * data, int width, uint8_t * data_d32, int next_width, int height, int depth)
{
int i,j,k;
for(i=0; i < height; i++)
{
for(j=0; j < width; j++)
{
for(k=0; k < depth; k++)
{
data_d32[(i*depth+(k/32)*32)*next_width+j*32+(k%32)] = data[(i*width+j)*depth+k];
}
}
}
return;
}
#endif
/* ---------------------------------------------------------------- */
/* ---------------------------------------------------------------- */
/*
 * void to_d32_asm(uint8_t *data, int width, uint8_t *data_d32,
 *                 int next_width, int height, int depth)
 *
 * Repack a flat (height, width, depth) uint8 image into d32 layout,
 * matching the C model above.  Preconditions (from the file header):
 * depth is a multiple of 32; output width is a multiple of 4 and the
 * output pointer is 128-byte aligned.
 * Processes 4 depth-slices (4x32 = 128 input bytes) per inner pass,
 * using vshuff to transpose the four 32-byte groups into rows.
 */
        .text
        .global to_d32_asm
        .balign 32
        .type to_d32_asm, @function
to_d32_asm:
/* ---------------------------------------------------------------- */
#define ptr_ini               r0     //input pointer, normal depth-major layout
#define width                 r1     //num depths wide (pixels per row)
#define out_d32_i             r2     //output pointer into d32 array
#define next_width_d32        r3     //rnd(width,4)*32 - physical width of d32 array
#define height                r4     //number of rows
#define depth                 r5     //depth of input (multiple of 32)
/* ---------------------------------------------------------------- */
#define count                 r18    //temp count to index into depths
#define depth_count           r17    //number of 32-deep slices (depth/32)
#define depth_iters           r10    //depth/32 rounded up to nearest 4, then /4
#define horz_iters            r13    //width rounded up to multiple of 4, then /4
#define ptr_in0               r14    //temp input ptr (per row)
#define ptr_in1               r11    //temp input ptr (per 4-pixel group)
#define depth3                r12    //3 * depth
#define width_depth           r15    //physical bytes per output row group
#define out_d32_0             r16    //temp output pointer (per row)
#define out_d32_1             r8     //temp output pointer (per depth slice)
#define c32                   r6     //shuffle granularity: 32-byte blocks
#define c64                   r7     //shuffle granularity: 64-byte blocks
#define in_width_depth        r9     //width*depth = bytes per input row
/* ---------------------------------------------------------------- */
#define x03210                v0     //pixel 0: 4 x 32-byte depth chunks
#define x13210                v1     //pixel 1 depth chunks
#define x23210                v2     //pixel 2 depth chunks
#define x33210                v3     //pixel 3 depth chunks
#define x03322_x01100         v5:4   //pixels 0/1 shuffled together at 32B
#define x03322                v5     //shuffle 32 together
#define x01100                v4     //shuffle 32 together
#define x13322_x11100         v7:6   //pixels 2/3 shuffled together at 32B
#define x13322                v7     //shuffle 32 together
#define x11100                v6     //shuffle 32 together
#define x3333_x2222           v11:10 //depth slices 2/3 gathered at 64B
#define x3333                 v11    //shuffle 64 together
#define x2222                 v10    //shuffle 64 together
#define x1111_x0000           v9:8   //depth slices 0/1 gathered at 64B
#define x1111                 v9     //shuffle 64 together
#define x0000                 v8     //shuffle 64 together
/* ---------------------------------------------------------------- */
     { allocframe(#24)                              //
       c32 = #-32                                   //set up shuffle size (32B blocks)
       c64 = #-64                                   //set up shuffle size (64B blocks)
     } {
       memd(sp+#0)  = r17:16                        //save callee-saved regs
       memd(sp+#8)  = r19:18                        //save callee-saved regs
       in_width_depth = mpyi(width, depth)          //size of each physical input line
       depth_iters = lsr(depth, #5)                 //depth / 32
     } {
       width_depth=mpyi(next_width_d32,depth_iters) //size of each physical output line
       depth_count = depth_iters                    //unrounded depth count (in 32s)
       M0 = depth                                   //set up CR: input stride = 1 pixel
     } {
       depth_iters = add(depth_iters, #3)           //round up to nearest 4
       depth3 = mpyi(depth, #3)                     //
       horz_iters = add(width, #3)                  //round horizontal up to mult of 4
     } {
       depth_iters = lsr(depth_iters, #2)           //depth / 128 (groups of 4 slices)
       depth3 = sub(#128, depth3)                   //retard pointer: 128-3*depth
       horz_iters = lsr(horz_iters, #2)             //4 pixels per inner pass
     } {
       M1 = depth3                                  //stride back to next depth group
     }
/* ---------------------------------------------------------------- */
        .balign 32
.L_loop_height:
     { height = add(height, #-1)                    //next row
       out_d32_0 = out_d32_i                        //tmp out ptr
       ptr_in0 = ptr_ini                            //tmp in ptr
       loop1(.L_loop_width, horz_iters)             //line loop
     }
/* ---------------------------------------------------------------- */
        .balign 32
.L_loop_width:
     { out_d32_1 = out_d32_0                        //
       ptr_in1 = ptr_in0                            //
       loop0(.L_loop_depth4, depth_iters)           //set up depth loop
       count = add(depth_count, #4)                 //remaining 32-deep slices + 4
     }
/* ---------------------------------------------------------------- */
        .balign 32
.L_loop_depth4:                                     //do multiples of 4 of depth
     { x03210 = vmemu(ptr_in1++M0)                  //1st 128 bytes of depth, pixel 0
     } {
       x13210 = vmemu(ptr_in1++M0)                  //2nd 128 bytes of depth, pixel 1
     } {
       x23210 = vmemu(ptr_in1++M0)                  //3rd 128 bytes of depth, pixel 2
     } {
       x03322_x01100 = vshuff(x13210, x03210, c32)  //interleave pixels 0/1 at 32B
     } {
       x33210 = vmemu(ptr_in1++M1)                  //4th 128 bytes of depth, pixel 3
     } {
       count = add(count, #-4)                      //decrement depth count
     } {
       x13322_x11100 = vshuff(x33210, x23210, c32)  //get even depths together
     } {
       p1 = cmp.gt(count, #1)                       //2 or more slices left?
     } {
       x1111_x0000 = vshuff(x11100, x01100, c64)    //get depth-slice 0's together
       vmem(out_d32_1+#0) = x0000.new               //store slice 0 (always valid)
       out_d32_1 = add(out_d32_1, next_width_d32)   //
     } {
       if(p1) vmem(out_d32_1+#0) = x1111            //store slice 1 if present
       if(p1)out_d32_1 = add(out_d32_1,next_width_d32) //
       p1 = cmp.gt(count, #2)                       //3 or more?
     } {
       x3333_x2222 = vshuff(x13322, x03322, c64)    //gather slices 2 and 3
       if(p1) vmem(out_d32_1+#0) = x2222.new        //store slice 2 if present
       if(p1)out_d32_1 = add(out_d32_1,next_width_d32) //
       p1 = cmp.gt(count, #3)                       //4 or more?
     } {
       if(p1) vmem(out_d32_1+#0) = x3333            //store slice 3 if present
       if(p1)out_d32_1 = add(out_d32_1,next_width_d32) //
     }:endloop0
/* ---------------------------------------------------------------- */
     {
       out_d32_0 = add(out_d32_0, #128)             //advance out by 4 pixels (128B)
       ptr_in0 = addasl(ptr_in0, depth, #2)         //advance input by 4 depths
     }:endloop1
/* ---------------------------------------------------------------- */
     { out_d32_i = add(out_d32_i, width_depth)      //next output row group
       ptr_ini = add(ptr_ini, in_width_depth)       //next input row
       p0 = cmp.eq(height, #0)                      //
       if(!p0.new) jump:t .L_loop_height            //more rows to do
     }
/* ---------------------------------------------------------------- */
     { r17:16 = memd(sp+ #0)                        //restore callee-saved regs
       r19:18 = memd(sp+ #8)                        //
     } {
       dealloc_return                               //
     }
.L_end:
/*=============================================================================*/
      .size to_d32_asm, .L_end-to_d32_asm
/*=============================================================================*/
|
XiaoMi/nnlib | 3,653 | hexagon/asm_src/fcsuma_h.S |
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
 * fcsuma_asm: take a 128-byte-aligned buffer whose length need not be a
 * multiple of the vector length, zero-pad the tail, and compute its byte sum.
 */
/*
 * fcsuma_asm(uint8_t *ptr_in, int width, int32_t *ptr_sum)
 *
 * Sums 'width' bytes starting at 128-byte-aligned ptr_in; the tail
 * (width % 128) is masked off with vmux before accumulation.
 * NOTE(review): bytes are accumulated via vrmpy against 0xFF (= -1 as
 * signed byte), so the stored result is the NEGATED byte sum — callers
 * presumably expect that; confirm against call sites.
 * Result is reduced to a single word and the full vector is stored to
 * ptr_sum (reduced value replicated by the vdeal/vadd network).
 */
        .text
        .global fcsuma_asm
        .type fcsuma_asm,@function
        .p2align 6
fcsuma_asm:
#define ptr_in   r0
#define width    r1
#define ptr_sum  r2
#define ws       r5
#define spare    r6
#define c1111    r7
#define vzero    v0
#define d0       v2
#define d1       v3
#define s1_s0    v5:4
#define s1       v5
#define s0       v4
   {
       spare = and(width, #0x7f)            // width % 128 (tail bytes)
       width = lsr(width, #7)               // number of whole 128B vectors
   } {
       ws = #-4                             // word-granularity deal for reduction
       q0 = vsetq(spare)                    // mask covering the tail bytes
       p0 = cmp.eq(spare, #0)               // width is an exact multiple of 128?
       loop0(.L_sumnpad, width)             //
   } {
       p1 = cmp.eq(width, #0)               // fewer than 128 bytes total?
       s0 = #0                              // init sum
   } {
       vzero = #0                           //
       c1111 = #-1                          // 0xFF bytes: vrmpy sums (negated)
       if(p1) jump .L_spare_only            //
   }
        .balign 32
.L_sumnpad:
   { d1 = vmemu(ptr_in++#1)                 // load full vector
   } {
       nop                                  //
   } {
       s0.w += vrmpy(d1.ub, c1111.b)        // accumulate bytes into words
   }:endloop0
.L_spare_only:
   { loop0(.L_sum, #5)                      // 5 deal/add steps: 32 words -> 1
     d1 = vmemu(ptr_in+#0)                  // load (possibly partial) tail vector
   } {
       d1 = vmux(q0, d1, vzero)             // zero bytes beyond 'spare'
   } {
       if( p0)c1111 = #0                    // exact multiple: tail adds nothing
   } {
       s0.w += vrmpy(d1.ub, c1111.b)        //
   }
.L_sum:
   { s1_s0 = vdeal(s0, s0, ws)              // split odd/even words
   } {
       s0.w = vadd(s0.w, s1.w)              // pairwise reduce
   }:endloop0
   { vmem(ptr_sum+#0) = s0                  // store reduced sum vector
     jumpr r31                              //
   }
.L_end:
        .size fcsuma_asm,.L_end-fcsuma_asm
|
XiaoMi/nnlib | 20,321 | hexagon/asm_src/avgpool_d32.S | /*
* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
.file "avgpool_d32.S"
/*======================================================================*/
/* FUNCTIONS : avgpool_slice_hvx_3x3_stride1 */
/* */
/* DESCRIPTION */
/*                 Perform 3x3 average pooling on d32 format          */
/* */
/* ARCHITECTURE : QDSP6V6 + HVX */
/*======================================================================*/
/* REVISION HISTORY: */
/* ================= */
/* */
/* Author Date Comments */
/* ------------------------------------------------------------- */
/*======================================================================*/
/* CYCLE-COUNT: */
/* -> */
/* */
/* MEMORY */
/* CODESIZE = bytes */
/* STACK = bytes */
/* ASSUMPTIONS */
/* arrays are 128 byte aligned */
/* C MODEL */
/*======================================================================*/
/*=============================================================================*/
#define optr             r0    //output pointer
#define iptr             r1    //input pointer
#define in_next_row      r2    //input row stride (bytes)
#define out_next_row     r3    //output row stride (bytes)
#define out_vectors      r4    //output vectors per row
#define out_lines        r5    //output rows (processed 2 at a time)
/*=============================================================================*/
#define c32              r6    //valign amount: 1 depth slice (32B)
#define c64              r7    //valign amount: 2 depth slices (64B)
#define c64_c32          r7:6
#define recip9           r8    //fixed-point 1/9 for the 3x3 average
#define c01              r9    //0x01010101: vmpy/vmpa byte weights of 1
#define iptr0            r10   //working input ptr
#define optr0            r11   //working output ptr (even row)
#define optr0_iptr0      r11:10
#define optr1            r12   //working output ptr (odd row)
#define scale            r13   //0 on first iter (pipeline warmup), then recip9
#define l2param_l        r14   //l2fetch params: width|count
#define l2param_h        r15   //l2fetch params: stride
#define l2param          r15:14
#define pfptr0           r16   //prefetch ptr, even rows
#define pfptr1           r17   //prefetch ptr, odd rows
#define in_next_row_2    r28   //2 * in_next_row
#define out_alig         in_next_row  //output alignment (stack arg), reuses r2
#define offset           scale        //M1 rewind offset, reuses r13
/*=============================================================================*/
#define sl0              v0    //input row n+0
#define sl2              v1    //input row n+2
#define dl2l0            v1:0
#define sl1              v2    //input row n+1
#define sout0            v3    //packed output, even row
#define svsum00_e        v4    //vertical sums (even/odd half-words), col group 0
#define svsum00_o        v5
#define dvsum00          v5:4
#define svsum01_e        v6    //vertical sums, col group 1
#define svsum01_o        v7
#define dvsum01          v7:6
#define svsum10_e        v8    //vertical sums, odd output row, col group 0
#define svsum10_o        v9
#define dvsum10          v9:8
#define svsum11_e        v10   //vertical sums, odd output row, col group 1
#define svsum11_o        v11
#define dvsum11          v11:10
#define ssum0_e          v12   //horizontal 3-tap sums, even row
#define ssum0_o          v13
#define dsum0            v13:12
#define ssum1_e          v14   //horizontal 3-tap sums, odd row
#define ssum1_o          v15
#define dsum1            v15:14
#define svs0d2_e         v16   //vertical sums shifted by 1 or 2 depth slices
#define svs0d2_o         v17
#define dvs0d2           v17:16
#define sout0_new        v18   //double-buffered outputs for vlalign
#define sout0_old        v19
#define sout1_new        v20
#define sout1_old        v21
#define sl3              sl1   //register reuse: row n+3 aliases row n+1
#define sout1            sout0
#define svs0d1_e         svs0d2_e
#define svs0d1_o         svs0d2_o
#define svs1d1_e         svs0d2_e
#define svs1d1_o         svs0d2_o
#define svs1d2_e         svs0d2_e
#define svs1d2_o         svs0d2_o
/*=============================================================================*/
#define SRS  <<1:rnd:sat
/*=============================================================================*/
/*
 * avgpool_slice_hvx_3x3_stride1(optr, iptr, in_next_row, out_next_row,
 *                               out_vectors, out_lines, [sp+0]=out_alig)
 *
 * 3x3 stride-1 average pooling on a d32 slice.  Two output rows are
 * produced per outer iteration (the loop shares the vertical sums of
 * overlapping 3-row windows).  The row loop is software-pipelined two
 * deep: [1,n]/[2,n] tags mark pipeline stage and original slot.
 * scale=0 on the warmup iteration so garbage sums multiply to zero.
 */
            .text
            .global avgpool_slice_hvx_3x3_stride1
            .balign 32
            .type avgpool_slice_hvx_3x3_stride1, @function
avgpool_slice_hvx_3x3_stride1:
    { p0 = cmp.gt(out_lines,#0)                     //nothing to do?
      if !p0.new jumpr:nt r31                       //
      recip9 = ##0x0e390e39                         //32768/9 (rounded) = 3641
      // note: (255*9* 3641 + (1<<14))>>15 = 255 (no overflow).
    }{
      out_lines = asrrnd(out_lines,#1)              //2 output rows per iteration
      m0 = in_next_row                              //
      offset = #128                                 //
      c64_c32 = combine(#64,#32)                    //
    }{
      offset -= mpyi(in_next_row,#3)                //M1: rewind 3 rows, advance 128B
      in_next_row_2 = ASL(in_next_row,#1)           //
      out_alig = memw(sp+#0)                        //output alignment (stack arg)
      sp = add(sp,#-8)                              //
    }{
      memd(sp+#0) = r17:16                          //save callee-saved regs
      m1 = offset                                   //
      c01 = ##0x01010101                            //
    }{
      l2param_l = add(out_vectors,#1)               //
      l2param_h = #128                              //
      pfptr0 = addasl(iptr,in_next_row_2,#1)        //prefetch 4 rows ahead
      pfptr1 = addasl(iptr,in_next_row_2,#1)        //
    }{
      pfptr1 += lsr(in_next_row_2,#1)               //pfptr1 = pfptr0 + 1 row
      l2param_l = combine(l2param_h.l,l2param_l.l)  // l2param=(0|128|128|out_vectors+1)
      loop1(.avgpool3x3s1_d32_outloop,out_lines)    //
      optr0_iptr0 = combine(optr,iptr)              //
    }
            .balign 32
/*============================================================================*/
.avgpool3x3s1_d32_outloop:
    { optr1 = add(optr0,out_next_row)               //
      p0 = cmp.eq(out_lines,#1)                     // if (last iteration)
      if p0.new l2param_l = #0                      // then cancel l2fetch
      sl0 = vmem(iptr0++m0)                         //
    }{
      l2fetch(pfptr0,l2param)                       //prefetch rows needed next iter
    }{
      l2fetch(pfptr1,l2param)                       //
    }{
      sl1.tmp = vmem(iptr0++m0)                     //
      dvsum00.h = vmpy(sl1.ub,c01.b)                //
      p3=sp1loop0(.avgpool3x3s1_d32_rowloop,out_vectors)//p3 false on warmup iter
    }{
      sl2.tmp = vmem(iptr0++m0)                     //
      dvsum00.h += vmpa(dl2l0.ub,c01.b)             // vertical summation rows 0..2
    }{
      sl3.tmp = vmem(iptr0++m1)                     //
      dvsum11.h = vsub(sl3.ub,sl0.ub)               //rolling window: +row3 -row0
      scale = #0                                    // to set sout0/1_old = 0
    }{
      dvsum11.h = vadd(dvsum11.h,dvsum00.h)         // vertical summation rows 1..3
      ssum0_e = #0                                  // to set sout0_old = 0
    }
            .balign 32
/*============================================================================*/
.avgpool3x3s1_d32_rowloop:
    { sl0 = vmem(iptr0++m0)                         //[1, 0]
      ssum0_o.h = vmpy(ssum0_o.h,scale.h):SRS       //[2, 0] *1/9 (scale=0 on warmup)
      ssum1_o.h = vadd(svsum10_o.h,svs1d1_o.h)      //[2, 0]
    }{
      sl1.tmp = vmem(iptr0++m0)                     //[1, 1]
      dvsum01.h = vmpy(sl1.ub,c01.b)                //[1, 1]
      svs1d2_e = valign(svsum11_e,svsum10_e,c64)    //[2, 1] sums shifted 2 slices
      sout0_new.b = vshuffe(ssum0_o.b,ssum0_e.b)    //[2, 1] pack bytes
    }{
      sl2.tmp = vmem(iptr0++m0)                     //[1, 2]
      dvsum01.h += vmpa(dl2l0.ub,c01.b)             //[1, 2]vertical summation
      svs1d2_o = valign(svsum11_o,svsum10_o,c64)    //[2, 2]
      ssum1_e.h = vadd(ssum1_e.h,svs1d2_e.h)        //[2, 2]
    }{
      sout0 = vlalign(sout0_new,sout0_old,out_alig) //[2, 3] realign to output
      if p3 vmem(optr0++#1) = sout0.new             //[2, 3]
      ssum1_o.h = vadd(ssum1_o.h,svs1d2_o.h)        //[2, 3]
      dvsum10 = dvsum11                             //[2, 3]
    }{
      sl3.tmp = vmem(iptr0++m1)                     //[1, 4]
      dvsum11.h = vsub(sl3.ub,sl0.ub)               //[1, 4] rolling +row3 -row0
      svs0d1_e = valign(svsum01_e,svsum00_e,c32)    //[1, 4] sums shifted 1 slice
      sout0_old = sout0_new                         //[2, 4]
    }{
      dvsum11.h = vadd(dvsum11.h,dvsum01.h)         //[1, 5]vertical summation
      svs0d1_o = valign(svsum01_o,svsum00_o,c32)    //[1, 5]
      sout1_old = sout1_new                         //[2, 5]
    }{
      svs0d2_e = valign(svsum01_e,svsum00_e,c64)    //[1, 6]
      ssum0_e.h = vadd(svsum00_e.h,svs0d1_e.h)      //[1, 6] horizontal 3-tap sum
      ssum1_e.h = vmpy(ssum1_e.h,scale.h):SRS       //[2, 6] *1/9
    }{
      svs0d2_o = valign(svsum01_o,svsum00_o,c64)    //[1, 7]
      ssum0_o.h = vadd(svsum00_o.h,svs0d1_o.h)      //[1, 7]
      ssum1_o.h = vmpy(ssum1_o.h,scale.h):SRS      //[2, 7]
    }{
      dsum0.h = vadd(dsum0.h,dvs0d2.h)              //[1, 8]
      svs1d1_e = valign(svsum11_e,svsum10_e,c32)    //[1, 8]
      sout1_new.b = vshuffe(ssum1_o.b,ssum1_e.b)    //[2, 8]
    }{
      svs1d1_o = valign(svsum11_o,svsum10_o,c32)    //[1, 9]
      dvsum00 = dvsum01                             //[1, 9]
      scale = recip9                                //[1, 9] real 1/9 after warmup
    }{
      ssum1_e.h = vadd(svsum10_e.h,svs1d1_e.h)      //[1,10]
      ssum0_e.h = vmpy(ssum0_e.h,scale.h):SRS       //[1,10]
      sout1 = vlalign(sout1_new,sout1_old,out_alig) //[2,10]
      if p3 vmem(optr1++#1) = sout1.new             //[2,10]
    }:endloop0
/*============================================================================*/
/* pipeline drain: stage-[2] slots of the final iteration                     */
    { ssum0_o.h = vmpy(ssum0_o.h,scale.h):SRS       //[2, 0]
      ssum1_o.h = vadd(svsum10_o.h,svs1d1_o.h)      //[2, 0]
      sout1_old = sout1_new                         //[2, 5]
    }{
      svs1d2_e = valign(svsum11_e,svsum10_e,c64)    //[2, 1]
      sout0_new.b = vshuffe(ssum0_o.b,ssum0_e.b)    //[2, 1]
    }{
      svs1d2_o = valign(svsum11_o,svsum10_o,c64)    //[2, 2]
      ssum1_e.h = vadd(ssum1_e.h,svs1d2_e.h)        //[2, 2]
    }{
      sout0 = vlalign(sout0_new,sout0_old,out_alig) //[2, 3]
      if p3 vmem(optr0++#1) = sout0.new             //[2, 3]
      ssum1_o.h = vadd(ssum1_o.h,svs1d2_o.h)        //[2, 3]
    }{
      ssum1_e.h = vmpy(ssum1_e.h,scale.h):SRS       //[2, 6]
      out_lines = add(out_lines,#-1)                //
    }{
      ssum1_o.h = vmpy(ssum1_o.h,scale.h):SRS       //[2, 7]
      pfptr0 = add(pfptr0,in_next_row_2)            //advance prefetch ptrs 2 rows
      pfptr1 = add(pfptr1,in_next_row_2)            //
    }{
      sout1_new.b = vshuffe(ssum1_o.b,ssum1_e.b)    //[2, 8]
      iptr = add(iptr,in_next_row_2)                //advance in/out 2 rows
      optr += asl(out_next_row,#1)                  //
    }{
      sout1 = vlalign(sout1_new,sout1_old,out_alig) //[2,10]
      if p3 vmem(optr1++#1) = sout1.new             //[2,10]
      optr0_iptr0 = combine(optr,iptr)              //
    }:endloop1
/*============================================================================*/
    { r17:16 = memd(sp+#0)                          //restore callee-saved regs
      sp = add(sp,#8)                               //
      r0 = #0                                       //
      jumpr r31                                     //
    }
.avgpool_slice_hvx_3x3_stride1_end:
/*=============================================================================*/
      .size avgpool_slice_hvx_3x3_stride1, .-avgpool_slice_hvx_3x3_stride1
/*=============================================================================*/
#define OUTPTR              r0
#define INPTR               r1
#define IN_ROW_STRIDE       r2
#define OUT_STRIDE          r3
#define OUT__IN_ROW_STRIDE  r3:2
#define IN_STRIDE           r4
#define ITERS               r5
#define LALIGN_RECIP        r7:6
#define LALIGN              r7
#define RECIP               r6
#define RONES               r11
#define NEXT_INPTR          r12
#define OUTER_NEXT_INPTR    r13
#define WINDOW_W_H          r15:14
#define WINDOW_W            r15
#define WINDOW_H            r14
#define LINE00_ACC          v1:0
#define LINE00_ACC_H        v1
#define LINE00_ACC_L        v0
#define LINE04_ACC          v3:2
#define LINE04_ACC_H        v3
#define LINE04_ACC_L        v2
#define RED_ACC             v5:4
#define RED_ACC_H           v5
#define RED_ACC_L           v4
#define OUTER_ACC           v7:6
#define OUTER_ACC_H         v7
#define OUTER_ACC_L         v6
#define PRODUCTH            v9:8
#define PRODUCTH_H          v9
#define PRODUCTH_L          v8
#define PRODUCTL            v11:10
#define PRODUCTL_H          v11
#define PRODUCTL_L          v10
#define OUT                 v29
#define LASTOUT             v28
#define VZERO               v30
#define TMP                 v31
/* avgpool_hvx_d32(OUTPTR,INPTR,IN_ROW_STRIDE,OUT_STRIDE,IN_STRIDE,ITERS,WINDOW_H,WINDOW_W, RECIP,LALIGN) */
/* Compute a vector (4x32) of WINDOW_HxWINDOW_W avgpooling, then go forward OUT/IN strides and iterate */
/* Stack args: WINDOW_H/W at sp+0, RECIP (fixed-point reciprocal of the
 * window area) and LALIGN (output alignment) at sp+8.
 * Accumulates WINDOW_H rows vertically (vmpy against 0x01 bytes), then
 * reduces horizontally via 32-byte valign shifts, 4 columns per pass. */
            .global avgpool_hvx_d32
            .type avgpool_hvx_d32,@function
            .p2align 6
avgpool_hvx_d32:
    {
        WINDOW_W_H = memd(r29+#0)
        c7:6 = OUT__IN_ROW_STRIDE             // c7:6 = M1:M0, so M0=IN_ROW_STRIDE, M1=OUT_STRIDE
        LALIGN_RECIP = memd(r29+#8)
        VZERO = vxor(VZERO,VZERO)
    }
    {
        LASTOUT = vxor(VZERO,VZERO)
        RONES = ##0x01010101                  // byte weights of 1 for vmpy row sums
        loop1(.Lavgpool_hvx_d32_outer,ITERS)
    }
#undef OUT_STRIDE
#undef IN_ROW_STRIDE
#undef OUT__IN_ROW_STRIDE
#undef ITERS
#define SHIFT r5
#define REDUC_AMT r3
/* Accumulate down height, reduce to 1x4 (x2) */
.Lavgpool_hvx_d32_outer:
    {
        loop0(.LNx4_first_inner,WINDOW_H)
        OUTER_NEXT_INPTR = add(INPTR,IN_STRIDE)
        NEXT_INPTR = add(INPTR,#128)
        REDUC_AMT = add(WINDOW_W,#-1)         // columns still to fold in
    }
    {
        LINE00_ACC_L = VZERO
        LINE00_ACC_H = VZERO
        LINE04_ACC_L = VZERO
        LINE04_ACC_H = VZERO
    }
    {
        OUTER_ACC_H = VZERO
        OUTER_ACC_L = VZERO
    }
.LNx4_first_inner:                            /* vertical sum of first 128B column */
    {
        TMP.tmp = vmem(INPTR++M0)
        LINE00_ACC.h += vmpy(TMP.ub,RONES.b)
    }:endloop0
.Lavgpool_hvx_d32_reduc:
    {
        loop0(.LNx4_main_inner,WINDOW_H)
        RECIP = combine(RECIP.L,RECIP.L)      // replicate reciprocal in both halves
        INPTR = NEXT_INPTR
        NEXT_INPTR = add(NEXT_INPTR,#128)
    }
.LNx4_main_inner:                             /* vertical sum of next 128B column */
    {
        TMP.tmp = vmem(INPTR++M0)
        LINE04_ACC.h += vmpy(TMP.ub,RONES.b)
    }:endloop0
    /* Reduce right up to 3 times (one 32B depth-slice shift per step).
     * Note: packet semantics — vadd reads the PREVIOUS value of TMP,
     * so each packet folds in the shift produced by the packet before. */
    {
        RED_ACC = LINE00_ACC
        SHIFT = #32
        TMP = VZERO
        p0 = cmp.gt(REDUC_AMT,#0)
        if (!p0.new) jump:nt .Lred_done
    }
    {
        TMP = valign(LINE04_ACC_H,LINE00_ACC_H,SHIFT)
        RED_ACC_L.h = vadd(RED_ACC_L.h,TMP.h)
    }
    {
        RED_ACC_H.h = vadd(RED_ACC_H.h,TMP.h)
        TMP = valign(LINE04_ACC_L,LINE00_ACC_L,SHIFT)
        SHIFT = add(SHIFT,#32)
        p0 = cmp.gt(REDUC_AMT,#1)
        if (!p0.new) jump:nt .Lred_done
    }
    {
        TMP = valign(LINE04_ACC_H,LINE00_ACC_H,SHIFT)
        RED_ACC_L.h = vadd(RED_ACC_L.h,TMP.h)
    }
    {
        RED_ACC_H.h = vadd(RED_ACC_H.h,TMP.h)
        TMP = valign(LINE04_ACC_L,LINE00_ACC_L,SHIFT)
        SHIFT = add(SHIFT,#32)
        p0 = cmp.gt(REDUC_AMT,#2)
        if (!p0.new) jump:nt .Lred_done
    }
    {
        TMP = valign(LINE04_ACC_H,LINE00_ACC_H,SHIFT)
        RED_ACC_L.h = vadd(RED_ACC_L.h,TMP.h)
    }
    {
        RED_ACC_H.h = vadd(RED_ACC_H.h,TMP.h)
        TMP = valign(LINE04_ACC_L,LINE00_ACC_L,SHIFT)
        SHIFT = add(SHIFT,#32)
    }
.Lred_done:
    {
        RED_ACC_L.h = vadd(RED_ACC_L.h,TMP.h)
        LINE00_ACC = LINE04_ACC               // slide window: next column becomes base
        REDUC_AMT = add(REDUC_AMT,#-4)
    }
    {
        p0 = cmp.gt(REDUC_AMT,#0)
        if (p0.new) jump:t .Lavgpool_hvx_d32_reduc
        OUTER_ACC.h = vadd(OUTER_ACC.h,RED_ACC.h)
    }
/* Multiply OUTER_ACC_HL by reciprocal and pack back for output */
    PRODUCTH.uw = vmpy(OUTER_ACC_H.uh,RECIP.uh)
    PRODUCTL.uw = vmpy(OUTER_ACC_L.uh,RECIP.uh)
    OUTER_ACC_H.h = vshuffo(PRODUCTH_H.h,PRODUCTH_L.h)   // keep high halves (>>16)
    OUTER_ACC_L.h = vshuffo(PRODUCTL_H.h,PRODUCTL_L.h)
    OUT.ub = vsat(OUTER_ACC_H.h,OUTER_ACC_L.h)
    {
        TMP = vlalign(OUT,LASTOUT,LALIGN)     // realign result to output pointer
        vmem(OUTPTR++M1) = TMP.new
        LASTOUT = OUT
        INPTR = OUTER_NEXT_INPTR
    }:endloop1
    {
        r0 = #0
        jumpr r31
    }
    .size avgpool_hvx_d32,.-avgpool_hvx_d32
#undef OUTPTR
#undef INPTR
#undef R32
#undef RECIP
#undef LALIGN
#undef LALIGN_RECIP
#undef WINDOW_H
#undef WINDOW_W
#undef WINDOW_W_H
#undef IN_STRIDE
#undef SHIFT
#undef REDUC_AMT
#undef LINE00_ACC
#undef LINE00_ACC_H
#undef LINE00_ACC_L
#undef LINE04_ACC
#undef LINE04_ACC_H
#undef LINE04_ACC_L
#undef RED_ACC
#undef RED_ACC_H
#undef RED_ACC_L
#undef OUTER_ACC
#undef OUTER_ACC_H
#undef OUTER_ACC_L
#undef PRODUCTH
#undef PRODUCTH_H
#undef PRODUCTH_L
#undef PRODUCTL
#undef PRODUCTL_H
#undef PRODUCTL_L
#undef OUT
#undef LASTOUT
#undef VZERO
#undef TMP
#define DST       r0
#define SRC0      r1
#define NEXT_ROW  r2
#define SRC1      r3
#define COUNT     r4
#define R32       r5
/*
 * avgpool_zap_row(dstptr,srcptr,next_row_in_bytes)
 *
 * Fill one edge-padding row: each destination vector is the rounded
 * byte average of the corresponding vectors from srcptr and the row
 * next_row_in_bytes below it.  Processes next_row/128 full vectors,
 * then one extra masked 32-byte store past the end as a safety margin.
 */
            .global avgpool_zap_row
            .type avgpool_zap_row,@function
            .p2align 6
avgpool_zap_row:
    {
        SRC1 = add(SRC0,NEXT_ROW)             // second source = one row down
        COUNT = lsr(NEXT_ROW,#7)              // whole 128B vectors per row
    }
    {
        loop0(.Lzap_row_loop,COUNT)
        R32 = #32
    }
.Lzap_row_loop:
        v0 = vmem(SRC0++#1)
    {
        v1.tmp = vmem(SRC1++#1)
        v0.ub = vavg(v0.ub,v1.ub):rnd         // rounded average of the two rows
        vmem(DST++#1) = v0.new
    }:endloop0
/* Write one more set of 32 bytes off the end, just in case */
    {
        v0 = vmem(SRC0+#0)
        q0 = vsetq(R32)                       // mask = first 32 bytes only
    }
    {
        v1.tmp = vmem(SRC1+#0)
        v0.ub = vavg(v0.ub,v1.ub):rnd
    }
    {
        if (q0) vmem(DST+#0) = v0
    }
    {
        r0 = #0
        jumpr r31
    }
    .size avgpool_zap_row,.-avgpool_zap_row
#undef DST
#undef SRC0
#undef NEXT_ROW
#undef SRC1
#undef COUNT
#define PTR           r0
#define HEIGHT        r1
#define WIDTH         r2
#define LEFT_PAD      r3
#define NEXT_ROW      r4
#define LEFT_PTR      r5
#define R_64          r7
#define R_32          r6
#define R_64_32       r7:6
#define ITERS         r12
#define R_ONE         r13
#define LEFT_DATA     v0
#define RIGHT_DATA    v1
#define LEFT_32       v2
#define RIGHT_32      v3
#define LEFT_TMP0     v10
#define LEFT_TMP1     v11
#define RIGHT_TMP0    v20
#define RIGHT_TMP1    v21
#define LEFT_Q_MASK   q0
#define RIGHT_Q_MASK  q1
#define V_ONE         v31
#define V_ZERO        v30
/*
 * avgpool_zap_lr(ptr,height,width,left_pad,next_row_in_bytes)
 * There is no if (Q) vmemu, so we need to grab both aligned chunks and store aligned
 *
 * Fills the left/right 32-byte padding columns of each row with the
 * rounded average of the two adjacent depth slices.  The Q masks select
 * only the padding lanes; LEFT_DATA/RIGHT_DATA are alignment scaffolds
 * built from pointer low bits (vlalign uses only the low 7 bits of the
 * rs operand) to place zeros at the lanes to be written.
 */
            .global avgpool_zap_lr
            .type avgpool_zap_lr,@function
            .p2align 6
avgpool_zap_lr:
    {
        R_64_32 = combine(#64,#32)
        WIDTH = add(WIDTH,LEFT_PAD)           // column index of right padding
        LEFT_PAD = add(LEFT_PAD,#-1)          // column index of left padding
        R_ONE = #1
    }
    {
        V_ONE = vsplat(R_ONE)
        LEFT_PTR = addasl(PTR,LEFT_PAD,#5)    // ptr + left_pad_col*32
        V_ZERO = vxor(V_ZERO,V_ZERO)
    }
    {
        LEFT_DATA = vlalign(V_ONE,V_ZERO,R_32)
#define RIGHT_PTR r0
        RIGHT_PTR = addasl(PTR,WIDTH,#5)      // ptr + right_pad_col*32
#undef PTR
    }
    {
        RIGHT_DATA = vlalign(LEFT_DATA,V_ONE,RIGHT_PTR)  // align by ptr low bits
        //ITERS = lsr(NEXT_ROW,#7)
    }
    {
        LEFT_DATA = vlalign(LEFT_DATA,V_ONE,LEFT_PTR)
        M0 = NEXT_ROW
    }
    {
        LEFT_Q_MASK = vcmp.eq(LEFT_DATA.w,V_ZERO.w)      // 1 where padding lives
        RIGHT_Q_MASK = vcmp.eq(RIGHT_DATA.w,V_ZERO.w)
        loop0(.Lzap_lr_loop,HEIGHT)
    }
.Lzap_lr_loop:
    {
        LEFT_32 = vmem(LEFT_PTR+#1)           // vector to the right of left edge
    }
    {
        RIGHT_32 = vmem(RIGHT_PTR+#-1)        // vector to the left of right edge
    }
    {
        LEFT_DATA.cur = vmem(LEFT_PTR+#0)
        LEFT_TMP0 = valign(LEFT_32,LEFT_DATA,R_32)       // neighbor slice +1
    }
    {
        RIGHT_DATA.cur = vmem(RIGHT_PTR+#0)
        RIGHT_TMP0 = vlalign(RIGHT_DATA,RIGHT_32,R_32)   // neighbor slice -1
    }
    {
        LEFT_TMP1 = valign(LEFT_32,LEFT_DATA,R_64)       // neighbor slice +2
    }
    {
        RIGHT_TMP1 = vlalign(RIGHT_DATA,RIGHT_32,R_64)   // neighbor slice -2
        LEFT_TMP0.ub = vavg(LEFT_TMP0.ub,LEFT_TMP1.ub):rnd
    }
    {
        if (LEFT_Q_MASK) vmem(LEFT_PTR++M0) = LEFT_TMP0  // write padding lanes only
        RIGHT_TMP0.ub = vavg(RIGHT_TMP0.ub,RIGHT_TMP1.ub):rnd
    }
    {
        if (RIGHT_Q_MASK) vmem(RIGHT_PTR++M0) = RIGHT_TMP0
    }:endloop0
    {
        jumpr r31
        r0 = #0
    }
    .size avgpool_zap_lr,.-avgpool_zap_lr
|
XiaoMi/nnlib | 6,031 | hexagon/asm_src/d32_16_to_88.S | /*
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#define ine8_ptr  r0
#define poute     ine8_ptr
#define ino8_ptr  r1
#define pouto     ino8_ptr
#define in16_ptr  r2
#define pinput    in16_ptr
#define width     r3
#define height    r4
#define depth     r5
#define c_1       r6
#define c127      r8
#define lpcnt     c127          // r8 reused as loop count once alignment is tested
#define c128      r9
#define sIn0      v0
#define sIn1      v1
#define sOut_L    v2
#define sOut_H    v3
#define dOut      v3:2
#define sPrev0    v4
#define sShift0   v5
#define dOut2     v7:6
#define sOut2_L   v6
#define sOut2_H   v7
/*
 * d32_16_to_88_cn(ine8_ptr, ino8_ptr, in16_ptr, width, height, depth)
 *
 * Splits a d32 16-bit tensor into two 8-bit planes: vdeal separates the
 * even bytes (low halves -> poute) from the odd bytes (high halves ->
 * pouto) of each 16-bit element.  Two input vectors produce one vector
 * for each output plane per iteration; total iterations are
 * (width/4)*(depth/32)*height.  A separate software-pipelined path
 * handles a non-128-byte-aligned in16_ptr via valign.
 */
            .text
            .global d32_16_to_88_cn
            .type d32_16_to_88_cn,@function
            .p2align 6
d32_16_to_88_cn:
    {
        c127 = #127                           //
        c_1 = #-1                             // vdeal control: byte-granular deal
        width = asr(width,#2)                 // width in groups of 4 pixels
        depth = asr(depth,#5)                 // depth in slices of 32
    }
    {
        c128 = #128                           //
        p0 = bitsclr(in16_ptr,c127)           // input 128-byte aligned?
        lpcnt = mpyi(width,depth)             // (r8 now holds the loop count)
    }
    {
        lpcnt = mpyi(lpcnt,height)            // total vector-pair iterations
        if (!p0) jump d32_16_to_88_cn_unalign //
    }
    {
        p3 = sp1loop0(d32_16_to_88_cn_A_lp,lpcnt) // p3 false on pipeline warmup
    }
d32_16_to_88_cn_A_lp:                         /* aligned path */
    {
        sIn0 = vmem(pinput++#1)               //
        vmem(poute+#0) = sOut_L               // store previous iter's even bytes
        if (p3) poute = add(poute,c128)       //[2] advance only after warmup
    }
    {
        if (p3) vmem(pouto++#1) = sOut_H      //[2] previous iter's odd bytes
    }
    {
        sIn1.tmp = vmem(pinput++#1)           //
        dOut = vdeal(sIn1,sIn0,c_1)           // split even/odd bytes of 16b elems
    }:endloop0
    {
        vmem(poute++#1) = sOut_L              // pipeline drain: final pair
    }
    {
        vmem(pouto++#1) = sOut_H              //
        jumpr r31                             //
    }
            .p2align 6
d32_16_to_88_cn_unalign:                      /* unaligned input path */
    {
        sIn1 = vmem(pinput++#1)               // prime alignment pipeline
        p3 = sp1loop0(d32_16_to_88_cn_B_lp,lpcnt) //
    }
d32_16_to_88_cn_B_lp:
    {
        sIn0.cur = vmem(pinput++#1)           //
        sShift0 = valign(sIn0, sIn1, pinput)  // realign by pinput low bits
    }
    {
        dOut2 = vdeal(sOut_H,sOut_L,c_1)      //[2] split previous aligned pair
        if (p3) vmem(poute++#1) = sOut2_L.new //[2]
        sOut_L = sShift0                      //
    }
    {
        sIn1.cur = vmem(pinput++#1)           //
        sOut_H = valign(sIn1, sIn0, pinput)   //
        vmem(pouto+#0) = sOut2_H              //[2]
        if (p3) pouto = add(pouto,c128)       //[2] advance only after warmup
    }:endloop0
    {
        dOut2 = vdeal(sOut_H,sOut_L,c_1)      //[e] pipeline drain
        vmem(poute++#1) = sOut2_L.new         //[e]
    }
    {
        vmem(pouto++#1) = sOut2_H             //[e]
        jumpr r31                             //
    }
    .size d32_16_to_88_cn,.-d32_16_to_88_cn
|
XiaoMi/nnlib | 9,130 | hexagon/asm_src/padzap_d32.S | /*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
* Pad Zapping for D32
*/
/*
* Pad Zap Top/Bottom is just vmemset_asm
*/
/*
* padzap_whole(uint8_t *start, uint8_t val, int d32_stride, int d32_iters, int row_stride, int row_iters)
*/
/*
 * int padzap_whole(uint8_t *start, uint8_t val, int d32_stride,
 *                  int d32_iters, int row_stride, int row_iters)
 *
 * Fill whole-vector d32 pad regions with byte 'val':
 *   for (r = 0; r < row_iters; r++)
 *     for (d = 0; d < d32_iters; d++)
 *       write 128 bytes of 'val' at start + r*row_stride + d*d32_stride
 * Stores are full unconditional HVX vectors, so 'start' is assumed
 * 128-byte aligned (TODO confirm caller contract). Returns 0 in r0.
 */
#define PTR r0          // arg0: destination pointer (assumed vector aligned)
#define VAL r1          // arg1: fill byte (low 8 bits used)
#define D32_STRIDE r2   // arg2: byte stride between d32 chunks
#define D32_ITERS r3    // arg3: d32 chunks per row
#define ROW_STRIDE r4   // arg4: byte stride between rows
#define ROW_ITERS r5    // arg5: number of rows
#define TMP r28         // scratch: bytes the inner loop advances PTR per row
#define VVAL v0         // fill byte replicated across a full vector
	.text
	.global padzap_whole
	.type padzap_whole,@function
	.p2align 6
padzap_whole:
	{
	  loop1(.Lpadzap_whole_rowloop,ROW_ITERS)  // outer hardware loop: rows
	  VAL = vsplatb(VAL)                       // replicate fill byte into all 4 byte lanes of VAL
	}
	{
	  loop0(.Lpadzap_whole_d32loop,D32_ITERS)  // inner hardware loop: d32 chunks
	  TMP = mpyi(D32_STRIDE,D32_ITERS)         // total inner-loop advance = d32_stride*d32_iters
	  nop                                      //
	}
	{
	  M0 = D32_STRIDE                          // post-increment modifier for the vmem store
	  VVAL = vsplat(VAL)                       // fill word -> whole 128-byte vector
	  ROW_STRIDE = sub(ROW_STRIDE,TMP)         // residual hop from end of one row to start of next
	}
.Lpadzap_whole_rowloop:
.Lpadzap_whole_d32loop:
	{
	  vmem(PTR++M0):nt = VVAL                  // store 128 bytes (non-temporal), step to next d32 chunk
	}:endloop0
	{
	  loop0(.Lpadzap_whole_d32loop,D32_ITERS)  // re-arm inner loop for the next row
	  PTR = add(PTR,ROW_STRIDE)                // apply residual hop to reach next row
	}:endloop1
	{
	  r0 = #0                                  // return 0
	  jumpr r31                                //
	}
	.size padzap_whole,.-padzap_whole
/*
* padzap_part(uint8_t *start, uint8_t val, int d32_stride, int d32_iters, int row_stride, int row_iters, int w)
* Zap 32*W elements starting @ start, which might not be aligned.
* It is not allowed to span to a new vector.
*/
/*
 * int padzap_part(uint8_t *start, uint8_t val, int d32_stride,
 *                 int d32_iters, int row_stride, int row_iters, int w)
 *
 * Masked variant of padzap_whole: zap only w*32 bytes at each position.
 * 'start' need not be vector aligned, but per the contract above the
 * w*32-byte span must not cross a 128-byte vector boundary. A store
 * predicate is built once from (start & 127, w) and reused for every
 * conditional vector store. Returns 0 in r0.
 *
 * Mask construction trick: the pointer value (guaranteed nonzero) is
 * splatted as a marker, valign keeps the low w*32 marker bytes, vlalign
 * shifts them up to the byte offset of 'start' within its vector, and
 * the word-granular compare turns marker words into predicate bits.
 * Word granularity suffices because the zapped span's offset and length
 * are multiples of 32 — TODO confirm that caller guarantee.
 */
#define PTR r0          // arg0: start of region to zap (any alignment within one vector)
#define VAL r1          // arg1: fill byte
#define D32_STRIDE r2   // arg2: byte stride between d32 chunks
#define D32_ITERS r3    // arg3: d32 chunks per row
#define ROW_STRIDE r4   // arg4: byte stride between rows
#define ROW_ITERS r5    // arg5: number of rows
#define W r6            // stack arg: width in d32 units (w*32 bytes zapped)
#define PTR_OFF r7      // byte offset of PTR within its 128-byte vector
#define TMP r28         // scratch: d32_stride*d32_iters
#define VVAL v0         // fill value vector
#define VMASK v1        // nonzero-marker vector used to build QMASK
#define VZERO v2        // all-zero vector
#define QMASK q0        // store predicate: true in the w*32-byte window
	.text
	.global padzap_part
	.type padzap_part,@function
	.p2align 6
padzap_part:
	{
	  VAL = vsplatb(VAL)                       // replicate fill byte into all 4 byte lanes
	  W = memw(r29+#0)                         // 7th arg: width in D32 units
	  VZERO = #0                               //
	  VMASK = vsplat(PTR)                      // splat pointer as marker; guaranteed non NULL
	}
	{
	  loop0(.Lpadzap_part_d32loop,D32_ITERS)   // inner loop setup
	  PTR_OFF = and(PTR,#127)                  // offset of start within its vector
	  PTR = and(PTR,#-128)                     // align pointer down to vector boundary
	  W = mpyi(W,#-32)                         // -W*32: valign amount selecting W*32 low bytes
	}
	{
	  VVAL = vsplat(VAL)                       // fill word -> whole vector
	  VMASK = valign(VZERO,VMASK,W)            // keep W*32 marker bytes in the low part
	}
	{
	  M0 = D32_STRIDE                          // post-increment modifier for stores
	  TMP = mpyi(D32_STRIDE,D32_ITERS)         // inner-loop total advance
	  VMASK = vlalign(VMASK,VZERO,PTR_OFF)     // shift marker bytes up to the start offset
	}
	{
	  loop1(.Lpadzap_part_rowloop,ROW_ITERS)   // outer loop setup
	  QMASK = vcmp.gt(VMASK.uw,VZERO.uw)       // predicate = 1 where marker word nonzero
	  ROW_STRIDE = sub(ROW_STRIDE,TMP)         // residual hop to next row
	}
.Lpadzap_part_rowloop:
.Lpadzap_part_d32loop:
	{
	  if (QMASK) vmem(PTR++M0):nt = VVAL       // masked store: only bytes in the window change
	}:endloop0
	{
	  loop0(.Lpadzap_part_d32loop,D32_ITERS)   // re-arm inner loop
	  PTR = add(PTR,ROW_STRIDE)                // step to next row
	}:endloop1
	{
	  r0 = #0                                  // return 0
	  jumpr r31                                //
	}
	.size padzap_part,.-padzap_part
//==================================================================================
#define start r0
#define val r1
#define d32_stride r2
#define d32_iters r3
#define row_stride r4
#define row_iters r5
#define width r6
#define ptr_off2 width
#define ptr_off_w r7
#define ptr_off r8
#define minusone r9
#define start_align r10
#define start_align0 r11
#define mask0 q0
#define mask1 q1
#define sZero v0
#define sMinusOne v1
#define sVal v2
#define sMask v3
.text
.global padzap16_part
.type padzap16_part,@function
.p2align 6
padzap16_part:
{
val = combine(val.l,val.l) //
width = memw(r29+#0) //
ptr_off = and(start,#127) //
d32_stride = add(d32_stride,d32_stride) //
}
{
minusone = #-1 //
start_align = and(start,#-128) //
ptr_off_w = addasl(ptr_off,width,#6) //
width = asl(width,#6) //
}
{
sZero = #0 //
sVal = vsplat(val) //
sMinusOne = vsplat(minusone) //
p0 = cmp.gtu(ptr_off_w,#128) //
}
{
if p0 jump padzap16_part_2 //
width = neg(width) //
m0 = d32_stride //
row_stride = add(row_stride,row_stride) //
}
{
sMask = valign(sZero, sMinusOne, width) //
ptr_off2 = ptr_off //
}
{
sMask = vlalign(sMask, sZero, ptr_off2) //
loop1(padzap16_part_A_lp,row_iters) //
}
{
mask0 = vcmp.gt(sMask.ub, sZero.ub) //
loop0(padzap16_part_A_lp,d32_iters) //
start_align0 = add(start_align,row_stride) //
}
padzap16_part_A_lp:
{
if (mask0) vmem(start_align++m0):nt = sVal //
}:endloop0
{
loop0(padzap16_part_A_lp,d32_iters) //
start_align = start_align0 //
start_align0 = add(start_align0,row_stride) //
}:endloop1
{
jumpr r31 //
}
.p2align 6
padzap16_part_2:
{
ptr_off_w = neg(ptr_off_w) //
mask0 = vsetq(ptr_off) //
p0 = cmp.eq(row_stride,d32_stride) //
}
{
sMask = valign(sZero, sMinusOne, ptr_off_w) //
loop1(padzap16_part_B_lp,row_iters) //
if (p0) jump padzap16_part_3 //
}
{
mask0 = not(mask0) //
mask1 = vcmp.gt(sMask.ub,sZero.ub) //
loop0(padzap16_part_B_lp,d32_iters) //
start_align0 = add(start_align,row_stride) //
}
padzap16_part_B_lp:
{
if (mask1) vmem(start_align+#1):nt = sVal //
}
{
if (mask0) vmem(start_align++m0):nt = sVal //
}:endloop0
{
loop0(padzap16_part_B_lp,d32_iters) //
start_align = start_align0 //
start_align0 = add(start_align0,row_stride) //
}:endloop1
{
jumpr r31 //
}
.p2align 6
padzap16_part_3:
{
mask0 = not(mask0) //
mask1 = vcmp.gt(sMask.ub,sZero.ub) //
loop0(padzap16_part_C_lp,row_iters) //
start_align0 = add(start_align,row_stride) //
}
padzap16_part_C_lp:
{
if (mask1) vmem(start_align+#1):nt = sVal //
}
{
if (mask0) vmem(start_align++m0):nt = sVal //
}:endloop0
{
jumpr r31 //
}
.size padzap16_part,.-padzap16_part |
XiaoMi/nnlib | 7,843 | hexagon/asm_src/gvsuma_h.S | /*
* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
DESCRIPTION
    Perform the sum over a rectangular window of activations using an integral
    image as input, producing the correction term: offset - filt_offset * sum(a(x,y)).
*/
/* --------------------------------------------------------------------------------------- */
/* --------------------------------------------------------------------------------------- */
/*
 * gvsuma_asm(int *ptr_xi, int *ptr_zi, int integral_width, int next_int_width,
 *            int stride_h, int filt_width, | stack: filt_height, out_height, offset)
 *
 * Window sums from an integral image: for each output row, each group of
 * 32 int32 outputs is built from four integral-image corner reads,
 *   win = BR - TR - (BL - TL),
 * realized here as Left = bottom-top at the window's left edge, Right =
 * bottom-top one vector ahead, valign by filt_width*4 bytes to line up the
 * right edge, then out = offset - (right - left). Software-pipelined:
 * [P,x]/[0,x]/[1,x] comment tags mark prologue/iteration/stage of each op.
 */
	.text
	.global gvsuma_asm
	.balign 32
	.type gvsuma_asm, @function
gvsuma_asm:
/* --------------------------------------------------------------------------------------- */
#define ptr_xi r0 //integral input ints
#define ptr_zi r1 //filter output
#define integral_width r2 //pad_l+in_width+pad_r (int32 elements; 32 per vector)
#define next_int_width r3 //byte distance to next row > (in_width + 31)&~31
#define stride_h r4 //vertical stride
#define filt_width r5 //filter width
#define filt_height r6 //rows of filter
#define out_height r7 //number of required output rows
#define offset r8 //K*in_depth*filt_offset*in_offset (additive correction)
#define in_ptrT0 r0 //top row base of filter (1 above actual filter)
#define in_ptrT r9 //top row of filter (1 above actual filter)
#define in_ptrB0 r10 //bottom row base of filter on actual filter)
#define in_ptrB r11 //bottom row of filter on actual filter)
#define out_ptr r13 //temp ptr for output
#define out_width r12 //number of vectors to compute on this row, minus 1
/* --------------------------------------------------------------------------------------- */
#define topLeft v0 //
#define botLeft v1 //
#define Left v2 //bottom-top difference at current vector
#define topRight v4 //
#define botRight v5 //
#define Right v3 //bottom-top difference one vector ahead
#define align_pw v6 //Left/Right aligned to the window's right edge
#define align_m1 v7 //(not referenced below)
#define filt_out v8 //result vector
#define voffset v9 //splatted additive correction
#define vzero v11 //(not referenced below)
/* --------------------------------------------------------------------------------------- */
	{ filt_height = memw(sp+#0<<2)                //stack arg 0: filter height
	  out_width = lsr(integral_width, #5)         //elements / 32 = vectors per row
	  out_height = memw(sp+#1<<2)                 //stack arg 1: output rows
	} {
	  offset = memw(sp+#2<<2)                     //stack arg 2: additive correction
	  stride_h = mpyi(stride_h, next_int_width)   //vertical stride in bytes
	  filt_height = mpyi(filt_height,next_int_width)//filter height in bytes (top->bottom row delta)
	  out_width = add(out_width, #-1)             //inner loop count (last vector done in epilogue)
	} {
	  topLeft = vmem(in_ptrT0+#0)                 //[P, 0] t31__t00
	  in_ptrT = add(in_ptrT0, #128)               //[P, 0] streaming top-row pointer
	  in_ptrB0 = add(ptr_xi, filt_height)         //bottom row = top row + filt_height rows
	} {
	  botLeft.tmp = vmem(in_ptrB0+#0)             //[P, 1] b31__b00 (.tmp: packet-local use only)
	  Left.w = vsub(botLeft.w, topLeft.w)         //[P, 1] bottom - top at left edge
	  loop1(.L_height, out_height)                //outer loop over output rows
	} {
	  voffset = vsplat(offset)                    //[P, 2]K*in_offset*filt_offset
	  in_ptrT0 = add(in_ptrT0, stride_h)          //[P, 2] advance top base to next output row
	  in_ptrB = add(in_ptrB0, #128)               //[P, 1] streaming bottom-row pointer
	} {
	  in_ptrB0 = add(in_ptrB0, stride_h)          //[P, 2] advance bottom base to next output row
	  filt_width = asl(filt_width, #2)            //filter width in bytes (int32 words)
	}
/* --------------------------------------------------------------------------------------- */
	.balign 32
.L_height:
	{ topRight = vmem(in_ptrT++#1)                //[0, 0]b63__b32
	  loop0(.L_width, out_width)                  //inner loop over vectors of this row
	} {
	  botRight.tmp = vmem(in_ptrB++#1)            //[0, 1]
	  Right.w = vsub(botRight.w, topRight.w)      //[0, 1]t63__t32: bottom - top, next vector
	} {
	  out_ptr = ptr_zi                            //output cursor for this row
	  p3 = cmp.eq(out_width, #0)                  //deal with xcase width <= 32
	  if(p3.new) jump:nt .L_skip                  //deal with xcase width <= 32
	}
/* --------------------------------------------------------------------------------------- */
	.balign 32
.L_width:
	{ align_pw = valign(Right, Left, filt_width)  //[0, 3] right-edge value, shifted by filt_width
	  topRight = vmem(in_ptrT++#1)                //[1, 0]b63__b32
	} {
	  filt_out.w = vsub(align_pw.w, Left.w)       //[0, 4] window sum = right - left
	  Left = Right                                //[0, 4] slide window one vector
	  botRight.tmp = vmem(in_ptrB++#1)            //[1, 1]
	  Right.w = vsub(botRight.w, topRight.w)      //[1, 1]t63__t32
	} {
	  filt_out.w = vsub(voffset.w, filt_out.w)    //[0, 5] apply correction: offset - sum
	  vmem(out_ptr++#1) = filt_out.new            //[0, 5] .new = value produced in this packet
	}:endloop0
/* --------------------------------------------------------------------------------------- */
.L_skip:
	{ align_pw = valign(Right, Left, filt_width)  //[1, 3] last vector of the row
	  topLeft = vmem(in_ptrT0+#0)                 //[P, 0] t31__t00: prologue for next row
	  in_ptrT = add(in_ptrT0, #128)               //[P, 0]
	  ptr_zi = add(ptr_zi, next_int_width)        //update output pointer to next row
	} {
	  filt_out.w = vsub(align_pw.w, Left.w)       //[1, 4]
	  botLeft.tmp = vmem(in_ptrB0+#0)             //[P, 1] b31__b00
	  Left.w = vsub(botLeft.w, topLeft.w)         //[P, 1] bottom - top, next row's left edge
	  in_ptrT0 = add(in_ptrT0, stride_h)          //[P, 1]
	} {
	  vmem(out_ptr+#0) = filt_out.new             //[E, 6] stores the value computed below (same packet)
	  filt_out.w = vsub(voffset.w, filt_out.w)    //[1, 5] offset - sum for the last vector
	  in_ptrB = add(in_ptrB0, #128)               //[P, 2]
	  in_ptrB0 = add(in_ptrB0, stride_h)          //[P, 2]
	}:endloop1
/* --------------------------------------------------------------------------------------- */
	{
	  jumpr r31                                   //return; no callee-saved regs were used
	}
/* --------------------------------------------------------------------------------------- */
/* --------------------------------------------------------------------------------------- */
.L_end:
.size gvsuma_asm, .L_end-gvsuma_asm |
XiaoMi/nnlib | 7,925 | hexagon/asm_src/gvmacca_h.S | /*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*======================================================================*/
/* FUNCTIONS : gvmaccimw_asm */
/* */
/* DESCRIPTION */
/* sum of data vectors for support of 2d convolution */
/* */
/* ARCHITECTURE : QDSP6V6 + HVX */
/*======================================================================*/
/* REVISION HISTORY: */
/* ================= */
/* */
/* Author Date Comments */
/* ------------------------------------------------------------- */
/* DJH 03/07/16 created */
/* DJH 05/10/16 added post add for x and y offset*/
/* DJH 07/10/16 rewrote pre-transpose */
/* DJH 09/16/16 fix over prefetch by 16 now 8 */
/* DJH 11/04/16 convert to stream data input */
/*======================================================================*/
/* CYCLE-COUNT: */
/* -> filt_width*depth*N/16+3*N/4+12 */
/* */
/* MEMORY */
/* CODESIZE = 144 bytes */
/* STACK = 48 bytes */
/* ASSUMPTIONS */
/* y and z are 128 byte aligned */
/* x is 4byte aligned */
/* N%1=0 K%16=0 M%32=0 */
/* C MODEL */
/*======================================================================*/
#if 0
void gvmaccimw_asm(uint8 * a, int * c, int N, int M, int K, uchar y_offset) {
int i, j, k;
int32 sum;
uint8 a_val, b_val;
for (j=0; j < M; j++) {
for (i=0; i < N; i++) {
sum = 0;
for (k=0; k < K; k++) {
a_val = a[i*K+k];
sum += a_val ;
}
c[i*M+j] += sum*y_offset ;
}
}
return;
}
#endif
/*=============================================================================*/
/*=============================================================================*/
/*
 * gvmaccimw_asm(uint8_t *ptr_x, int *ptr_xsum, int out_width, int skip_back,
 *               int stride, int filt_width, | stack: out_height, filt_offset)
 *
 * For each output position, sum filt_width input bytes (consumed 16 at a
 * time with vraddub) and accumulate sum*filt_offset into *ptr_xsum:
 *   ptr_xsum[i] += filt_offset * sum(ptr_x[i*stride .. +filt_width-1])
 * for out_width positions per row, out_height rows, stepping the input by
 * skip_back between rows. Assumes filt_width is a multiple of 16 and
 * >= 32 (ki = filt_width/16 - 1 must be a valid loop count) — TODO
 * confirm caller guarantee.
 */
	.text
	.file "gvmaccimw_h.S"
	.global gvmaccimw_asm
	.balign 32
	.type gvmaccimw_asm, @function
gvmaccimw_asm:
/*=============================================================================*/
#define ptr_x r0 //data
#define ptr_xsum r1 //results (accumulated in place)
#define out_width r2 //out_width
#define skip_back r3 //skip to next line wrt stride, pad, filt_width & depth
#define stride r4 //stride*depth
#define filt_width r5 //elements in the filter length
#define out_height r6 //number of vertical lines to perform
#define filt_offset r7 //8bit value to be subtracted
/*=============================================================================*/
#define ki r9 //inner loop count = filt_width/16 - 1
#define ptr_x0 r10 //cursor within the current window
#define sum r11 //running accumulator loaded from ptr_xsum
#define sum1_sum0 r17:16//vraddub accumulator pair
#define sum1 r17 //high-half partial sum
#define sum0 r16 //low-half partial sum
//
#define x07x04x03x00 r13:12//bytes 0..7 of the current 16-byte group
#define x07x04 r13 //
#define x03x00 r12 //
#define x0fx0cx0bx08 r15:14//bytes 8..15 of the current 16-byte group
#define x0fx0c r15 //
#define x0bx08 r14 //
/*=============================================================================*/
	{
	  out_height = memw(sp+#0<<2)               //stack arg 0
	  filt_offset = memw(sp+#1<<2)              //stack arg 1
	} {
	  allocframe(#16)                           //room for two callee-saved pairs
	  ki = lsr(filt_width, #4)                  //k / 16 (16 bytes consumed per iteration)
	} {
	  memd(sp+#0) = r17:16                      //save callee-saved pair (sum1_sum0)
	  memd(sp+#8) = r19:18                      //save callee-saved pair (not otherwise used here)
	  ki = add(ki, #-1)                         //last 16-byte group handled in the epilogue
	}
/*============================================================================*/
	.balign 32
.L_height:
	{
	  loop1(.L_width, out_width)                //[ , P]for(i=0; i < n; i+=4){
	  out_height = add(out_height, #-1)
	}
	.balign 32
.L_width:
	{
	  ptr_x0 = ptr_x                            //window start for this output
	  ptr_x = add(ptr_x, stride)                //ptr_x += stride: next output's window
	  loop0(.L_filt_width, ki)                  //[P, 9]ki is k1/4 - 2
	} {
	  sum1_sum0 = combine(#0, #0)               //clear the accumulator pair
	  x0fx0cx0bx08 = memd(ptr_x0+#8)            //[0, 0] bytes 8..15
	  x07x04x03x00 = memd(ptr_x0++#16)          //[0, 0] bytes 0..7; advance 16
	}
	.balign 32
.L_filt_width:
	{
	  sum1_sum0 += vraddub(x0fx0cx0bx08, x07x04x03x00) //[1,0] add all 16 bytes into the pair
	  x0fx0cx0bx08 = memd(ptr_x0+#8)            //[0, 1] prefetch next group
	  x07x04x03x00 = memd(ptr_x0++#16)          //[0, 1]
	}:endloop0
	{
	  sum1_sum0 += vraddub(x0fx0cx0bx08, x07x04x03x00) //[1,1] epilogue: final 16-byte group
	} {
	  sum0 = add(sum0, sum1)                    //fold pair into one byte-sum
	  sum = memw(ptr_xsum+#0<<2)                //load existing accumulation
	} {
	  sum += mpyi(sum0, filt_offset)            //accumulate sum*filt_offset
	} {
	  memw(ptr_xsum++#1<<2) = sum               //[E, ] store and advance to next output
	}:endloop1
	{
	  ptr_x = add(ptr_x, skip_back)             //[E, ]next line
	  p1 = cmp.eq(out_height, #0)
	  if(!p1.new) jump:t .L_height
	}
/*=============================================================================*/
	{ r17:16 = memd(sp+#0)                      //restore stack
	  r19:18 = memd(sp+#8)                      //Q
	} {
	  dealloc_return                            //Q
	}
.L_end:
/*=============================================================================*/
	.size gvmaccimw_asm, .L_end-gvmaccimw_asm
|
XiaoMi/nnlib | 35,418 | hexagon/asm_src/dwconv2dbbb_unsigned_d32_v60_h.S | /*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
 * FUNCTIONS      : dwconv2dbbb_unsigned_v60_asm / dwconv3x3bbb_unsigned_v60_asm
*
* DESCRIPTION
* Perform 2d convolution using elements along depth, do only simple
* convolution.
* Sums are scaled and saturated to 8bits. Max and Min accumulations are kept.
*
* ARCHITECTURE : QDSP6V60 + HVX
*
* REVISION HISTORY:
* =================
*
* Author Date Comments
* -------------------------------------------------------------
* DJH 07/ 6/17 created
*
* MEMORY
* CODESIZE = 640 bytes + 512 bytes of control tables
* STACK = 80 bytes
* ASSUMPTIONS
* width multiple of 4 depth multiple of 32 aligned to 128bytes
* MODEL
#if 0
* void dwconv2d_cn(
* uint8_t * in_buf, int in_width, int in_height, int depth,
* int stride_width, int stride_height,
* int in_offset, int8_t * filt, int filt_width, int filt_height, int filt_offset,
* int * out_buf, int out_width, int out_height, int adj_x, int adj_y)
* {
* int out_y, in_y_base, out_x, in_x_base;
* int out_z, filt_z, filt_y, filt_x, in_element, filt_element, sum;
* int * outstripe;
* uint8_t * instripe;
* uint8_t * filtstripe;
*
* for (out_y = 0; out_y < out_height; out_y++) {
* in_y_base = out_y * stride_height - adj_y;
* for (out_x = 0; out_x < out_width; out_x++) {
* in_x_base = out_x * stride_width - adj_x;
* outstripe = out_buf+(depth*(out_x+ out_width*out_y));
* for (out_z = 0; out_z < depth; out_z++) {
* sum = 0;
* for (filt_y = 0; filt_y < filt_height; filt_y++) {
* if ((in_y_base + filt_y) >= in_height) continue;
* if ((in_y_base + filt_y) < 0) continue;
* for (filt_x = 0; filt_x < filt_width; filt_x++) {
* if ((in_x_base + filt_x) >= in_width) continue;
* if ((in_x_base + filt_x) < 0) continue;
*
* filtstripe = filt+(depth*(filt_x+ filt_width*filt_y));
* filt_element = filtstripe[out_z] - filt_offset;
*
* instripe = in_buf+depth*(in_x_base+filt_x+in_width*(in_y_base+filt_y));
* in_element = instripe[out_z] - in_offset;
*
* sum += in_element*filt_element;
* }
* }
* outstripe[out_z] = sum;
* }
* }
* }
* return;
* }
#endif
*/
#if 0
/* =========================================================================== */
.text
.file "dwconv2dbbb_unsigned_d32_v60_h.S"
.global dwconv2dbbb_unsigned_v60_asm
.balign 32
.type dwconv2dbbb_unsigned_v60_asm, @function
dwconv2dbbb_unsigned_v60_asm:
/* =========================================================================== */
//h stride assumed 1 vstride 1 or 2 filt width assumed 3 - hstride 2 requires new function
#define ptr_xi r0 //data
#define ptr_wi r1 //weights
#define ptr_zi r2 //results
#define next_in_width_depth r3 //width*depth*(stride_horz==1)
#define next_out_width_depth r4 //next output line amount in bytes
#define next_in_width_32 r5 //width*32*(stride_horz==1)
#define next_out_width_32 r16 //0next output line amount in bytes
#define in_depth r17 //1 total in depth split into rows of depth 32
#define out_width r18 //2is amount of work to be done
#define out_height r19 //3 number of vertical lines to perform
#define filt_height r20 //4 filt_height lines per filter
#define ptr_max r21 //5 maximum and minum buffer
#define recip_level r22 //6 255 / (MAX - MIN) - used to scale to bytes
#define filt_sumi r23 //7 gemsumb
#define stride_vert r24 //8 vertical stride is an option to save ops
#define zshift r6 //9 spare input
#define perm_ctrl r25 //10 ptr to the fancy data shuffling controls
#define filt_offset r25 //
#define filt_off210_ r25 //
#define filt_off_210 r21 //
//-----------------------------------------------------------------
#define s8 r7 //const = 8
#define c4 r6 //deal words
#define out_in_wide_deep_128 r9:8 //
#define out_wide_deep_128 r9 //advance ptr 128 along and pack to current line start
#define in_wide_deep_high_128 r8 //width*depth*filt_height - 128
#define depth r10 //current depth used
#define ptr_x0 r12 //
#define ptr_z0 r13 //
#define ptr_z0_ptr_x0 r13:12 //
#define ptr_x1 r11 //
#define ptr_z1 r14 //
#define ptr_w r15 //
#define filt_sum r22 //
#define col_count r26 //
#define PV(VSRC) .word (0x1DFFE020+VSRC) //debug vec reg
//-----------------------------------------------------------------
#define vrecip v0 //
#define woffset v1 //
#define s00 v2 //
#define s01 v3 //
#define s01s00 v3:2 //
#define s10 v4 //
#define s11 v5 //
#define s01_s v18
#define s11s10 v5:4 //
#define d1_d0 v11:10 //
#define d3_d2 v13:12 //
#define d0 v10 //
#define d1 v11 //
#define d2 v12 //
#define d3 v13 //
#define d1d0 v8 //
#define d3d2 v16 //
#define d3_d0 v17 //
#define perm2 v6 //
#define perm3 v7 //
#define y0 v21 //
#define y1 v24 //
#define x0 v29 //
#define x1 v9 //
#define z3210 v26 //
#define z5432 v28 //
#define z5476 v27 //
#define w_210 v22 //
#define u_210 v2 //
#define w210_ v23 //
#define maxo_maxe v31:30 //
#define mino_mine v15:14 //
#define maxe v30 //
#define mine v14 //
#define maxo v31 //
#define mino v15 //
#define sa0 v15 //
#define sa1 v31 //
#define sa2 v10 //
#define sa3 v11 //
#define SSR <<1:rnd:sat:shift //simplfy mpy instruction
/*=============================================================================*/
{ allocframe(#72) //0th entry on stack (72+8)/4=20
maxe = #0 //
s8 = #8 //shift by 8
} {
memd(sp+#16) = r21:20 //
memd(sp+#24) = r23:22 //
r23 = ##0x7fffffff //max pos
} {
memd(sp+#0) = r17:16 //
memd(sp+#8) = r19:18 //
mine = vsplat(r23) //
} {
memd(sp+#32) = r25:24 //
memd(sp+#40) = r27:26 //
} {
perm_ctrl = memw(sp+#30<<2) //
next_out_width_32 = memw(sp+#20<<2) //
maxe.w = vsub(maxe.w, mine.w) //
} {
in_depth = memw(sp+#21<<2) //
out_width = memw(sp+#22<<2) //
} {
out_height = memw(sp+#23<<2) //
recip_level = memw(sp+#26<<2) //
sa0 = #0 //
} {
vrecip = vsplat(recip_level) //
perm2 = vmem(perm_ctrl+#0) //
ptr_z0 = ptr_zi //
sa1 = #0 //
} {
filt_sumi = memw(sp+#27<<2) //
perm3 = vmem(perm_ctrl+#1) //
out_wide_deep_128=add(next_out_width_depth,#-128)//
in_wide_deep_high_128=add(next_in_width_depth,#-128)//
} {
zshift = memw(r29+#29<<2) //
filt_offset = memw(sp+#31<<2) //
sa2 = #0 //
sa3 = #0 //
} {
filt_height = memw(sp+#24<<2) //
in_depth = lsr(in_depth, #5) //1/32
col_count = out_width //
filt_offset = vsplatb(filt_offset) //
} {
filt_height = add(filt_height, #-1) //correct for vertical loop
ptr_x0 = ptr_xi //
filt_off210_ = asl(filt_offset, #8) //
filt_off_210 = lsr(filt_offset, #8) //
}
/* --------------------------------------------------------------------------- */
.balign 32
.L_height:
.L_width:
{ loop1(.L_depth, in_depth) //number of 32 depths
woffset.cur = vmem(filt_sumi+#0) //
ptr_z1 = ptr_z0 //
s01s00 = vcombine(woffset,woffset) //filter offset * xoffset and bias
} {
loop0(.L_vloop, filt_height) //can have a filter of Nx3 stride=1
ptr_x1 = ptr_x0 //
x0.tmp = vmem(ptr_x0+#0) //[0,0]
y0.b = vshuff(x0.b) //[0,0]
} {
x1.tmp = vmem(ptr_x1+#1) //[0,1]
y1 = vrdelta(x1, perm2) //[0,1]
ptr_x1 =add(ptr_x1, next_in_width_depth) //[0,1]move to next pt in same depth position
} {
z3210.b = vshuff(y0.b) //[0,2]x3210
w_210 = vmem(ptr_wi+#0) //[0,2]
s11s10 = vcombine(woffset,woffset) //
filt_sum = add(filt_sumi, #128) //
} {
z5476 = vdelta(y1, perm3) //[0,3]x7654
u_210.tmp = vmem(ptr_wi+#0) //[0,3]
w210_.w = vasl(u_210.w, s8) //[0,3]
ptr_w = add(ptr_wi, #128) //restart filter stream
}
/* --------------------------------------------------------------------------- */
.balign 32
.L_depth:
.L_vloop:
{ s00.uw += vrmpy(z3210.ub, w_210.ub) //[0,4]filter even output
z5432.h = vshuffo(z5476.h, z3210.h) //[0,4]
x0.tmp = vmem(ptr_x1+#0) //[1,0]
y0.b = vshuff(x0.b) //[1,0]
} {
s01.uw += vrmpy(z3210.ub, w210_.ub) //[0,5]z3210filter oddn output
x1.tmp = vmem(ptr_x1+#1) //[1,1]
y1 = vrdelta(x1, perm2) //[1,1]
ptr_x1 =add(ptr_x1, next_in_width_depth) //[1,1]move to next pt in same depth position
} {
sa0.uw += vrmpy(z3210.ub, filt_off_210.ub) //[1,2]z3210filter oddn output
sa1.uw += vrmpy(z3210.ub, filt_off210_.ub) //[1,2]z3210filter oddn output
} {
s10.uw += vrmpy(z5432.ub, w_210.ub) //[0,7]z5432
z3210.b = vshuff(y0.b) //[1,3]x3210
w_210 = vmem(ptr_w+#0) //[1,3]
} {
s11.uw += vrmpy(z5432.ub, w210_.ub) //[0,8]z5432
z5476 = vdelta(y1, perm3) //[1,4]x7654
u_210.tmp = vmem(ptr_w++#1) //[1,4]
w210_.w = vasl(u_210.w, s8) //[1,4]
} {
sa2.uw += vrmpy(z5432.ub, filt_off_210.ub) //[1,5]z3210filter oddn output
sa3.uw += vrmpy(z5432.ub, filt_off210_.ub) //[1,5]z3210filter oddn output
}:endloop0 //max accumulator=9*255=8f7=12bits-2^24
/* --------------------------------------------------------------------------- */
{ s00.uw += vrmpy(z3210.ub, w_210.ub) //[1,6]z3210
z5432.h = vshuffo(z5476.h, z3210.h) //[1,6]
ptr_x0 = add(ptr_x0, next_in_width_32) //update input ptr to next depth position
loop0(.L_vloop, filt_height) //can have a filter of Nx3 stride = 1
} {
sa0.uw += vrmpy(z3210.ub, filt_off_210.ub) //[1,7]z3210filter oddn output
sa1.uw += vrmpy(z3210.ub, filt_off210_.ub) //[1,7]z3210filter oddn output
} {
s00.w = vsub(s00.w, sa0.w) //
s01.uw += vrmpy(z3210.ub, w210_.ub) //[1,8]z3210
} {
s01.w = vsub(s01.w, sa1.w) //
s10.uw += vrmpy(z5432.ub, w_210.ub) //[1,9]z5432
maxe.w = vmax(maxe.w, s00.w) //find max
} {
sa2.uw += vrmpy(z5432.ub, filt_off_210.ub) //[1,10]z3210filter oddn output
sa3.uw += vrmpy(z5432.ub, filt_off210_.ub) //[1,10]z3210filter oddn output
mine.w = vmin(mine.w, s00.w) //find min
maxe.w = vmax(maxe.w, s01.w) //find max
} {
s01_s.w = vasl(s01.w,zshift) //
s10.w = vsub(s10.w, sa2.w) //
s11.uw += vrmpy(z5432.ub, w210_.ub) //[1,11]z5432
} {
s00.w = vasl(s00.w,zshift) //
s11.w = vsub(s11.w, sa3.w) //
mine.w = vmin(mine.w, s01.w) //find min
maxe.w = vmax(maxe.w, s10.w) //find max
} {
mine.w = vmin(mine.w, s10.w) //find min
s10.w = vasl(s10.w,zshift) //
ptr_z0 = add(ptr_z0, next_out_width_32) //update output ptr to next depth
d0.w = vmpye(s00.w, vrecip.uh) //multiply by 1/max
} {
maxe.w = vmax(maxe.w, s11.w) //find max
d1.w = vmpye(s01_s.w, vrecip.uh) //multiply by 1/max
x0.tmp = vmem(ptr_x0+#0) //[0,0]read first 4 depths
y0.b = vshuff(x0.b) //[0,0]1st part of shuffle 4
} {
mine.w = vmin(mine.w, s11.w) //find min of acc
d0.w += vmpyo(s00.w, vrecip.h):SSR //multiply by 1/max
x1.tmp = vmem(ptr_x0+#1) //[0,1]load 2nd 4 depths
y1 = vrdelta(x1, perm2) //[0,1]1st part of shuffle 4
} {
ptr_x1 = ptr_x0 //
d1.w += vmpyo(s01_s.w, vrecip.h):SSR //multiply by 1/max
sa0 = #0 //
s11.w = vasl(s11.w,zshift) //
} {
d2.w = vmpye(s10.w, vrecip.uh) //multiply by 1/max
sa1 = #0 //
} {
woffset = vmem(filt_sum++#1) //read in sum of taps
ptr_x1 =add(ptr_x1, next_in_width_depth) //[0,1]update ptr to next logical line
z5476 = vdelta(y1, perm3) //[0,3]x7654
d3.w = vmpye(s11.w, vrecip.uh) //multiply by 1/max
} {
d2.w += vmpyo(s10.w, vrecip.h):SSR //multiply by 1/max
d1d0.h = vpacke(d1.w, d0.w) //take upp er 16bits of rnded acc
sa2 = #0 //
} {
d3.w += vmpyo(s11.w, vrecip.h):SSR //multiply by 1/max
sa3 = #0 //
} {
w_210 = vmem(ptr_w+#0) //[0,2]
s00 = woffset //init accumulators
s01 = woffset //init accumulators
} {
d3d2.h = vpacke(d3.w, d2.w) //take upp er 16bits of rnded acc
s10 = woffset //init accumulators
stride_vert = memw(sp+#28<<2) //
} {
s11 = woffset //init accumulators
u_210.tmp = vmem(ptr_w++#1) //[0,3]
w210_.w = vasl(u_210.w, s8) //[0,3]adjust taps to odd locations
z3210.b = vshuff(y0.b) //[0,2]x3210
} {
d3_d0.ub = vpack(d3d2.h, d1d0.h):sat //deal into sequence
vmem(ptr_z1+#0) = d3_d0.new //store quantized bytes
ptr_z1 = ptr_z0 //next output depth
}:endloop1 //end depth
/* --------------------------------------------------------------------------- */
{ ptr_z0_ptr_x0 = vsubw(ptr_z0_ptr_x0, out_in_wide_deep_128) //next inputs/outputs
col_count = add(col_count, #-4) //dec width count
p0 = cmp.eq(col_count, #4) //next line
if(!p0.new) jump:t .L_width //
}
/* --------------------------------------------------------------------------- */
{ ptr_zi=add(ptr_zi,next_out_width_depth) //incrmeent output ptr
ptr_xi+=mpyi(stride_vert,next_in_width_depth)//incrmeent input ptr
col_count = out_width //reset row count
out_height = add(out_height, #-1) //
} {
ptr_z0 = ptr_zi //update to next output
ptr_x0 = ptr_xi //update to next input
p0 = cmp.eq(out_height, #0) //
if(!p0.new) jump:t .L_height //next line
}
/* --------------------------------------------------------------------------- */
{ c4 = #-4 //deal words
ptr_max = memw(sp+#25<<2) //
loop0(.L_peak, #4) //
} {
maxo_maxe = vdeal(maxe, maxe, c4) //deal out odd and even
r17:16 = memd(sp+#0) //restore stack
r19:18 = memd(sp+#8) //restore
}
.L_peak:
{ maxe.w = vmax(maxe.w, maxo.w) //reduce
mino_mine = vdeal(mine, mine, c4) //
} {
mine.w = vmin(mine.w, mino.w) //
} {
maxo_maxe = vdeal(maxe, maxe, c4) //
}:endloop0
{ maxe.w = vmax(maxe.w, maxo.w) //
vmem(ptr_max+#0) = maxe.new //store max
mino_mine = vdeal(mine, mine, c4) //
r25:24 = memd(sp+#32) //restore
} {
mine.w = vmin(mine.w, mino.w) //
vmem(ptr_max+#1) = mine.new //store min
r27:26 = memd(sp+#40) //restore
}
/* --------------------------------------------------------------------------- */
{
r21:20 = memd(sp+#16) //restore
r23:22 = memd(sp+#24) //restore
} {
dealloc_return //return
}
.L_end:
.size dwconv2dbbb_unsigned_v60_asm, .L_end-dwconv2dbbb_unsigned_v60_asm
#else
/* =========================================================================== */
.text
.file "dwconv2dbbb_unsigned_d32_v60_h.S"
.global dwconv3x3bbb_unsigned_v60_asm
.balign 32
.type dwconv3x3bbb_unsigned_v60_asm, @function
dwconv3x3bbb_unsigned_v60_asm:
/* =========================================================================== */
//h stride assumed 1 vstride 1 or 2 filt width assumed 3 - hstride 2 requires new function
/* --------------------------------------------------------------------------- */
/* Depthwise 3x3 convolution, unsigned 8-bit activations in d32 layout (HVX). */
/* Register args (Hexagon ABI): */
/*   r0 = ptr_xi           input activations */
/*   r1 = ptr_wi           packed 3x3 filter weights */
/*   r2 = filt_sumi        per-channel weight sums ("gemsumb") */
/*   r3 = ptr_zi           output, quantized back to bytes */
/*   r4 = next_in_row      input row pitch in bytes */
/*   r5 = next_in_width_32 pitch to next depth-32 column group */
/* Stack args (index numbers match the per-register comments below): */
/*   sp+#0 in_depth, sp+#4 out_width, sp+#8 next_out_row, sp+#12 out_height, */
/*   sp+#16 recip_level, sp+#20 zshift, sp+#24 ptr_max, sp+#28 stride_vert, */
/*   sp+#32 filt_offset (byte, splatted to all lanes). */
/* Running 32-bit max/min of the accumulators are read from and written back */
/* to vmem(ptr_max+#0) / vmem(ptr_max+#1). Callee-saved r17:16..r23:22 are */
/* spilled to a 32-byte frame. */
/* NOTE(review): the width loop is software-pipelined; the [s,n] tags in the */
/* comments give the pipeline stage and original slot number of each op. */
/* --------------------------------------------------------------------------- */
#define ptr_xi r0 //data
#define ptr_wi r1 //weights
#define filt_sumi r2 //gemsumb
#define ptr_zi r3 //results
#define next_in_row r4 //width*depth*(stride_horz==1)
#define next_in_width_32 r5 //width*32*(stride_horz==1)
#define in_depth r8 //1 total in depth
#define out_width r9 //2 is amount of work to be done
#define next_out_row r10 //3 next output line amount in bytes
#define out_height r11 //4 number of vertical lines to perform
#define recip_level r12 //5 255 / (MAX - MIN) - used to scale to bytes
#define zshift r13 //6 spare input
#define ptr_max r14 //7 maximum and minimum buffer
#define stride_vert r15 //8 vertical stride is an option to save ops
#define filt_offset r16 //9
#define filt_off210_ r16 //filt_offset bytes shifted left 8
#define filt_off_210 r17 //filt_offset bytes shifted right 8
#define out_width_in_depth r9:8 //
#define out_height_next_out_row r11:10//
#define zshift_recip_level r13:12 //
#define stride_vert_ptr_max r15:14 //
//-----------------------------------------------------------------
#define s8 r7 //const = 8
#define nrot r6 //rotate amount
#define ptr_x r6 //
#define ptr_x0 r18 //input row 0 of the 3-tap window
#define ptr_x1 r19 //input row 1
#define ptr_x2 r20 //input row 2
#define ptr_z r21 //
#define ptr_w r22 //
#define filt_sum r23 //
#define PV(VSRC) .word (0x1DFFE020+VSRC) //debug vec reg
//-----------------------------------------------------------------
#define vrecip v0 //recip_level splatted across lanes
#define woffset v1 //per-channel weight-sum offset
#define s00 v2 //even-phase accumulator, cols 0..3
#define s01 v3 //odd-phase accumulator, cols 0..3
#define s01s00 v3:2 //
#define s10 v4 //even-phase accumulator, cols 4..7
#define s11 v5 //odd-phase accumulator, cols 4..7
#define s11s10 v5:4 //
#define sa0 v6 //filt_offset correction terms
#define sa1 v7 //
#define sa1sa0 v7:6 //
#define sa2 v8 //
#define sa3 v9 //
#define sa3sa2 v9:8 //
#define l0w_210 v10 //row-0 weights, as loaded
#define l0w210_ v11 //row-0 weights shifted for odd phase
#define l1w_210 v12 //
#define l1w210_ v13 //
#define l2w_210 v14 //
#define l2w210_ v15 //
#define l0z3210 v16 //shuffled row-0 activations
#define l1z3210 v17 //
#define l2z3210 v18 //
#define l0z7654 v19 //NOTE: v19 is shared by all l*z7654 and y* names
#define l1z7654 v19 //
#define l2z7654 v19 //
#define y0 v19 //
#define y1 v19 //
#define y2 v19 //
#define l0z3232 v20 //NOTE: v20 is shared by all l*z3232 and l*z5432 names
#define l1z3232 v20 //
#define l2z3232 v20 //
#define l0z5432 v20 //
#define l1z5432 v20 //
#define l2z5432 v20 //
#define d3210 v21 //packed byte result
#define d0 v22 //
#define d1 v23 //
#define d1_d0 v23:22 //
#define d2 v24 //
#define d3 v25 //
#define d3_d2 v25:24 //
#define maxe v26 //running max of accumulators
#define mine v27 //running min of accumulators
#define d1d0 v21 //
#define d3d2 v24 //
#define x0 v30 //raw input vectors share v30 (consumed via .tmp)
#define x1 v30 //
#define x2 v30 //
#define maxt v2 //
#define mint v3 //
#define SSR <<1:rnd:sat:shift //simplify mpy instruction
/*=============================================================================*/
{ out_width_in_depth = memd(sp+#0) //
out_height_next_out_row = memd(sp+#8) //
sp = add(sp,#-4*8) //allocate spill frame
}{
zshift_recip_level = memd(sp+#(16+4*8)) //stack args are now 4*8 further up
stride_vert_ptr_max = memd(sp+#(24+4*8)) //
nop; nop //
}{
memd(sp+#0) = r17:16 //save callee-saved regs
filt_offset = memw(sp+#(32+4*8)) //
vrecip = vsplat(recip_level) //
s8 = #8 //shift by 8
}{
memd(sp+#8) = r19:18 //
in_depth = lsr(in_depth, #5) //1/32: iterate per depth-32 group
filt_offset = vsplatb(filt_offset) //
maxe = vmem(ptr_max+#0) //seed running max from buffer
}{
memd(sp+#16) = r21:20 //
memd(sp+#24) = r23:22 //
out_width = lsr(out_width,#2) //4 outputs per width iteration
ptr_w = ptr_wi //
}{
filt_off210_ = asl(filt_offset, #8) //
filt_off_210 = lsr(filt_offset, #8) //
filt_sum = filt_sumi //
mine = vmem(ptr_max+#1) //seed running min from buffer
}
/* --------------------------------------------------------------------------- */
.L_height:
{ ptr_x = ptr_xi //
ptr_z = ptr_zi //
p3 = xor(p3,p3) //clear p3: suppress store on first width iter
loop1(.L_depth,in_depth) //
}
.balign 32
.L_depth:
{ woffset = vmem(filt_sum++#1) //read in sum of taps
ptr_x0 = ptr_x //
ptr_x1 = add(ptr_x,next_in_row) //
nop //
}{
x0.tmp = vmem(ptr_x0++#1):nt //
l0z3210.b = vshuff(x0.b) //
ptr_x2 = add(ptr_x1,next_in_row) //
nop //
}{
x1.tmp = vmem(ptr_x1++#1) //
l1z3210.b = vshuff(x1.b) //
loop0(.L_width,out_width) //
nop //
}{
x2.tmp = vmem(ptr_x2++#1) //
l2z3210.b = vshuff(x2.b) //
ptr_x = add(ptr_x,next_in_width_32) //
}{
l0z3210.b = vshuff(l0z3210.b) //
l0w_210.cur = vmem(ptr_w++#1) //
l0w210_.w = vasl(l0w_210.w, s8) //
}{
l1z3210.b = vshuff(l1z3210.b) //
l1w_210.cur = vmem(ptr_w++#1) //
l1w210_.w = vasl(l1w_210.w, s8) //
}{
l2z3210.b = vshuff(l2z3210.b) //
l2w_210.cur = vmem(ptr_w++#1) //
l2w210_.w = vasl(l2w_210.w, s8) //
}
/* --------------------------------------------------------------------------- */
.balign 32
.L_width:
{ x0.tmp = vmem(ptr_x0++#1):nt //[0, 0]
y0.b = vshuff(x0.b) //[0, 0]
s00 = woffset //[0, 0]
d2.w += vmpyo(s10.w, vrecip.h):SSR //[1,27]multiply by 1/max
}{
l0z3232.h = vshuffo(l0z3210.h,l0z3210.h) //[0, 1]
s01 = woffset //[0, 1]
d3.w += vmpyo(s11.w, vrecip.h):SSR //[1,28]multiply by 1/max
}{
l0z7654.b = vshuff(y0.b) //[0, 2]x3210
s00.uw += vrmpy(l0z3210.ub, l0w_210.ub) //[0, 2]filter even output
s10 = woffset //[0, 0]
}{
l0z5432.h = vshuffe(l0z7654.h, l0z3232.h) //[0, 3]
s11 = woffset //[0, 3]
s01.uw += vrmpy(l0z3210.ub, l0w210_.ub) //[0, 3]z3210 filter odd output
}{
sa0.uw = vrmpy(l0z3210.ub, filt_off_210.ub) //[0, 4]filt_offset correction
sa1.uw = vrmpy(l0z3210.ub, filt_off210_.ub) //[0, 4]filt_offset correction
d1d0.h = vpack(d1.w, d0.w):sat //[1,29]take lower 16bits of rnded acc
}{
s10.uw += vrmpy(l0z5432.ub, l0w_210.ub) //[0, 5]z5432
l0z3210 = l0z7654 //[0, 5]slide window for next iter
d3d2.h = vpack(d3.w, d2.w):sat //[1,30]take lower 16bits of rnded acc
}{
x1.tmp = vmem(ptr_x1++#1) //[0, 6]
y1.b = vshuff(x1.b) //[0, 6]
s11.uw += vrmpy(l0z5432.ub, l0w210_.ub) //[0, 6]z5432
}{
l1z3232.h = vshuffo(l1z3210.h,l1z3210.h) //[0, 7]
sa2.uw = vrmpy(l0z5432.ub, filt_off_210.ub) //[0, 7]filt_offset correction
sa3.uw = vrmpy(l0z5432.ub, filt_off210_.ub) //[0, 7]filt_offset correction
}{
l1z7654.b = vshuff(y1.b) //[0, 8]x3210
s00.uw += vrmpy(l1z3210.ub, l1w_210.ub) //[0, 8]filter even output
}{
l1z5432.h = vshuffe(l1z7654.h, l1z3232.h) //[0, 9]
s01.uw += vrmpy(l1z3210.ub, l1w210_.ub) //[0, 9]z3210 filter odd output
}{
sa0.uw += vrmpy(l1z3210.ub, filt_off_210.ub) //[0,10]filt_offset correction
sa1.uw += vrmpy(l1z3210.ub, filt_off210_.ub) //[0,10]filt_offset correction
}{
s10.uw += vrmpy(l1z5432.ub, l1w_210.ub) //[0,11]z5432
l1z3210 = l1z7654 //[0,11]
}{
x2.tmp = vmem(ptr_x2++#1) //[0,12]
y2.b = vshuff(x2.b) //[0,12]
s11.uw += vrmpy(l1z5432.ub, l1w210_.ub) //[0,12]z5432
}{
l2z3232.h = vshuffo(l2z3210.h,l2z3210.h) //[0,13]
sa2.uw += vrmpy(l1z5432.ub, filt_off_210.ub) //[0,13]filt_offset correction
sa3.uw += vrmpy(l1z5432.ub, filt_off210_.ub) //[0,13]filt_offset correction
}{
l2z7654.b = vshuff(y2.b) //[0,14]x3210
s00.uw += vrmpy(l2z3210.ub, l2w_210.ub) //[0,14]filter even output
}{
l2z5432.h = vshuffe(l2z7654.h, l2z3232.h) //[0,15]
s01.uw += vrmpy(l2z3210.ub, l2w210_.ub) //[0,15]z3210 filter odd output
}{
sa0.uw += vrmpy(l2z3210.ub, filt_off_210.ub) //[0,16]filt_offset correction
sa1.uw += vrmpy(l2z3210.ub, filt_off210_.ub) //[0,16]filt_offset correction
l2z3210 = l2z7654 //[0,16]
}{
s10.uw += vrmpy(l2z5432.ub, l2w_210.ub) //[0,17]z5432
s01s00.w = vsub(s01s00.w, sa1sa0.w) //[0,17]remove filt_offset contribution
}{
s11.uw += vrmpy(l2z5432.ub, l2w210_.ub) //[0,18]z5432
maxe.w = vmax(maxe.w, s00.w) //[0,18]find max
mine.w = vmin(mine.w, s00.w) //[0,18]find min
}{
sa2.uw += vrmpy(l2z5432.ub, filt_off_210.ub) //[0,19]filt_offset correction
sa3.uw += vrmpy(l2z5432.ub, filt_off210_.ub) //[0,19]filt_offset correction
maxe.w = vmax(maxe.w, s01.w) //[0,19]find max
s00.w = vasl(s00.w,zshift) //[0,19]
}{
s11s10.w = vsub(s11s10.w, sa3sa2.w) //[0,20]remove filt_offset contribution
mine.w = vmin(mine.w, s01.w) //[0,20]find min
s01.w = vasl(s01.w,zshift) //[0,20]
}{
d0.w = vmpye(s00.w, vrecip.uh) //[0,21]multiply by 1/max
maxe.w = vmax(maxe.w, s10.w) //[0,21]find max
mine.w = vmin(mine.w, s10.w) //[0,21]find min
}{
d1.w = vmpye(s01.w, vrecip.uh) //[0,22]multiply by 1/max
maxe.w = vmax(maxe.w, s11.w) //[0,22]find max
mine.w = vmin(mine.w, s11.w) //[0,22]find min of acc
}{
d0.w += vmpyo(s00.w, vrecip.h):SSR //[0,23]multiply by 1/max
s10.w = vasl(s10.w,zshift) //[0,23]
}{
d1.w += vmpyo(s01.w, vrecip.h):SSR //[0,24]multiply by 1/max
s11.w = vasl(s11.w,zshift) //[0,24]
}{
d2.w = vmpye(s10.w, vrecip.uh) //[0,25]multiply by 1/max
d3210.ub = vpack(d3d2.h, d1d0.h):sat //[1,31]deal into sequence
if p3 vmem(ptr_z++#1):nt = d3210.new //[1,31]store quantized bytes
}{
d3.w = vmpye(s11.w, vrecip.uh) //[0,26]multiply by 1/max
p3 = or(p3,!p3) //set p3: stores enabled after first iter
}:endloop0:endloop1
/* --------------------------------------------------------------------------- */
/* pipeline drain: finish and store the last set of outputs */
{ d2.w += vmpyo(s10.w, vrecip.h):SSR //[1,27]multiply by 1/max
out_height = add(out_height, #-1) //
}{
d3.w += vmpyo(s11.w, vrecip.h):SSR //[1,28]multiply by 1/max
p0 = cmp.eq(out_height, #0) //
}{
d1d0.h = vpack(d1.w, d0.w):sat //[1,29]take lower 16bits of rnded acc
ptr_xi+=mpyi(stride_vert,next_in_row) //increment input ptr
ptr_zi=add(ptr_zi,next_out_row) //increment output ptr
}{
d3d2.h = vpack(d3.w, d2.w):sat //[1,30]take lower 16bits of rnded acc
ptr_w = ptr_wi //rewind weights for next line
filt_sum = filt_sumi //rewind weight sums for next line
}{
d3210.ub = vpack(d3d2.h, d1d0.h):sat //[1,31]deal into sequence
vmem(ptr_z+#0):nt = d3210.new //[1,31]store quantized bytes
if(!p0) jump .L_height //next line
}
/* --------------------------------------------------------------------------- */
{ vmem(ptr_max+#0) = maxe //store max
r17:16 = memd(sp+#0) //restore stack
}{
vmem(ptr_max+#1) = mine //store min
r19:18 = memd(sp+#8) //restore
}{
/* --------------------------------------------------------------------------- */
r21:20 = memd(sp+#16) //restore
r23:22 = memd(sp+#24) //restore
sp = add(sp,#4*8) // pop stack
jumpr r31 //return
}
.L_end:
.size dwconv3x3bbb_unsigned_v60_asm, .L_end-dwconv3x3bbb_unsigned_v60_asm
#endif
|
XiaoMi/nnlib | 9,947 | hexagon/asm_src/gemacca_h.S |
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
*/
/*======================================================================*/
/* FUNCTIONS : gemacca_asm */
/* */
/* DESCRIPTION */
/* X matrix to be accumulated horizontally and added to */
/* sum. */
/* */
/* ARCHITECTURE : QDSP6V6 + HVX */
/*======================================================================*/
/* REVISION HISTORY: */
/* ================= */
/* */
/* Author Date Comments */
/* ------------------------------------------------------------- */
/* DJH 03/07/16 created */
/* DJH 05/10/16 added post add for xsum */
/*======================================================================*/
/* IDEAL-CYCLE-COUNT: */
/* -> 3*K*N/16+6*N+8 */
/* */
/* MEMORY */
/* CODESIZE = 288 bytes */
/* STACK = 64 bytes */
/* ASSUMPTIONS */
/* x, xsum is 8byte aligned */
/* N%4=0 K%8=0 */
/* C MODEL */
/* N = Nlen */
/* K = Klen | Kstride */
/*======================================================================*/
#if 0
void gemacca_cn(uchar *x, int N, int K, int * xsum, int y_offset)
{
    int i, k;
int ksize = 0xffff&(K >> 16);
int kstride = 0xffff& K;
int x_val;
int sum;
for (i=0; i < N; i++) {
sum = xsum[i];
for (k=0; k < ksize; k++) {
x_val = x[i*kstride+k];
sum += x_val ;
}
xsum[i] = sum*y_offset;
}
}
#endif
/*=============================================================================*/
.text
.file "gemacca_h.S"
.global gemacca_asm
.balign 32
.type gemacca_asm, @function
gemacca_asm:
/*=============================================================================*/
/* void gemacca_asm(uchar *x, int N, int K, int *xsum, int y_offset) */
/* Accumulate each row of the x matrix horizontally and fold the scaled row */
/* sum into xsum. K packs ksize (upper 16 bits) and kstride (lower 16 bits). */
/* Two rows are processed per loop1 iteration (n >>= 1), so N must be even */
/* (header states N%4==0, K%8==0; x and xsum must be 8-byte aligned). */
/* NOTE(review): the computed result is xsum[i] += y_offset * rowsum(i) - */
/* y_offset scales only the freshly computed row sum, not the preloaded */
/* xsum[i]; this differs from a naive reading of the disabled C reference */
/* model above (which scales the whole sum) - confirm which is intended. */
/*=============================================================================*/
#define ptr_x r0 //
#define n r1 //n is number of rows to be summed
#define k r2 //k | kstride
#define ptr_xsum r3 //
#define y_offset r4
#define kjump r6 //kstride
#define ki r7 //
#define kstride r8 //alias to k1
#define mkk M1 //
#define kk_1 M0 //skip back
#define c32_kstride r10 //11111111
#define c8_kstride r11 //11111111
#define l1xptri r12 //
#define l1xptr r13 //11111111
#define kstride2 r9 //
#define x07x04_x03x00 r21:20 //111111--
#define x07x04 r21 //111111--
#define x03x00 r20 //11------
#define x17x14_x13x10 r15:14 //-1111111
#define x17x14 r15 //-1111111
#define x13x10 r14 //-111----
#define x27x24_x23x20 r21:20 //-------1
#define x27x24 r21 //-------1
#define x23x20 r20 //-------1
#define x37x34_x33x30 r15:14 //--------
#define x37x34 r15 //--------
#define x33x30 r14 //--------
#define sum01_sum00 r17:16
#define sum11_sum10 r19:18
#define sum01 r17
#define sum00 r16
#define sum11 r19
#define sum10 r18
/*=============================================================================*/
{
allocframe(#32) //spill space for r17:16..r21:20
kjump = lsr(k, #16) //size of k
n = lsr(n, #1) //divide by 2: two rows per iteration
kstride = zxth(k) //
} {
memd(sp+#0) = r17:16 //
memd(sp+#8) = r19:18 //
loop1(.L_loopN, n) //[ , P]for(i=0; i < n; i+=2){
ki = lsr(k, #20) //k/16: inner count, 16 bytes per iter
} {
memd(sp+#16) = r21:20 //
kstride2 = asl(kstride, #1) //2*kstride
l1xptr = addasl(ptr_x, kstride, #1) //l1 fetch 2 klines ahead
c32_kstride = sub(#32, kstride) //zag back to next column of lines
} {
kjump = sub(kstride2, kjump) //skip-back amount to start of next row pair
c8_kstride = sub(#16, kstride) //zag back to next column of dwords
mkk = kstride //stride k
p2 = cmp.eq(r0, r0) //set p2 true (same as or(p2, !p2))
} {
kk_1 = c8_kstride //
l1xptr = addasl(l1xptr, kstride, #1) //[ , P]advance by 2k strip
l1xptri = l1xptr //[ , P]make temp copy
}
/*=============================================================================*/
.balign 32
.L_loopN:
{
sum01_sum00 = combine(#0, #0) //clear row-pair accumulators
sum11_sum10 = combine(#0, #0) //
loop0(.L_loopK, ki) //[ , P]ki is k1/2 - 2
}
/*============================================================================*/
.balign 32
.L_loopK:
{
x17x14_x13x10 = memd(ptr_x+#8) //[0,0]
x07x04_x03x00 = memd(ptr_x++mkk) //[0,0]
} {
sum01_sum00 +=vraddub(x17x14_x13x10, x07x04_x03x00) //[0,1]accumulate 16 bytes of row 0
x27x24_x23x20 = memd(ptr_x++kk_1) //[0,3]6
x37x34_x33x30 = memd(ptr_x+#8) //[0,3]
p2 = not(p2) //[0,3]alternate prefetch step
} {
sum11_sum10+=vraddub(x37x34_x33x30, x27x24_x23x20) //[0,4]accumulate 16 bytes of row 1
//dcfetch(l1xptri+#0) //[0,1]prefetch next line
if(!p2)l1xptri = add(l1xptri, kstride) //[0,1]next line
if(p2) l1xptri = add(l1xptri,c32_kstride) //[0,3]
}:endloop0
{
sum00 = add(sum01, sum00) //reduce row-0 partial sums
sum01 = add(sum11, sum10) //reduce row-1 partial sums
ptr_x = add(ptr_x, kjump) //skip back to next row
} {
sum00 = mpyi(sum00, y_offset) //scale row sums by y_offset
sum01 = mpyi(sum01, y_offset) //
sum11_sum10 = memd(ptr_xsum+#0) //load existing xsum pair
} {
sum00 = add(sum00, sum10) //xsum[i] += y_offset*rowsum
sum01 = add(sum01, sum11) //xsum[i+1] += y_offset*rowsum
l1xptri = l1xptr //[ , P]make temp copy
} {
l1xptr = addasl(l1xptr, kstride, #1) //[ , P]advance by 2k strip
memd(ptr_xsum++#1<<3) = sum01_sum00 //store updated pair
p2 = or(p2, !p2) //reset p2 true for next row pair
}:endloop1
/*=============================================================================*/
{
r17:16 = memd(sp+#0) //restore stack and return
r19:18 = memd(sp+#8) //Q
} {
r21:20 = memd(sp+#16) //Q
} {
dealloc_return //Q
}
.L_end:
/*=============================================================================*/
.size gemacca_asm, .L_end-gemacca_asm
|
XiaoMi/nnlib | 28,722 | hexagon/asm_src/maxpool_d32.S | /*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
.file "maxpool_d32.S"
/*======================================================================*/
/* FUNCTIONS : maxpool_slice_hvx_3x3_stride1 */
/* */
/* DESCRIPTION */
 /*      Get maximum on 3x3 with d32 format                              */
/* */
/* ARCHITECTURE : QDSP6V6 + HVX */
/*======================================================================*/
/* REVISION HISTORY: */
/* ================= */
/* */
/* Author Date Comments */
/* ------------------------------------------------------------- */
/*======================================================================*/
/* CYCLE-COUNT: */
/* -> */
/* */
/* MEMORY */
/* CODESIZE = bytes */
/* STACK = bytes */
/* ASSUMPTIONS */
/* arrays are 128 byte aligned */
/* C MODEL */
/*======================================================================*/
/*=============================================================================*/
#define optr r0
#define iptr r1
#define in_next_row r2
#define out_next_row r3
#define out_vectors r4
#define out_lines r5
/*=============================================================================*/
#define c32 r6
#define c64 r7
#define c64_c32 r7:6
#define iptr0 r8
#define optr0 r9
#define optr0_iptr0 r9:8
#define iptr1 r10
#define optr1 r11
#define pfptr0 r12
#define pfptr1 r13
#define l2param_l r14
#define l2param_h r15
#define l2param r15:14
#define out_lines_d2 r28
#define out_alig out_lines
#define offset optr1
/*=============================================================================*/
#define sl0 v0
#define sl1 v1
#define smaxl012_0 v2
#define smaxl012_1 v3
#define smaxl123_0 v4
#define smaxl123_1 v5
#define sout0_new v6
#define sout0_old v7
#define sout1_new v8
#define sout1_old v9
#define smaxl012d1 v10
#define smaxl123d1 v11
#define sot0 v12
#define sot1 v13
#define sout0 v14
#define sout1 v15
#define smaxl12 v16
#define sl2 sl0
#define sl3 sl0
#define smaxl012d2 smaxl012d1
#define smaxl123d2 smaxl123d1
/*=============================================================================*/
.text
.global maxpool_slice_hvx_3x3_stride1
.balign 32
.type maxpool_slice_hvx_3x3_stride1, @function
maxpool_slice_hvx_3x3_stride1:
/*============================================================================*/
/* int maxpool_slice_hvx_3x3_stride1(uint8_t *optr, const uint8_t *iptr, */
/*        int in_next_row, int out_next_row, int out_vectors, int out_lines) */
/* 3x3 max pooling, stride 1, on d32-format unsigned bytes. */
/* Stack arg at sp+#0 is the output lalign amount (out_alig). */
/* Two output rows are produced per outer iteration; rows 0..3 of the input */
/* window are reduced with vmax, then shifted copies (valign by 32 and 64, */
/* i.e. 1 and 2 depth-32 pixels) give the horizontal 3-wide max. */
/* Uses l2fetch to prefetch the next row pair; returns 0 in r0. */
/* NOTE(review): pipelined with sp2loop0 - [s,n] comment tags give pipeline */
/* stage and original slot. */
/*============================================================================*/
{ p0 = cmp.gt(out_lines,#0) //nothing to do?
if !p0.new r0 = #0 //
if !p0.new jumpr:nt r31 //early return 0
}{
out_lines_d2 = asrrnd(out_lines,#1) //two output rows per outer iter
m0 = in_next_row //
offset = #128 //
c64_c32 = combine(#64,#32) //pixel shifts: 2 and 1 depth-32 units
}{
out_alig = memw(sp+#0) //
offset -= asl(in_next_row,#1) //
loop1(.maxpool3x3s1_d32_outloop,out_lines_d2)//
}{
m1 = offset //zag back after reading 4 input rows
l2param_l = add(out_vectors,#1) //
l2param_h = #128 //
pfptr0 = addasl(iptr,in_next_row,#2) //prefetch 4 rows ahead
}{
pfptr1 = add(pfptr0,in_next_row) //
l2param_l = combine(l2param_h.l,l2param_l.l) // l2param=(0|128|128|out_vectors+1)
iptr1 = add(iptr,in_next_row) //
}
.balign 32
/*============================================================================*/
.maxpool3x3s1_d32_outloop:
{ optr0_iptr0 = combine(optr,iptr) //
p0 = cmp.eq(out_lines_d2,#1) // if (last iteration)
if p0.new l2param_l = #0 // then cancel l2fetch
sl1 = vmem(iptr1++m0) //
}{
l2fetch(pfptr0,l2param) //prefetch next row pair
optr1 = add(optr0,out_next_row) //
}{
l2fetch(pfptr1,l2param) //
}{
sl2.tmp = vmem(iptr1++m0) //
smaxl12.ub = vmax(sl1.ub,sl2.ub) //vertical max of rows 1,2 (shared)
p3=sp2loop0(.maxpool3x3s1_d32_rowloop,out_vectors)//
}{
sl0.tmp = vmem(iptr0++#1) //
smaxl012_1.ub = vmax(sl0.ub,smaxl12.ub) //vertical max rows 0..2 (upper out)
sout0_old = #0 //
sout1_old = #0 //
}{
sl3.tmp = vmem(iptr1++m1) //
smaxl123_1.ub = vmax(smaxl12.ub,sl3.ub) //vertical max rows 1..3 (lower out)
}
.balign 32
/*============================================================================*/
.maxpool3x3s1_d32_rowloop:
{ sl1 = vmem(iptr1++m0) //[1, 0]
sot1.ub = vmax(smaxl123_0.ub,smaxl123d1.ub) //[2, 6]
sout0 = vlalign(sout0_new,sout0_old,out_alig)//[3,11]
if p3 sout0_old = sout0_new //[3,11]
}{
sl2.tmp = vmem(iptr1++m0) //[1, 1]
smaxl12.ub = vmax(sl1.ub,sl2.ub) //[1, 1]
sout1 = vlalign(sout1_new,sout1_old,out_alig)//[3,12]
if p3 sout1_old = sout1_new //[3,12]
}{
smaxl012_0 = smaxl012_1 //[1, 2]
sl0.tmp = vmem(iptr0++#1) //[1, 2]
smaxl012_1.ub = vmax(sl0.ub,smaxl12.ub) //[1, 2]
smaxl012d2=valign(smaxl012_1,smaxl012_0,c64) //[2, 7]shift by 2 pixels
}{
smaxl123_0 = smaxl123_1 //[1, 3]
sl3.tmp = vmem(iptr1++m1) //[1, 3]
smaxl123_1.ub = vmax(smaxl12.ub,sl3.ub) //[1, 3]
smaxl123d2=valign(smaxl123_1,smaxl123_0,c64) //[2, 8]shift by 2 pixels
}{
smaxl012d1=valign(smaxl012_1,smaxl012_0,c32) //[1, 4]shift by 1 pixel
sout0_new.ub = vmax(sot0.ub,smaxl012d2.ub) //[2, 9]horizontal 3-wide max
if p3 vmem(optr0++#1) = sout0 //[3,13]
}{
smaxl123d1=valign(smaxl123_1,smaxl123_0,c32) //[1, 5]shift by 1 pixel
sot0.ub = vmax(smaxl012_0.ub,smaxl012d1.ub) //[1, 5]
sout1_new.ub = vmax(sot1.ub,smaxl123d2.ub) //[2,10]horizontal 3-wide max
if p3 vmem(optr1++#1) = sout1 //[3,14]
}:endloop0
/*============================================================================*/
/* pipeline drain: finish the in-flight stages and store remaining vectors */
{ sot1.ub = vmax(smaxl123_0.ub,smaxl123d1.ub) //[2, 6]
sout0 = vlalign(sout0_new,sout0_old,out_alig)//[3,11]
if p3 sout0_old = sout0_new //[3,11]
if p3 vmem(optr0++#1) = sout0.new //[3,13]
}{
sout1 = vlalign(sout1_new,sout1_old,out_alig)//[3,12]
if p3 sout1_old = sout1_new //[3,12]
if p3 vmem(optr1++#1) = sout1.new //[3,14]
}{
smaxl012d2=valign(smaxl012_1,smaxl012_0,c64) //[2, 7]
iptr += asl(in_next_row,#1) //advance input by 2 rows
optr += asl(out_next_row,#1) //advance output by 2 rows
}{
smaxl123d2=valign(smaxl123_1,smaxl123_0,c64) //[2, 8]
sout0_new.ub = vmax(sot0.ub,smaxl012d2.ub) //[2, 9]
}{
sout1_new.ub = vmax(sot1.ub,smaxl123d2.ub) //[2,10]
pfptr0 += asl(in_next_row,#1) //
pfptr1 += asl(in_next_row,#1) //
}{
sout0 = vlalign(sout0_new,sout0_old,out_alig)//[3,11]
vmem(optr0+#0) = sout0.new //[3,13]
out_lines_d2 = add(out_lines_d2,#-1) //
iptr1 = add(iptr,in_next_row) //
}{
sout1 = vlalign(sout1_new,sout1_old,out_alig)//[3,12]
vmem(optr1+#0) = sout1.new //[3,14]
}:endloop1
/*============================================================================*/
{ r0 = #0 //return 0
jumpr r31 //
}
.maxpool_slice_hvx_3x3_stride1_end:
/*=============================================================================*/
.size maxpool_slice_hvx_3x3_stride1, .-maxpool_slice_hvx_3x3_stride1
/*=============================================================================*/
#if 0 //////////////////////////// >>>> MOVED TO maxpool_3x3s2_d32.S >>>>>>>>>>>>
/*
* Redo these. EJP did horizontal max and then vertical. That's dumb.
* We should do vertical first and then horizontal since the horizontal is more expensive.
*/
#define IN00 v0
#define IN04 v1
#define IN10 v2
#define IN14 v3
#define IN20 v4
#define IN24 v5
#define IN30 v6
#define IN34 v7
#define IN40 v8
#define IN44 v9
#define IN50 v10
#define IN54 v11
#define TMP0 v12
#define TMP1 v13
#define TMP2 v14
#define TMP3 v15
#define TMP4 v16
#define TMP5 v17
#define ACC0 v18
#define ACC1 v19
#define ACC2 v20
#define ACC3 v21
#define ACC4 v22
#define ACC5 v23
#define OLDOUT0 v24
#define OLDOUT1 v25
#define OLDOUT2 v26
#define OLDOUT3 v27
#define OUT0 v28
#define OUT1 v29
#define OUT2 v30
#define OUT3 v31
#define OUTPTR r0
#define INPTR r1
#define IN_OUTPTR r1:0
#define IN_NEXT_ROW r2
#define OUT_NEXT_ROW r3
#define OUT_VECTORS_WIDE r4
#define OUT_LINES_AND_LALIGN r5
#define R32 r6
#define R64 r7
#define R64_32 r7:6
#define NEXT_OUTPTR r8
#define NEXT_INPTR r9
#define NEXT_IN_OUTPTR r9:8
#define OUTER_NEXT_OUTPTR r10
#define OUTER_NEXT_INPTR r11
#define OUTER_NEXT_IN_OUTPTR r11:10
#define RNEG32 r15
/* NOTE CAREFULLY: alias INx8 to INx0 for stride2 */
#define IN08 IN00
#define IN18 IN10
#define IN28 IN20
#define IN38 IN30
#define IN48 IN40
#define P_ODD p3
.text
.global maxpool_slice_hvx_3x3_stride2
.type maxpool_slice_hvx_3x3_stride2,@function
maxpool_slice_hvx_3x3_stride2:
{
OUT_LINES_AND_LALIGN = add(OUT_LINES_AND_LALIGN,#1)
M0 = IN_NEXT_ROW
R64_32 = combine(#64,#32)
}
{
M1 = OUT_NEXT_ROW
OUT_LINES_AND_LALIGN = lsr(OUT_LINES_AND_LALIGN,#1)
OLDOUT0 = vxor(OLDOUT0,OLDOUT0)
OLDOUT1 = vxor(OLDOUT1,OLDOUT1)
}
{
RNEG32 = #-32
loop1(.Louter_3x3s2,OUT_LINES_AND_LALIGN)
OUT_LINES_AND_LALIGN=memw(sp+#0)
}
{
P_ODD = tstbit(OUT_LINES_AND_LALIGN,#5)
}
#if 0 // not sure about this for now...
OUT_LINES_AND_LALIGN = clrbit(OUT_LINES_AND_LALIGN,#5) // clear odd bit
OUT_LINES_AND_LALIGN = lsr(OUT_LINES_AND_LALIGN,#1) // shift right by 1
OUT_LINES_AND_LALIGN = add(OUT_LINES_AND_LALIGN,#64) // Add 64 to handle 2x reduction
#endif
.falign
.Louter_3x3s2:
{
OUTER_NEXT_INPTR = addasl(INPTR,IN_NEXT_ROW,#2)
OUTER_NEXT_OUTPTR = addasl(OUTPTR,OUT_NEXT_ROW,#1)
IN00 = vmem(INPTR++M0)
NEXT_INPTR = add(INPTR,#128)
}
IN10 = vmem(INPTR++M0)
IN20 = vmem(INPTR++M0)
{
IN30 = vmem(INPTR++M0)
loop0(.Linner_3x3s2,OUT_VECTORS_WIDE)
}
{
IN40 = vmem(INPTR)
INPTR = NEXT_INPTR
}
.Linner_3x3s2:
{
NEXT_INPTR = add(INPTR,#128)
IN04.cur = vmem(INPTR++M0)
TMP0 = valign(IN04,IN00,R32)
}
{
IN14.cur = vmem(INPTR++M0)
TMP1 = valign(IN14,IN10,R32)
ACC0.ub = vmax(IN00.ub,TMP0.ub)
}
{
IN24.cur = vmem(INPTR++M0)
TMP2 = valign(IN24,IN20,R32)
ACC1.ub = vmax(IN10.ub,TMP1.ub)
}
{
IN34.cur = vmem(INPTR++M0)
TMP3 = valign(IN34,IN30,R32)
ACC2.ub = vmax(IN20.ub,TMP2.ub)
}
{
IN44.cur = vmem(INPTR+#0)
TMP4 = valign(IN44,IN40,R32)
ACC3.ub = vmax(IN30.ub,TMP3.ub)
INPTR = NEXT_INPTR
}
{
ACC4.ub = vmax(IN40.ub,TMP4.ub)
TMP0 = valign(IN04,IN00,R64)
NEXT_INPTR = add(INPTR,#128)
}
{
TMP1 = valign(IN14,IN10,R64)
ACC0.ub = vmax(ACC0.ub,TMP0.ub)
}
{
TMP2 = valign(IN24,IN20,R64)
ACC1.ub = vmax(ACC1.ub,TMP1.ub)
}
{
TMP3 = valign(IN34,IN30,R64)
ACC2.ub = vmax(ACC2.ub,TMP2.ub)
}
{
TMP4 = valign(IN44,IN40,R64)
ACC3.ub = vmax(ACC3.ub,TMP3.ub)
ACC1.ub = vmax(ACC1.ub,ACC2.ub)
}
{
ACC4.ub = vmax(ACC4.ub,TMP4.ub)
OUT0.ub = vmax(ACC0.ub,ACC1.ub)
IN08.cur = vmem(INPTR++M0)
TMP0 = valign(IN08,IN04,R32)
}
{
ACC3.ub = vmax(ACC3.ub,ACC4.ub)
IN18.cur = vmem(INPTR++M0)
TMP1 = valign(IN18,IN14,R32)
ACC0.ub = vmax(IN04.ub,TMP0.ub)
}
{
OUT1.ub = vmax(ACC2.ub,ACC3.ub)
IN28.cur = vmem(INPTR++M0)
TMP2 = valign(IN28,IN24,R32)
ACC1.ub = vmax(IN14.ub,TMP1.ub)
}
{
IN38.cur = vmem(INPTR++M0)
TMP3 = valign(IN38,IN34,R32)
ACC2.ub = vmax(IN24.ub,TMP2.ub)
}
{
IN48.cur = vmem(INPTR++M0)
TMP4 = valign(IN48,IN44,R32)
ACC3.ub = vmax(IN34.ub,TMP3.ub)
}
{
ACC4.ub = vmax(IN44.ub,TMP4.ub)
TMP0 = valign(IN08,IN04,R64)
}
{
TMP1 = valign(IN18,IN14,R64)
ACC0.ub = vmax(ACC0.ub,TMP0.ub)
}
{
TMP2 = valign(IN28,IN24,R64)
ACC1.ub = vmax(ACC1.ub,TMP1.ub)
}
{
TMP3 = valign(IN38,IN34,R64)
ACC2.ub = vmax(ACC2.ub,TMP2.ub)
}
{
TMP4 = valign(IN48,IN44,R64)
ACC3.ub = vmax(ACC3.ub,TMP3.ub)
}
{
ACC4.ub = vmax(ACC4.ub,TMP4.ub)
ACC1.ub = vmax(ACC1.ub,ACC2.ub)
}
/* OUT0 and OUT2 have the upper output line */
/* OUT1 and OUT3 have the lower output line */
/* BUT we computed all the values across, we only need half of them. */
/* Which half to take? With stride 2, depends on required_w_before! */
/* So we can lalign first x4 and then deal, or we can deal first, select, and align */
/* We choose to deal first, then select, and then align */
{
ACC3.ub = vmax(ACC3.ub,ACC4.ub)
OUT2.ub = vmax(ACC0.ub,ACC1.ub)
}
{
OUT3.ub = vmax(ACC2.ub,ACC3.ub)
vdeal(OUT2,OUT0,RNEG32)
}
{
vdeal(OUT3,OUT1,RNEG32)
NEXT_OUTPTR = add(OUTPTR,#128)
if (P_ODD) OUT0 = OUT2
}
/* Now we need to align these and store. */
{
if (P_ODD) OUT1 = OUT3
}
{
TMP0 = VLALIGN(OUT0,OLDOUT0,OUT_LINES_AND_LALIGN)
//TMP0 = OUT0
VMEM(OUTPTR++M1) = TMP0.new
OLDOUT0 = OUT0
}
{
TMP1 = VLALIGN(OUT1,OLDOUT1,OUT_LINES_AND_LALIGN)
//TMP1 = OUT1
OLDOUT1 = OUT1
VMEM(OUTPTR++M1) = TMP1.new
}
/* NOTE that IN08 and IN00 are aliased, so we're ready to go for the next iteration */
{
IN_OUTPTR = NEXT_IN_OUTPTR
}:endloop0
{
IN_OUTPTR = OUTER_NEXT_IN_OUTPTR
}:endloop1
{
r0 = #0
jumpr r31
}
.size maxpool_slice_hvx_3x3_stride2,.-maxpool_slice_hvx_3x3_stride2
#endif //////////////////////////// <<<<< MOVED TO maxpool_3x3s2_d32.S <<<<<<<<<<<
#if 1
/*======================================================================*/
/* FUNCTIONS : maxpool_slice_hvx_2x2_stride2 */
/* */
/* DESCRIPTION */
 /*      Get maximum on 2x2 with d32 format and stride_width=2*/
/* */
/* ARCHITECTURE : QDSP6V6 + HVX */
/*======================================================================*/
/* REVISION HISTORY: */
/* ================= */
/* */
/* Author Date Comments */
/* ------------------------------------------------------------- */
/*======================================================================*/
/* CYCLE-COUNT: */
/* -> */
/* */
/* MEMORY */
/* CODESIZE = bytes */
/* STACK = bytes */
/* ASSUMPTIONS */
/* arrays are 128 byte aligned */
/* C MODEL */
/*======================================================================*/
/*=============================================================================*/
//#define optr r0
//#define iptr r1
//#define in_next_row r2
//#define out_next_row r3
//#define out_vectors r4
//#define out_lines r5
/*=============================================================================*/
#define cn32 r6
#define adj r7
//#define iptr0 r8
//#define optr0 r9
//#define optr0_iptr0 r9:8
//#define iptr1 r10
#define pfptr r11
#define c128 r12
#define lines_out r28
//#define l2param_l r14
//#define l2param_h r15
//#define l2param r15:14
//#define out_alig out_lines
/*=============================================================================*/
#define sl0x0 v0
#define sl0x1 v1
#define sl1x0 v2
#define sl1x1 v2
#define smaxv0 v3
#define smaxv1 v4
#define smaxv2 v5
#define smaxT_e v6
#define smaxT_o v7
#define dmaxT_oe v7:6
#define sout_old v8
#define sout_new v9
#define sout v10
#define smaxT0 smaxT_e
#define smaxT1 smaxT_o
/*=============================================================================*/
   .text
   .global maxpool_slice_hvx_2x2_stride2
   .balign 32
   .type maxpool_slice_hvx_2x2_stride2, @function
//-----------------------------------------------------------------------------
// maxpool_slice_hvx_2x2_stride2(optr, iptr, in_next_row, out_next_row,
//                               out_vectors, out_lines, out_alig @ sp+0)
// HVX 2x2 max-pool with stride 2 on d32-layout data, one depth slice.
// Each output row is the max of two input rows (iptr0/iptr1); the
// horizontal 2:1 reduction is done by vlalign-ing adjacent results and
// vdeal(-32)-ing even/odd 32-byte pixel groups apart before a final vmax.
// The row loop is a 4-stage software pipeline (the [stage, step] tags in
// the comments below); sp3loop0 makes p3 true once the pipeline is primed.
// l2fetch runs two row-pairs ahead; it is cancelled for the last 2 rows.
// Returns 0 in r0.  Register roles come from the #defines above.
//-----------------------------------------------------------------------------
maxpool_slice_hvx_2x2_stride2:
   { p0 = cmp.gt(out_lines,#0)                  // any output rows requested?
     if !p0.new r0 = #0                         // return 0
     if !p0.new jumpr:nt r31                    // early exit if out_lines <= 0
   }{
     lines_out = out_lines                      // working copy of row count
     out_alig = memw(sp+#0)                     // 7th arg: output left-align amount
     l2param_l = asl(out_vectors,#1)            // input vectors per row = 2*out_vectors
     cn32 = #-32                                // vdeal control: swap 32-byte groups
   }{
     p2 = tstbit(out_alig,#5)                   // starting at an odd 32-byte pixel?
     if p2.new l2param_l = add(l2param_l,#1)    // need to read extra 128b
     l2param_h = #128                           // l2fetch block width
     loop1(.maxpool2x2s2_d32_outloop,lines_out) // outer loop: one pass per output row
   }{
     adj = mux(p2,#96,#0)                       // vlalign amount for odd/even start
     c128 = #128                                // one vector, for pointer bumps
     pfptr = addasl(iptr,in_next_row,#2)        // prefetch starts 4 input rows ahead
     l2param_l = combine(l2param_h.l,l2param_l.l) // l2param=(0|128|128|2*out_vectors+0/1)
   }
   .balign 32
/*============================================================================*/
.maxpool2x2s2_d32_outloop:
   { optr0_iptr0 = combine(optr,iptr)           // per-row working pointers
     p0 = cmp.gt(lines_out,#2)                  // if (last 2 iterations)
     if !p0.new l2param_l = #0                  // then cancel l2fetch
     p3=sp3loop0(.maxpool2x2s2_d32_rowloop,out_vectors) // p3 true from 3rd pass on
                                                //
   }{
     l2fetch(pfptr,l2param)                     // prefetch first row of a later pair
     pfptr = add(pfptr,in_next_row)             //
   }{
     l2fetch(pfptr,l2param)                     // prefetch second row of that pair
     pfptr = add(pfptr,in_next_row)             //
   }{
     iptr1 = add(iptr,in_next_row)              // lower input row of the 2x2 window
     lines_out = add(lines_out,#-1)             //
     sout_old = #0                              // no previous output vector yet
     if !p2 jump .maxpool2x2s2_d32_rowloop      // even start: no extra preload needed
   }{
     sl0x0 = vmem(iptr0++#1)                    // odd start: preload row0 vector
     nop                                        //
   }{
     sl1x0.tmp = vmem(iptr1++#1)                // preload row1 vector (.tmp: consumed here)
     smaxv2.ub = vmax(sl0x0.ub,sl1x0.ub)        // vertical max of the extra vector
   }
   .balign 32
/*============================================================================*/
.maxpool2x2s2_d32_rowloop:
   { sl0x0 = vmem(iptr0++#1)                    //[1, 0] load row0
     smaxT0 = vlalign(smaxv1,smaxv0,adj)        //[2, 4] align pair for horiz. reduce
     sout_new.ub = vmax(smaxT_e.ub,smaxT_o.ub)  //[3, 7] horizontal max (even vs odd)
     if p3 sout_old = sout_new                  //[4, 9] carry for output realign
   }{
     sl0x1 = vmem(iptr0++#1)                    //[1, 1] load row0, next vector
     smaxv0 = smaxv2                            //[1, 1] rotate pipeline registers
     smaxT1 = vlalign(smaxv2,smaxv1,adj)        //[2, 5]
     if p3 optr0 = add(optr0,c128)              //[4,10] advance output pointer
   }{
     sl1x0.tmp = vmem(iptr1++#1)                //[1, 2] load row1
     smaxv1.ub = vmax(sl0x0.ub,sl1x0.ub)        //[1, 2] vertical max
     sout = vlalign(sout_new,sout_old,out_alig) //[3, 8] realign to output offset
     vmem(optr0+#0) = sout.new                  //[3, 8]for V65: use if P3 vmem(++#1)
   }{
     sl1x1.tmp = vmem(iptr1++#1)                //[1, 3] load row1, next vector
     smaxv2.ub = vmax(sl0x1.ub,sl1x1.ub)        //[1, 3] vertical max
     dmaxT_oe = vdeal(smaxT1,smaxT0,cn32)       //[2, 6] split even/odd 32B pixel groups
   }:endloop0
/*============================================================================*/
// epilogue: drain the 4-stage pipeline for this row (same tags as above)
   { smaxT0 = vlalign(smaxv1,smaxv0,adj)        //[2, 4]
     sout_new.ub = vmax(smaxT_e.ub,smaxT_o.ub)  //[3, 7]
     if p3 sout_old = sout_new                  //[4, 9]
   }{
     smaxT1 = vlalign(smaxv2,smaxv1,adj)        //[2, 5]
     if p3 optr0 = add(optr0,c128)              //[4,10]
     p3 = cmp.gt(out_vectors,#1)                // drain store only if >1 output vec
   }{
     sout = vlalign(sout_new,sout_old,out_alig) //[3, 8]
     if p3 vmem(optr0++#1) = sout.new           //[3, 8]
   }{
     dmaxT_oe = vdeal(smaxT1,smaxT0,cn32)       //[2, 6]
     if p3 sout_old = sout_new                  //[4, 9]
   }{
     sout_new.ub = vmax(smaxT_e.ub,smaxT_o.ub)  //[3, 7]
     iptr += asl(in_next_row,#1)                // next input row pair (vert stride 2)
     optr = add(optr,out_next_row)              // next output row
   }{
     sout = vlalign(sout_new,sout_old,out_alig) //[3, 8]
     vmem(optr0+#0) = sout.new                  //[3, 8] final store for this row
   }:endloop1
/*============================================================================*/
   { r0 = #0                                    // success
     jumpr r31                                  //
   }
.maxpool_slice_hvx_2x2_stride2_end:
/*=============================================================================*/
   .size maxpool_slice_hvx_2x2_stride2, .-maxpool_slice_hvx_2x2_stride2
/*=============================================================================*/
#else
	.text
	.global maxpool_slice_hvx_2x2_stride2
	.type maxpool_slice_hvx_2x2_stride2,@function
// Alternative implementation (selected by the surrounding #if/#else).
// All register/vector names here are macros #defined earlier in this file
// (outside this view) — roles below are inferred from usage; confirm against
// the #define block.  Each outer iteration consumes 4 input rows and emits
// 2 output rows; the horizontal 2:1 step valigns by 32 (d32 layout: the
// adjacent pixel is +32 bytes), vmax-es, then vdeal(-32) separates the
// even/odd pixel columns so P_ODD can pick the required phase.
maxpool_slice_hvx_2x2_stride2:
	{
	  OUT_LINES_AND_LALIGN = add(OUT_LINES_AND_LALIGN,#1)	// round line count up
	  M0 = IN_NEXT_ROW					// modifier reg: input row pitch
	  R64_32 = combine(#64,#32)
	}
	{
	  M1 = OUT_NEXT_ROW					// modifier reg: output row pitch
	  OUT_LINES_AND_LALIGN = lsr(OUT_LINES_AND_LALIGN,#1)	// 2 input rows -> 1 output row
	  OLDOUT0 = vxor(OLDOUT0,OLDOUT0)			// clear carried output vectors
	  OLDOUT1 = vxor(OLDOUT1,OLDOUT1)
	}
	{
	  RNEG32 = #-32						// vdeal control: 32-byte groups
	  loop1(.Louter_2x2s2,OUT_LINES_AND_LALIGN)
	  OUT_LINES_AND_LALIGN=memw(sp+#0)			// reload: now holds the lalign amount
	}
	{
	  P_ODD = tstbit(OUT_LINES_AND_LALIGN,#5)		// odd 32-byte pixel phase?
	}
#if 0	// not sure about this for now...
	OUT_LINES_AND_LALIGN = clrbit(OUT_LINES_AND_LALIGN,#5)	// clear odd bit
	OUT_LINES_AND_LALIGN = lsr(OUT_LINES_AND_LALIGN,#1)	// shift right by 1
	OUT_LINES_AND_LALIGN = add(OUT_LINES_AND_LALIGN,#64)	// Add 64 to handle 2x reduction
#endif
	.falign
.Louter_2x2s2:
	{
	  OUTER_NEXT_INPTR = addasl(INPTR,IN_NEXT_ROW,#2)	// +4 input rows per outer pass
	  OUTER_NEXT_OUTPTR = addasl(OUTPTR,OUT_NEXT_ROW,#1)	// +2 output rows per outer pass
	  IN00 = vmem(INPTR++M0)				// row 0, first vector
	  NEXT_INPTR = add(INPTR,#128)				// next column position
	}
	IN10 = vmem(INPTR++M0)					// row 1
	IN20 = vmem(INPTR++M0)					// row 2
	{
	  IN30 = vmem(INPTR+#0)					// row 3
	  loop0(.Linner_2x2s2,OUT_VECTORS_WIDE)			// inner loop: across the width
	  INPTR = NEXT_INPTR
	}
.Linner_2x2s2:
	{
	  NEXT_INPTR = add(INPTR,#128)
	  IN04.cur = vmem(INPTR++M0)				// row 0, next vector (.cur: use now)
	  TMP0 = valign(IN04,IN00,R32)				// shift in the +32B neighbour
	}
	{
	  IN14.cur = vmem(INPTR++M0)
	  TMP1 = valign(IN14,IN10,R32)
	  ACC0.ub = vmax(IN00.ub,TMP0.ub)			// horizontal max, row 0
	}
	{
	  IN24.cur = vmem(INPTR++M0)
	  TMP2 = valign(IN24,IN20,R32)
	  ACC1.ub = vmax(IN10.ub,TMP1.ub)			// horizontal max, row 1
	}
	{
	  IN34.cur = vmem(INPTR+#0)
	  TMP3 = valign(IN34,IN30,R32)
	  ACC2.ub = vmax(IN20.ub,TMP2.ub)			// horizontal max, row 2
	  INPTR = NEXT_INPTR
	}
	{
	  ACC3.ub = vmax(IN30.ub,TMP3.ub)			// horizontal max, row 3
	  NEXT_INPTR = add(INPTR,#128)
	}
	{
	  OUT0.ub = vmax(ACC0.ub,ACC1.ub)			// vertical max rows 0+1 (upper out row)
	  OUT1.ub = vmax(ACC2.ub,ACC3.ub)			// vertical max rows 2+3 (lower out row)
	  IN08.cur = vmem(INPTR++M0)				// second column: same pattern again
	  TMP0 = valign(IN08,IN04,R32)
	}
	{
	  IN18.cur = vmem(INPTR++M0)
	  TMP1 = valign(IN18,IN14,R32)
	  ACC0.ub = vmax(IN04.ub,TMP0.ub)
	}
	{
	  IN28.cur = vmem(INPTR++M0)
	  TMP2 = valign(IN28,IN24,R32)
	  ACC1.ub = vmax(IN14.ub,TMP1.ub)
	}
	{
	  IN38.cur = vmem(INPTR++M0)
	  TMP3 = valign(IN38,IN34,R32)
	  ACC2.ub = vmax(IN24.ub,TMP2.ub)
	  OUT2.ub = vmax(ACC0.ub,ACC1.ub)
	}
	{
	  ACC3.ub = vmax(IN34.ub,TMP3.ub)
	}
	/* OUT0 and OUT2 have the upper output line */
	/* OUT1 and OUT3 have the lower output line */
	/* BUT we computed all the values across, we only need half of them. */
	/* Which half to take? With stride 2, depends on required_w_before! */
	/* So we can lalign first x4 and then deal, or we can deal first, select, and align */
	/* We choose to deal first, then select, and then align */
	{
	  OUT3.ub = vmax(ACC2.ub,ACC3.ub)
	  vdeal(OUT2,OUT0,RNEG32)				// split even/odd pixel columns
	}
	{
	  vdeal(OUT3,OUT1,RNEG32)
	  NEXT_OUTPTR = add(OUTPTR,#128)
	  if (P_ODD) OUT0 = OUT2				// odd phase: take the other half
	}
	/* Now we need to align these and store. */
	{
	  if (P_ODD) OUT1 = OUT3
	}
	{
	  TMP0 = VLALIGN(OUT0,OLDOUT0,OUT_LINES_AND_LALIGN)	// realign upper row to dest offset
	  //TMP0 = OUT0
	  VMEM(OUTPTR++M1) = TMP0.new
	  OLDOUT0 = OUT0					// carry for next column's lalign
	}
	{
	  TMP1 = VLALIGN(OUT1,OLDOUT1,OUT_LINES_AND_LALIGN)	// realign lower row to dest offset
	  //TMP1 = OUT1
	  OLDOUT1 = OUT1
	  VMEM(OUTPTR++M1) = TMP1.new
	}
	/* NOTE that IN08 and IN00 are aliased, so we're ready to go for the next iteration */
	{
	  IN_OUTPTR = NEXT_IN_OUTPTR				// step both pointers one column
	}:endloop0
	{
	  IN_OUTPTR = OUTER_NEXT_IN_OUTPTR			// step to the next 4-row group
	}:endloop1
	{
	  r0 = #0						// success
	  jumpr r31
	}
	.size maxpool_slice_hvx_2x2_stride2,.-maxpool_slice_hvx_2x2_stride2
#endif
#undef IN00
#undef IN04
#undef IN10
#undef IN14
#undef IN20
#undef IN24
#undef IN30
#undef IN34
#undef IN40
#undef IN44
#undef IN50
#undef IN54
#undef TMP0
#undef TMP1
#undef TMP2
#undef TMP3
#undef TMP4
#undef TMP5
#undef ACC0
#undef ACC1
#undef ACC2
#undef ACC3
#undef ACC4
#undef ACC5
#undef OLDOUT0
#undef OLDOUT1
#undef OLDOUT2
#undef OLDOUT3
#undef OUT0
#undef OUT1
#undef OUT2
#undef OUT3
#undef OUTPTR
#undef INPTR
#undef IN_OUTPTR
#undef IN_NEXT_ROW
#undef OUT_NEXT_ROW
#undef OUT_VECTORS_WIDE
#undef OUT_LINES_AND_LALIGN
#undef R32
#undef R64
#undef R64_32
#undef NEXT_OUTPTR
#undef NEXT_INPTR
#undef NEXT_IN_OUTPTR
#undef OUTER_NEXT_OUTPTR
#undef OUTER_NEXT_INPTR
#undef OUTER_NEXT_IN_OUTPTR
#undef RNEG32
/* NOTE CAREFULLY: alias INx8 to INx0 for stride2 */
#undef IN08
#undef IN18
#undef IN28
#undef IN38
#undef IN48
#undef P_ODD
|
XiaoMi/nnlib | 19,561 | hexagon/asm_src/vmemcpy_2d_h.S |
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
.text
.file "vmemcpy_2d_h.S"
//
// HVX rectangle copy operation
//
// void vmemcpy_2d_asm(
// unsigned wid, // must be > 0; bytes
// unsigned ht, // must be > 0; rows
// void *dst, // destination address, any allowed
// int dst_pitch, // row pitch of dest; must be multiple of vector
// void const *src, // source address, any allowed
// int src_pitch) // row pitch of source; must be multiple of vector
//
// This operation does any rectangle copy using vector operations; it uses masked
// writes as needed to avoid over-writing portions of the output buffer outside the
// specified area.
// The row pitches must be vector aligned (so whatever strategy is used on one row,
// is used on all others). When the width is smallish - one or two dest vectors per row -
// there is no 'horizontal' loop, just the height loop.
//
// All reads & writes are done with vector ops; it will only read source vectors which
// contain bytes that need to be copied to the output.
//
// The input and output regions must be disjoint (though, the address ranges
// may overlap. e.g. copying a tile horizontally within one image buffer is fine).
//
// The row-pitch condition is not checked. The wid >0, ht >0 condition is checked, and
// the routine does nothing if this is not the case.
//
// This will be a little faster for __HEXAGON_ARCH__ >= 62
// (uses vsetq2, and conditional vector load)
//
//
// There is also vmemcpy_2d_general_asm
// .. which has exactly the same form but no restriction on src/dst pitch.
// It will make multiple calls to vmemcpy_2d_asm as needed; for instance
// if the src/dst pitch are a multiple of 1/4 vector (but not 1/2) it will
// make 4 calls, each of which copies every 4th row.
//
#if !( defined(__HVXDBL__) || __HVX_LENGTH__ == 128)
#warning Probably you don't really want HVX64
#define VECN 64
#define VECLOGN 6
#else
#define VECN 128
#define VECLOGN 7
#endif
// r0 -> wid
// r1 -> ht
// r2 -> dstp
// r3 -> dst_pitch
// r4 -> srcp
// r5 -> src_pitch
//
//
	.globl vmemcpy_2d_asm
	.balign 16
	.type vmemcpy_2d_asm,@function
// On entry (Hexagon calling convention, args in r0..r5):
//   r0=wid (bytes)  r1=ht (rows)  r2=dst  r3=dst_pitch  r4=src  r5=src_pitch
// q0 becomes the 'first-vector' mask and q1 the 'last-vector' mask of each
// row; src/dst pitches must be vector multiples (not checked here).
vmemcpy_2d_asm:
	{
	   r6 = sub(r4,r2);		// src-dst: number for valign
	   r7 = and(r2,#VECN-1);	// destination offset
	   r10 = and(r4,#VECN-1);	// source offset
	   q0 = vsetq(r2);		// the 'left' mask (needs ~ though)
	}
	// set q0 up to be 0 0 0 .. 1 1 1
	// to be the 'start' mask for the first output vector in each row;
	// and q1 to  1 1 1 .. 0 0 0  to be the 'end' mask for the last vector.
	// If there is only one output vector per row we'll and them later.
	{
	   r15 = min(r0,r1);		// protect against ht <=0, wid <= 0
	   r2 = sub(r2,r7);		// align the dest address.
	   r8 = add(r7,r0);		// if this > VECN, needs >1 vector write per row
	   r6 = and(r6,#VECN-1);
	}
	{
	   p0 = !cmp.gt(r15,#0);	//protect against ht,wid <= 0
	   if(p0.new) jump:nt .L_alldone;	// done if ht <= 0 or wid <= 0
#if __HEXAGON_ARCH__ < 62
	   r9 = and(r8,#VECN-1);	// test if end falls on a boundary
	   q1 = or(q0,!q0);		// the 'end' mask (last partial write - all 1's if falls on a boundary).
	}
	{  p1 = cmp.eq(r9,#0);		//
	   q0 = not(q0);		// correct the 'start' mask
	   if( p1.new) jump:nt .L_mskskip;
	}
	{
	   q1 = vsetq(r8);		// this the 'end' mask in the general case.
	}
.L_mskskip:
#else
	   q0 = not(q0);		// correct the 'start' mask
	   q1 = vsetq2(r8);		// V62+: vsetq2 handles the boundary case directly
	}
#endif
	{
	   // test if > 1 output vector per row is needed.
	   r8 = add(r8,#-(VECN+1));	// for later...
	   P0=!cmp.gt(r8,#VECN);	//
	   if( P0.new) jump:nt .L_short_rows;	// r8 >= 0 if not taken.
	}
	//
	// OK, we are writing at least two output vectors, which means there is at least a 'first' vector (using q0)
	// and zero or more 'middle' vectors, and a 'last' vector (using q1).
	//  - Either or both of q0,q1 could be all 1's (but last of q0, and first of q1, are always 1).
	//  - the number of vectors read could be 1 less, same as, or one more, than the number written.
	// We take a special case for src and dest having the same alignment, which is simpler since there's no rotate
	// and the # read is always the same as the number written.
	// First step: find the number of 'middle' vectors, and also check that alignment (r6=0)
	// note: lower bits of r8 are the offset of the last dst byte in vector
	// by adding r6, we get the offset of the last src byte in vector.
	//
	{
	   r11 = lsr(r8,#VECLOGN)	// this is the # of 'middle' vecs (>=0)
	   r15 = add(r8,r6);		// lower bits are the offset of last byte in src vector
	   p0 = cmp.eq(r6,#0);		// do both have the same alignment?
	   if( !p0.new) jump:t .L_general_unaligned;
	}
	//
	// the general aligned case
	//
	{
	   p1 = cmp.gt(r11,#0);		// true if any 'middle' vectors to deal with
	   if( !p1.new) jump:t .L_ga_aligned_two;
	   r4 = sub(r4,r7);		// align the src address.
	   loop1(.L_ga_outer_loop,r1);	// loop per row
	}
.L_ga_outer_loop:
	{
	   r14 = add(r4,r5);		// find next row source address
	   r15 = add(r2,r3);		// find next row dest address
	   v0 = vmem(r4++#1);		// load first vector
	   loop0(.L_ga_inner_loop,r11);
	}
	{
	   if( q0 ) vmem(r2++#1)= v0;	// first vector store (masked)
	   v0= vmem(r4++#1);		// load next...
	}
	.falign
.L_ga_inner_loop:
	{
	   vmem(r2++#1) = v0;		// just copy...
	   v0= vmem(r4++#1);
	}:endloop0;
	{
	   if( q1 ) vmem(r2+#0)= v0;	// last vector store (masked)
	   r2 = r15;			// move addresses to next row.
	   r4 = r14;
	}:endloop1;
.L_alldone:
	{ jumpr r31; }			// !! all done
	/// aligned, two per row.
.L_ga_aligned_two:
	{
	   loop0(.L_ga_outer_loop_aligned_2,r1);	// loop per row
	}
	.falign
.L_ga_outer_loop_aligned_2:
	{
	   v0.cur = vmem(r4+#0);	// load first vector
	   if( q0 ) vmem(r2+#0)= v0;	// first vector store
	}
	{
	   v0.cur = vmem(r4+#1);	// load second vector
	   if( q1 ) vmem(r2+#1)= v0;	// last vector store
	   r2 = add(r2,r3);		// find next row dest address
	   r4 = add(r4,r5);		// find next row source address
	}:endloop0;
	{ jumpr r31; }			// !! all done
	// this is the general unaligned case. Each row will consist of:
	//   (1) optional preload of an 'extra' vector (if P2 is true)
	//   (2) load of the following vector (first if !P2)
	//   (3) valign of those two gives the vector for the first store (masked by q0)
	//  repeat r11 times:
	//     load another vector, valign to get a vector which is stored directly
	//   (4) *maybe* load a final vector (if P3 is true)
	//   (5) another valign to get a value for the final store (masked by q1).
	// So we need to calculate P2,P3 and set up the source pointer properly now.
	//
	// note: Soff,Doff are the source/dest offsets
	//   Send = (Soff+wid-1)%VECN is the offset of the last source byte
	//   Dend = (Doff+wid-1)%VECN is the offset of the last dest byte
	//
	//  Ncore is the number of 'core' vec writes (in r11,>=0)
	//  p2 = 1 (if Soff>=Doff) else 0   (in this situation Soff != Doff)
	//  p3 = 1 if( Send <Dend )else 0
	//
.L_general_unaligned:
	{
	   r15 = and(r15, #VECN-1);	// offset of last source byte
	   r8 = and(r8, #VECN-1);	// offset of last dest byte
	   p1 = cmp.gt(r11,#0);		// true if any 'middle' vectors to deal with
	   p2 = !cmp.gt(r7,r10);	// if src_off >= dst_off, we need extra initial load
	}
	{
	   r4 = sub(r4,r10);		// align source (first vec containing needed bytes)
	   P3 = cmp.gt(r8,r15);		// p3 true if dest-last-byte > src_last_byte
	   m0 = r5;			// source pitch as a modifier register
	}
	{
	   loop1(.L_gb_outer_loop, r1);	// set up loop
	   // if 'p2' we actually start each row with r4 pointing at the
	   // second vector, and pick up the first one conditionally.
	   if(p2) r4 = add(r4 ,#VECN);
	   if(!p1) jump:nt .L_gb0_cases;
	}
	.falign
.L_gb_outer_loop:
	{
	   r14 = add(r4,r5);		// find next row source address
	   r15 = add(r2,r3);		// find next row dest address
	   v0 = vmem(r4++#1);		// load first vector
#if __HEXAGON_ARCH__ < 62
	   if(!p2) jump:nt .L_skipv1
	}
	{  v1 = vmem(r4+#-2);		// load extra 'left' vector
	}
.L_skipv1:
#else
	}
#endif
	{
#if __HEXAGON_ARCH__ >= 62
	   if(p2)v1.cur = vmem(r4+#-2);	// load extra 'left' vector
#endif
	   // if p2 is false, the content of v1 is indeterminate here, but the
	   // indeterminate result lanes of v2 will not be stored.
	   v2 = valign( v0, v1, r6);	// //first data to write
	}
	{
	   if(q0) vmem(r2++#1)= v2;	// write first data
	   loop0(.L_gb_inner_loop,r11);
	}
	.falign
.L_gb_inner_loop:
	{
	   v1 = vmem(r4++#1);		// next vector
	}
	{
	   v2 = valign( v1, v0, r6);	// align ... first data to write
	   v0 = v1;
	   vmem(r2++#1) = v2.new;
	}:endloop0;
#if __HEXAGON_ARCH__ < 62
	{  if(!P3) jump:nt .L_skiplast;
	}
	{
	   v1 =vmem(r4);		// get last source word
	}
.L_skiplast:
	{
#else
	{
	   if(p3) v1.cur = vmem(r4+#0);	// get last source word
#endif
	   v2 = valign(v1,v0,r6);	// set up last operation
	}
	{
	   if(q1) vmem(r2) = v2;	// write last
	   r2 = r15;			// next row...
	   r4 = r14;
	}:endloop1;
	{ jumpr r31; }			// !! all done
	// best inner loop for the unaligned case:
	//  { vmem(r2++#1) = v2.new;  v2 = valign( v1, v0, r6); v1.cur = vmem(r4++#1); }
	//  { vmem(r2++#1) = v2.new;  v2 = valign( v0, v1, r6); v0.cur = vmem(r4++#1); }
	//
	// special case. unaligned, when the inner loop count is 0
	// I.e. we are writing 2 output vecs.
	// there are four subcases:
	//   A  p2 = 0 p2 = 0    # read 1 per loop; vror; write the same value twice.
	//   B  p2 = 0, p3 = 1   # read 2 per loop  align { v0, x} and { v1, v0}
	//   C  p2 = 1  p3 = 0   # read 2 per loop  align { v1,v0} and { x, v1}
	//   D  p2 = 1  p3 = 1   # read 3 per loop; align {v1,v0} and {v2,v1}
	//
.L_gb0_cases:
	{ if (p2) jump:nt .L_gb0_caseCD ; if (p3) jump:t .L_gb0_caseB }
	//// no middle vecs, p2=p3 = 0
	// In this case, the source bytes for each row are in
	// one vector; just need to vror it and write it twice.
.L_gb0_caseA:
	{loop0(.L_gb0A_outer_loop, r1);	// set up loop
	}
	.falign
.L_gb0A_outer_loop:
	{
	   v1.cur = vmem(r4++m0);	// load first vector (only vec)
	   v2 = vror( v1, r6);		// data to write
	}
	{
	   if(q0) vmem(r2+#0)= v2;	// write first data
	}
	{
	   if(q1) vmem(r2+#1) = v2;	// write last
	   r2 = add(r2,r3);		// find next row dest address
	}:endloop0;
	{ jumpr r31; }			// !! all done
	//// no middle vecs, p2=0,p3 =1
.L_gb0_caseB:
	{loop0(.L_gb0B_outer_loop, r1);	// set up loop
	   r4 = add(r4,#VECN);		// point to second input vector.
	}
	.falign
.L_gb0B_outer_loop:
	{
	   v0.cur = vmem(r4+#-1);	// load first vector
	   v2 = vror( v0, r6);		// //first data to write
	}
	{
	   v1.cur = vmem(r4++m0)
	   if(q0) vmem(r2+#0)= v2;	// write first data
	   v0 = valign(v1,v0,r6);	// last operation
	}
	{
	   if(q1) vmem(r2+#1) = v0;	// write last
	   r2 = add(r2,r3);		// find next row dest address
	}:endloop0;
	{ jumpr r31; }			// !! all done
.L_gb0_caseCD: { if (p3) jump:nt .L_gb0_caseD }
	//// no middle vecs, p2 = 1, p3 = 0;
.L_gb0_caseC:
	{loop0(.L_gb0C_outer_loop, r1);	// set up loop
	}
	.falign
.L_gb0C_outer_loop:
	{
	   v0 = vmem(r4+#-1);		// load first vector
	}
	{
	   v2 = valign( v1,v0, r6);	// //first data to write
	   v1.cur = vmem(r4++m0);	// second vector
	}
	{
	   if(q0) vmem(r2+#0)= v2;	// write first data
	   v0 = vror(v1,r6);		// last operation
	}
	{
	   if(q1) vmem(r2+#1) = v0;	// write last
	   r2 = add(r2,r3);		// find next row dest address
	}:endloop0;
	{ jumpr r31; }			// !! all done
	//// no middle vecs, p2=1,p3 =1
	/// (i.e. 3 in, 2 out per row)
.L_gb0_caseD:
	{loop0(.L_gb0D_outer_loop, r1);	// set up loop
	   r4 = add(r4,#VECN);		// point to last input vector.
	}
	.falign
.L_gb0D_outer_loop:
	{
	   v0 = vmem(r4+#-2);		// load first vector of 3
	}
	{  v1.cur = vmem(r4+#-1);	// load second vector
	   v2 = valign( v1, v0, r6);	// //first data to write
	}
	{
	   if(q0) vmem(r2+#0)= v2;	// write first data
	   v0.cur =vmem(r4++m0);	// get last source word
	   v2 = valign(v0,v1,r6);	// last operation
	}
	{
	   if(q1) vmem(r2+#1) = v2;	// write last
	   r2 = add(r2,r3);		// find next row dest address
	}:endloop0;
	{ jumpr r31; }			// !! all done
	////////////////////////////////////////////////////////////////////////////////////////////
	// cases where only one vector needs to be written per row
	// (columns do not span a vector boundary in the output)
	//
	.falign
.L_short_rows:	// only one output vector per row...
	// test if we need two reads or just one per row.
	{
	   r11 = add(r10,r0);		// if > VECN, need more than one read per row
	   q0 = and(q1,q0);		// generate the proper mask for the write
	   m1 = r3;			// dest row pitch
	}
	{
	   P0=!cmp.gt(r11,#VECN);	//
	   r8 = sub(r4,r10);		// align the source
	   if( P0.new) jump:nt .L_short_read;
	}
	// ok, we need two reads per row; can just do an unaligned read.
	// subtract the dest offset from the source address (note that the
	// source offset must be > the dest, so this will not cross a boundary)
	//
	{
	   loop0(.L_short_short_loop,r1);
	   r4 = sub(r4,r7);		// source address for unaligned reads
	}
	.falign
.L_short_short_loop:
	{
	   v0 = vmemu(r4);		// get a value (unaligned load)
	   r4 = add(r4,r5);
	}
	{
	   if( q0 ) vmem(r2++m1) = v0;
	}:endloop0;
	{ jumpr r31; }			// !! all done
.L_short_read:
	// this is the case where only need one read and one write per row.
	// if r6 != 0, we need to vror by r6, otherwise we can just copy.
	// Also, if == 1 rows, treat as the vror case (so I don't need
	// to handle both degenerate loop cases).
	//
	{  P0 = cmp.eq(r6,#0);
	   P1 = cmp.gt(r1,#1);
	   m0 = r5;
	}
	{
	   v0 = vmem(r8++m0);		// get the first vector
	   p0 = and(p0,p1);
	   if(!p0.new) jump:nt .L_short_short_unaligned;
	   r1 = add(r1,#-1);
	}
	// aligned copy, at least 2 rows
	{
	   loop0(.L_short_2_loop,r1);
	}
	.falign
.L_short_2_loop:
	{
	   if(q0) vmem(r2++m1) = v0;
	   v0 = vmem(r8++m0);
	}:endloop0;
	{if(q0) vmem(r2+#0) = v0;
	   jumpr r31; }			// !! all done
	//
	// case we only need to read one vector per row,
	// but we need to do a ror. The case where ht ==1
	// also gets here.
	//
	.falign
.L_short_short_unaligned:
	{  v2 = vror(v0,r6);
	   if(!p1) jump:nt .L_justone;
	   loop0(.L_short_3_loop,r1);
	}
	.falign
.L_short_3_loop:
	{
	   if(q0) vmem(r2++m1) = v2;
	   v0.cur = vmem(r8++m0);
	   v2 = vror(v0,r6);
	}:endloop0
.L_justone:
	{if(q0) vmem(r2+#0) = v2;
	   jumpr r31; }			// !! all done
.LtmpX:
	.size vmemcpy_2d_asm, .LtmpX-vmemcpy_2d_asm
////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////
// General copy, which allows any rowpitch,
// by calling the other copy multiple times as needed.
// The better the alignment of the supplied row pitches, the fewer
// calls are needed.
//
// Note that this *not* check for src_pitch == dst_pitch == wid
// (which could be done in a single copy much more efficiently).
//
	.globl vmemcpy_2d_general_asm
	.balign 16
	.type vmemcpy_2d_general_asm,@function
vmemcpy_2d_general_asm:
	// r0 -> wid
	// r1 -> ht
	// r2 -> dstp
	// r3 -> dst_pitch
	// r4 -> srcp
	// r5 -> src_pitch
	//
	// Wrapper over vmemcpy_2d_asm for arbitrary (non-vector-aligned) row
	// pitches: it makes K = 2^k calls, each copying every K'th row, where
	// k is chosen so that K*pitch is vector-aligned.  The stack frame is
	// built speculatively and unwound again at .L_jjtoit if a single
	// direct call suffices (both pitches aligned, or ht <= 1).
	//
	// if ht <= 1, or of both row pitches are multiples of VECN, just go do
	// the regular copy.
	//
	{
	   r6 = or(r3,r5)		// combined pitch bits: alignment of both
	   r7= fp;			// stack frame is speculative...
	   memd(sp+#-48)=r21:20		// since we might just jump to the other function.
	   allocframe(#40)
	}
	{
	   p0 = cmp.gt( r1,#1);
	   r6 = ct0(r6);		// count trailing zeros = pitch alignment
	   r8 = #1;
	   memd(sp+#8)=r23:22
	}
	{
	   p1 = !cmp.gt( r6,#VECLOGN-1 );	// false if aligned
	   p1 = p0;			// false if only 1 row (p1 = p1 AND p0)
	   if( !p1.new ) jump:nt .L_jjtoit;	// jump if either condition false
	   r6 = sub( #VECLOGN, r6 );	// will be >= 1
	}
	// K = (1<<r6) is the number of loops we need.
	// at least 2; at most VECN.
	// (if ht is less, we only do that many, each will be 1 row).
	// Note that e.g. if K=4 and ht = 33,
	// Four calls are done, and the 'ht' values to the calls will be
	//   9, 8, 8, 8
	// this is done by finding the first height (ht/K, rounded up)
	// and then figuring out when the height should be reduced by 1
	// A value is placed in r27, when it matches the downcount (r26), the
	// remaining copies will be smaller by 1.
	//
	{
	   r8 = asl( r8, r6);		// 'K'
	   r23 =asl( r3, r6);		// dst row pitch for each op
	   r22 = r2;
	   memd(sp+#16)=r25:24
	}
	{
	   r25 =asl( r5, r6);		// src row pitch for each op
	   r24 = r4;			// src pointer
	   r27 = add(r8,#-1)		// K-1
	   memd(sp+#24)=r27:26
	}
	{
	   r26 = min( r8,r1);		// # of loops to do.
	   r20 = r0;			// width
	   r21 = add(r1,r27);		// height + K-1
	   r27 = and(r1,r27);		// height & (K-1)
	}
	{
	   r27 = sub(r26,r27);		// K - (ht&(K-1)) [->zero when ht < K]
	   memd(sp+#32) = r19:18
	   r19:18 = combine(r5,r3)	// save row pitches
	   r21 = asr( r21, r6)		// height for the 1st operation
	}
	// r26 = # of loops to do (>= 1)
	// r20..r25 = values for r0..5 for next call
	// r27 = loop count at which 'height' (r21) needs to be 1 less
	// r19,18 = original row pitches (for bumping address between call).
	//
.L_rcgloop:
	{
	   r1:0 = combine(r21,r20)	// width, height
	   r3:2 = combine(r23,r22)	// dst addr and pitch
	   r5:4 = combine(r25,r24)	// src addr/pitch
	   call vmemcpy_2d_asm
	}
	{
	   r26 = add(r26,#-1);		// count the loop...
	   r22 = add(r22,r18);		// bump dest pointer by original dest pitch
	   r24 = add(r24,r19)		// and src address too
	}
	{
	   p1 = cmp.eq(r26,r27)		// time to dec 'ht'?
	   if( p1.new ) r21 = add(r21,#-1)
	   p0 = cmp.gt( r26,#0)
	   if( p0.new ) jump:t .L_rcgloop;	// loop till done
	}
	// restore registers, and done...
	{
	   r21:20 = memd(sp+#0 )
	   r23:22 = memd(sp+#8 )
	}
	{
	   r25:24 = memd(sp+#16 )
	   r27:26 = memd(sp+#24 )
	}
	{
	   r19:18 = memd(sp+#32 )
	   dealloc_return
	}
.L_jjtoit:
	// call becomes a single call to vmemcpy_2d_asm if the
	// row pitches are aligned, or if ht <= 1.
	// undo the 'allocframe' we started...
	// and just go (tail-jump) to the aligned-pitch copy.
	// no regs have changed except fp and sp.
	{  sp = add(fp,#8);
	   fp = r7;
	   jump vmemcpy_2d_asm
	}
.LtmpY:
	.size vmemcpy_2d_general_asm, .LtmpY-vmemcpy_2d_general_asm
|
XiaoMi/nnlib | 20,460 | hexagon/asm_src/dwconv2dbbb_s1_7xN_h.S | /*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
* FUNCTIONS : dwconv2dbbb_s1_7xN_asm
*
* DESCRIPTION
 * Mx7 depthwise filter, horizontal stride = 1; vertical stride and vertical
 * filter size can be anything.
* CODESIZE
* 896 bytes
* STACK SIZE
 * 7*2*128 + 64 = 1856 bytes
*
* ARCHITECTURE : QDSP6V60+ + HVX
*
* REVISION HISTORY:
* =================
*
* Author Date Comments
* -------------------------------------------------------------
* DJH 05/20/19 created
* ------------------------------------------------------------- */
/* ----------------------------------------------------------------------------- */
.text
 .file "dwconv2dbbb_s1_7xN_h.S"
/*
 * dwconv2dbbb_s1_7xN_asm -- depthwise 2-d convolution, vertical stride 1,
 * 7-wide filter, N filter rows, for d32-format 8-bit activations and 8-bit
 * weights, 32-bit bias, producing 8-bit outputs with running min/max
 * tracking (hand-scheduled HVX code).
 *
 * Register args (r0..r5):
 *   r0 in_buf, r1 filt, r2 out_buf, r3 next_in_width_depth (input row
 *   pitch in bytes), r4 next_out_width_depth (unused), r5 next_in_width_32.
 * Stack args (at sp+#64 and up after allocframe(#56), addressed below as
 * sp+#N<<2):
 *   arg16 next_out_width_32   arg17 depth          arg18 out_width
 *   arg19 out_height          arg20 filt_height    arg21 filt_zero
 *   arg22 bias ptr            arg23 max/min ptr    arg24 recip_level ptr
 *   arg25 recip_shift         arg26 vertical stride arg27 scratch buffer
 * NOTE(review): argument meanings are inferred from the loads below --
 * confirm against the C-side prototype before relying on this map.
 *
 * Structure: .L_height per output row -> .L_depth per 32-channel slice ->
 * .L_init preloads the filter window rows (byte-shuffled) into the scratch
 * buffer -> .L_width produces 4 output columns per iteration, with the
 * first filter row peeled into the loop prologue and the remaining
 * filt_height rows accumulated in .L_vert.  s0..s3 hold the weight
 * accumulators, z0..z3 the filter-zero-offset corrections that are
 * subtracted at the end of each width iteration.
 */
 .global dwconv2dbbb_s1_7xN_asm
 .balign 32
 .type dwconv2dbbb_s1_7xN_asm, @function
dwconv2dbbb_s1_7xN_asm:
/* ----------------------------------------------------------------------------- */
//I/O regs
#define in_buf r0 //in: input activations, d32 layout
#define filt r1 //in: packed filter weights
#define out_buf r2 //in: output buffer, d32 layout
#define next_in_width_depth r3 //in: input row pitch in bytes
#define next_out_width_depth r4 //currently unused
#define next_in_width_32 r5 //in: pitch between 32-channel input slices
#define next_out_width_32 r10 //stack arg: pitch between 32-ch output slices
#define depth r11 //stack arg: input depth, reduced to depth/32
#define out_width r12 //stack arg: output width in columns
#define out_height r13 //stack arg: number of output rows
#define filt_height r25 //filt_size-1: filter rows after the peeled first
#define filt_zero r7 //stack arg: filter zero-point (u8)
#define bias_sum r14 //alias of sbuf (same register)
#define ptr_max r15 //stack arg: ptr to running max (+#0) / min (+#1)
#define recip_level r10 //stack arg: ptr to per-slice 1/level vectors
#define recip_shift r8 //stack arg: pre-scale left-shift amount
#define stride_v r28 //stack arg: vertical stride (rows of input per output row)
#define sbuf r14 //stack arg: scratch line buffer for shuffled input
//Scaler Regs
#define ptr_w0 r16 //weight ptr, start of current slice's filter
#define ptr_w1 r17 //weight ptr used inside the vertical loop
#define col_count r9 //output columns remaining in this row
#define c8 r6 //constant 8 (byte shift)
#define c24 r6 //constant 24 (byte shift); NOTE(review): defined on r6 like c8 but initialized into r4 below -- the c24 uses resolve to r6; confirm intended register
#define bias_ptr r20 //ptr to 32-bit bias vectors, one per slice
#define ptr_x0 r18 //input ptr: start of window for this group of columns
#define ptr_x1 r19 //input ptr: walks down the filter rows
#define ptr_x1ptr_x0 r19:18 //pair for combined init
#define ptr_xin r22 //input ptr: base of current depth slice
#define ptr_y r23 //output write ptr
#define depth_cnt r26 //remaining 32-channel slices
#define filt_size r8 //filter height (total rows)
#define next_in_width_depth_stride r28 //input row pitch * vertical stride
#define zzzz r7 //filt_zero splatted into all 4 bytes
#define _zzz r27 //zzzz >> 8: zero in 3 low bytes (edge correction)
#define zzz_ r15 //zzzz << 8: zero in 3 high bytes (edge correction)
#define out_width4 r11 //ceil(out_width/4): width-loop trip count
//Vector Regs
#define vrecip v0 //per-slice reciprocal scale
#define vshamt_vec v1 //recip_shift splatted per lane
#define max v2 //running per-lane maximum of outputs
#define min v3 //running per-lane minimum of outputs
#define bias_val v4 //per-slice 32-bit bias
#define w_654 v8 //filter taps 6,5,4 (next weight vector)
#define w6543 v26 //taps shifted to align window by one column
#define w3210 v28 //filter taps 3,2,1,0
#define x0 v30 //raw input vector 0 (pre-shuffle)
#define x1 v29 //raw input vector 1 (pre-shuffle)
#define x2 v5 //raw input vector 2 (pre-shuffle)
#define w210_ v11 //taps shifted left one byte (window offset)
#define x3x2x1x0 v30 //shuffled input: columns 0..3
#define x7x6x5x4 v10 //shuffled input: columns 4..7
#define xbxax9x8 v5 //shuffled input: columns 8..11
#define x3x2x3x2 v6 //odd-half replicate, staging for x5x4x3x2
#define x5x4x3x2 v6 //window shifted by 2 columns
#define x7x6x7x6 v9 //odd-half replicate, staging for x9x8x7x6
#define x9x8x7x6 v9 //window shifted by 6 columns
#define xbxaxbxa v7 //odd-half replicate (unused alias)
#define xdxcxbxa v7 //window alias (unused here)
#define s0 v12 //accumulator, output column group 0
#define s1 v13 //accumulator, output column group 1
#define s2 v14 //accumulator, output column group 2
#define s3 v15 //accumulator, output column group 3
#define z0 v16 //filter-zero correction for s0
#define z1 v17 //filter-zero correction for s1
#define z2 v18 //filter-zero correction for s2
#define z3 v19 //filter-zero correction for s3
#define d0 v20 //scaled result 0
#define d1 v21 //scaled result 1
#define d1d0 v24 //results 1,0 packed to halfwords
#define d2 v22 //scaled result 2
#define d3 v23 //scaled result 3
#define d3d2 v25 //results 3,2 packed to halfwords
#define d3210 v25 //all 4 results packed to bytes
#define SSR <<1:rnd:sat:shift //simplify mpy instruction
#define PV(VSRC) .word (0x1DFFE020+VSRC) //debug vec reg
#define PS(SSRC) .word (0x1DFFE100+SSRC) //debug sca reg
/* =========================================================================== */
/* Prologue: save callee-saved pairs, pull stack args, precompute splats. */
 { allocframe(#56) //frame = 56 + 8 (LR:FP); stack args start at sp+#64
 } {
 memd(sp+#0) = r17:16 //save callee-saved
 memd(sp+#8) = r19:18 //
 } {
 memd(sp+#16) = r21:20 //
 memd(sp+#24) = r23:22 //
 } {
 memd(sp+#32) = r25:24 //
 memd(sp+#40) = r27:26 //
 c8 = #8 //byte-shift constant
 c24 = #24 //byte-shift constant
 } {
 depth = memw(sp+#17<<2) //arg: input depth
 } {
 depth = lsr(depth, #5) //depth/32 = number of d32 slices
 out_width = memw(sp+#18<<2) //arg: output width
 filt_zero = memw(sp+#21<<2) //arg: filter zero-point
 } {
 memw(sp+#17<<2) = depth //store depth/32 back over the arg slot for reuse
 out_height = memw(sp+#19<<2) //arg: output height
 out_width4 = add(out_width, #3) //round width up to multiple of 4
 zzzz = vsplatb(filt_zero) //zero-point in all 4 bytes
 } {
 recip_shift = memw(sp+#25<<2) //arg: pre-scale shift
 depth_cnt = memw(sp+#17<<2) //depth/32 loop counter
 out_width4 = lsr(out_width4, #2) //width-loop trip count
 _zzz = lsr(zzzz, #8) //zero-point with top byte cleared
 } {
 vshamt_vec= vsplat(recip_shift) //per-lane shift amounts
 stride_v = memw(sp+#26<<2) //arg: vertical stride
 ptr_max = memw(sp+#23<<2) //arg: max/min buffer
 } {
 filt_size = memw(sp+#20<<2) //arg: filter height
 max = vmem(ptr_max+#0) //load running max
 next_in_width_depth_stride = mpyi(next_in_width_depth,stride_v) //input advance per output row
 } {
 min = vmem(ptr_max+#1) //load running min
 zzz_ = asl(zzzz, #8) //zero-point with bottom byte cleared
 filt_height = add(filt_size, #-1) //rows handled by .L_vert (first row peeled)
 }
/* ----------------------------------------------------------------------------- */
 .balign 32
.L_height: /* per output row: reset weight/bias/recip pointers */
 { bias_ptr = memw(sp+#22<<2) //arg: bias vectors
 ptr_xin = in_buf //first slice of this input row
 ptr_w0 = filt //first slice's weights
 recip_level = memw(sp+#24<<2) //arg: reciprocal scale vectors
 }
/* ----------------------------------------------------------------------------- */
.L_depth: /* per 32-channel slice */
 {
 vrecip = vmem(recip_level++#1) //this slice's 1/level
 loop1(.L_width, out_width4) //
 memw(sp+#52) = recip_level //save advanced ptr in local frame
 }
 { bias_val = vmem(bias_ptr++#1) //this slice's bias
 ptr_x1ptr_x0 = combine(ptr_xin, ptr_xin) //both input ptrs at window start
 sbuf = memw(sp+#27<<2) //arg: scratch buffer
 loop0(.L_init, filt_size) //preload one window column-group per filter row
 }
/* --------------------------------------------------------------------------- */
/* Preload: byte-shuffle the first 2 input vectors of every filter row into
 * the scratch buffer so the width loop can fetch them aligned. */
 .balign 32
.L_init:
 { x0 = vmemu(ptr_x1+#0) //0
 } {
 x1 = vmemu(ptr_x1+#1) //1
 ptr_x1 = add(ptr_x1, next_in_width_depth) //1: next filter row
 } {
 x0.b = vshuff(x0.b) //2
 } {
 x1.b = vshuff(x1.b) //3
 } {
 x3x2x1x0.b = vshuff(x0.b) //4
 vmem(sbuf++#1) = x3x2x1x0.new //stash shuffled columns 0..3
 } {
 x7x6x5x4.b = vshuff(x1.b) //5
 vmem(sbuf++#1) = x7x6x5x4.new //stash shuffled columns 4..7
 }:endloop0
 { sbuf = memw(sp+#27<<2) //rewind scratch buffer
 w3210 = vmem(ptr_w0+#0) //[1, 1]
 ptr_x1 = ptr_x0 //[WIDTH, P]
 p3 = !cmp.eq(r0, r0) //p3=false: suppress the first (stale) store
 } {
 x3x2x1x0 = vmem(sbuf+#0) //[0, 0]
 ptr_x0 = add(ptr_x0, #128) //[WIDTH,P] advance 4 columns of d32
 ptr_y = out_buf //output write ptr for this slice
 col_count = out_width //columns remaining
 }
/* --------------------------------------------------------------------------- */
/* Width loop: 4 output columns per iteration. First filter row is computed
 * here (prologue), remaining rows in .L_vert; the previous iteration's
 * results are scaled/packed/stored in the same packets (software pipeline). */
.L_width:
 { z0.uw = vrmpy(x3x2x1x0.ub, zzzz.ub) //[0, 3]zero-offset corr, even cols
 z1.uw = vrmpy(x3x2x1x0.ub, zzz_.ub) //[0, 3]zero-offset corr, odd cols
 w210_.w = vasl(w3210.w, c8) //[0, 3]taps shifted one byte
 x7x6x5x4 = vmem(sbuf+#1) //[0, 3]
 } {
 s0.uw = vrmpy(x3x2x1x0.ub, w3210.ub) //[0, 4]weight accumulate, col group 0
 w_654 = vmem(ptr_w0+#1) //[0, 4]
 w6543.uw = vlsr(w3210.uw, c24) //[0, 4]
 ptr_w1 = add(ptr_w0, #256) //[WIDTH, P]weights for .L_vert rows
 } {
 s1.uw = vrmpy(x3x2x1x0.ub, w210_.ub) //[0, 5]weight accumulate, col group 1
 x2 = vmemu(ptr_x1+#2) //[0, 5]columns 8..11, unaligned
 s0.w = vadd(bias_val.w, s0.w) //[WIDTH, P]fold in bias
 } {
 z0.uw+= vrmpy(x7x6x5x4.ub, _zzz.ub) //[0, 6]
 z1.uw+= vrmpy(x7x6x5x4.ub, zzzz.ub) //[0, 6]
 x3x2x3x2.h = vshuffo(x3x2x1x0.h, x3x2x1x0.h) //[0, 6]
 w6543.w += vasl(w_654.w, c8) //[0, 6]
 } {
 s0.uw += vrmpy(x7x6x5x4.ub, w_654.ub) //[0, 7]
 x5x4x3x2.h = vshuffe(x7x6x5x4.h, x3x2x3x2.h) //[0, 7]window +2 cols
 x2.b = vshuff(x2.b) //[0, 7]
 ptr_x1 = add(ptr_x1, next_in_width_depth) //[0, 7]next filter row
 } {
 s1.uw += vrmpy(x7x6x5x4.ub, w6543.ub) //[0, 8]
 vmem(sbuf++#1) = x7x6x5x4 //[0, 8]refresh scratch for next iter
 s3 = bias_val //[WIDTH, P]
 loop0(.L_vert, filt_height) //[WIDTH, P]remaining filter rows
 } {
 s2.uw = vrmpy(x5x4x3x2.ub, w3210.ub) //[0,10]
 x7x6x7x6.h = vshuffo(x7x6x5x4.h, x7x6x5x4.h) //[0, 9]
 xbxax9x8.b = vshuff(x2.b) //[0, 9]
 vmem(sbuf++#1) = xbxax9x8.new //[0,10]refresh scratch for next iter
 } {
 z2.uw = vrmpy(x5x4x3x2.ub, zzzz.ub) //[0, 9]
 z3.uw = vrmpy(x5x4x3x2.ub, zzz_.ub) //[0, 9]
 x9x8x7x6.h = vshuffe(xbxax9x8.h, x7x6x7x6.h) //[0,10]window +6 cols
 s1.w = vadd(bias_val.w, s1.w) //[WIDTH, P]fold in bias
 }
/* --------------------------------------------------------------------------- */
/* Vertical loop: accumulate remaining filt_height rows into s0..s3/z0..z3. */
 .balign 32
.L_vert:
 { s3.uw += vrmpy(x5x4x3x2.ub, w210_.ub) //[0,11]
 } {
 z2.uw+= vrmpy(x9x8x7x6.ub, _zzz.ub) //[0,12]
 z3.uw+= vrmpy(x9x8x7x6.ub, zzzz.ub) //[0,12]
 x3x2x1x0 = vmem(sbuf+#0) //[1, 0]
 } {
 s2.uw += vrmpy(x9x8x7x6.ub, w_654.ub) //[0,13]
 w3210 = vmem(ptr_w1++#1) //[1, 1]next row's taps 3..0
 } {
 s3.uw += vrmpy(x9x8x7x6.ub, w6543.ub) //[0,13]
 } {
 z0.uw += vrmpy(x3x2x1x0.ub, zzzz.ub) //[1, 3]
 z1.uw += vrmpy(x3x2x1x0.ub, zzz_.ub) //[1, 3]
 w210_.w = vasl(w3210.w, c8) //[1, 3]
 x7x6x5x4 = vmem(sbuf+#1) //[1, 3]
 } {
 s0.uw += vrmpy(x3x2x1x0.ub, w3210.ub) //[1, 4]
 w_654 = vmem(ptr_w1++#1) //[1, 4]next row's taps 6..4
 w6543.uw = vlsr(w3210.uw, c24) //[1, 4]
 } {
 s1.uw += vrmpy(x3x2x1x0.ub, w210_.ub) //[1, 5]
 x2 = vmemu(ptr_x1+#2) //[1, 5]
 ptr_x1 = add(ptr_x1, next_in_width_depth) //[1, 5]next filter row
 } {
 z0.uw+= vrmpy(x7x6x5x4.ub, _zzz.ub) //[1, 6]
 z1.uw+= vrmpy(x7x6x5x4.ub, zzzz.ub) //[1, 6]
 x3x2x3x2.h = vshuffo(x3x2x1x0.h, x3x2x1x0.h) //[1, 6]
 w6543.w += vasl(w_654.w, c8) //[1, 6]
 } {
 s0.uw += vrmpy(x7x6x5x4.ub, w_654.ub) //[1, 7]
 x5x4x3x2.h = vshuffe(x7x6x5x4.h, x3x2x3x2.h) //[1, 7]
 x2.b = vshuff(x2.b) //[1, 7]
 } {
 s1.uw += vrmpy(x7x6x5x4.ub, w6543.ub) //[1, 8]
 vmem(sbuf++#1) = x7x6x5x4 //[1, 8]refresh scratch
 } {
 z2.uw += vrmpy(x5x4x3x2.ub, zzzz.ub) //[1, 9]
 z3.uw += vrmpy(x5x4x3x2.ub, zzz_.ub) //[1, 9]
 x7x6x7x6.h = vshuffo(x7x6x5x4.h, x7x6x5x4.h) //[1, 9]
 xbxax9x8.b = vshuff(x2.b) //[1, 9]
 } {
 s2.uw += vrmpy(x5x4x3x2.ub, w3210.ub) //[1,10]
 x9x8x7x6.h = vshuffe(xbxax9x8.h, x7x6x7x6.h) //[1,10]
 vmem(sbuf++#1) = xbxax9x8 //[1,10]refresh scratch
 }:endloop0
/* --------------------------------------------------------------------------- */
/* Width-loop epilogue: finish accumulation, subtract zero corrections, scale
 * by vrecip with rounding shift, pack to bytes, update min/max, store the
 * previous iteration's packed vector (p3 gates the very first, stale, one).
 * For the final partial group, over-run lanes are replaced with s0 (a valid
 * value) so min/max tracking is not polluted. */
 { s3.uw += vrmpy(x5x4x3x2.ub, w210_.ub) //[1,11]
 s0.w = vsub(s0.w, z0.w) //[WIDTH]remove zero-point contribution
 s1.w = vsub(s1.w, z1.w) //[WIDTH]
 col_count = add(col_count, #-4) //[WIDTH]
 } {
 z2.uw+= vrmpy(x9x8x7x6.ub, _zzz.ub) //[1,12]
 z3.uw+= vrmpy(x9x8x7x6.ub, zzzz.ub) //[1,12]
 s2.w = vadd(s2.w, bias_val.w) //[WIDTH, P]fold in bias
 d3d2.h = vpack(d3.w, d2.w):sat //[WIDTH, E]pack previous iter's results
 } {
 s2.uw += vrmpy(x9x8x7x6.ub, w_654.ub) //[1,13]
 s0.w = vasl(s0.w, vshamt_vec.w) //pre-scale shift
 s3.w = vsub(s3.w, z3.w) //[WIDTH]
 } {
 s3.uw += vrmpy(x9x8x7x6.ub, w6543.ub) //[0,13]
 s2.w = vsub(s2.w, z2.w) //[WIDTH,E]
 s1.w = vasl(s1.w, vshamt_vec.w) //pre-scale shift
 p0 = !cmp.gt(col_count, #-3) //true if col group 1 is over-run
 } {
 d0.w = vmpye(s0.w, vrecip.uh) //[W,15]scale: low half of 32x32 mpy
 if(p0) s1 = s0 //[WIDTH]substitute valid value if over-run
 s2.w = vasl(s2.w, vshamt_vec.w) //[WIDTH]
 p0 = !cmp.gt(col_count, #-2) //true if col group 2 is over-run
 } {
 d3210.ub = vpack(d3d2.h, d1d0.h):sat //[WIDTH, E]previous iter to bytes
 d0.w += vmpyo(s0.w, vrecip.h):SSR //[W,17]scale: high half, rnd/sat
 if(p0) s2 = s0 //substitute valid value if over-run
 } {
 if(p3) vmemu(ptr_y++#1) = d3210 //[WIDTH, E]store previous iter's output
 d1.w = vmpye(s1.w, vrecip.uh) //[0,22]
 min.w = vmin(min.w, d0.w) //[0,22]track running min
 } {
 d1.w += vmpyo(s1.w, vrecip.h):SSR //[0,23]
 s3.w = vasl(s3.w, vshamt_vec.w) //pre-scale shift
 max.w = vmax(max.w, d0.w) //[0,18]track running max
 p0 = !cmp.gt(col_count, #-1) //true if col group 3 is over-run
 } {
 d2.w = vmpye(s2.w, vrecip.uh) //[0,15]
 if(p0) s3 = s0 //substitute valid value if over-run
 min.w = vmin(min.w, d1.w) //[0,27]
 sbuf = memw(sp+#27<<2) //[WIDTH,P]rewind scratch buffer
 } {
 d2.w += vmpyo(s2.w, vrecip.h):SSR //[0,17]
 x3x2x1x0 = vmem(sbuf+#0) //[P, 0]preload next iteration
 max.w = vmax(max.w, d1.w) //[0,26]
 p3 = cmp.eq(r0, r0) //p3=true: stores enabled from now on
 } {
 d3.w = vmpye(s3.w, vrecip.uh) //[0,22]
 max.w = vmax(max.w, d2.w) //[0,18]
 min.w = vmin(min.w, d2.w) //[0,22]
 } {
 d1d0.h = vpack(d1.w, d0.w):sat //[0,27]
 d3.w += vmpyo(s3.w, vrecip.h):SSR //[0,23]
 w3210 = vmem(ptr_w0+#0) //[1, 1]reload first row's taps
 ptr_x1 = ptr_x0 //[WIDTH, P]
 } {
 min.w = vmin(min.w, d3.w) //[0,27]
 max.w = vmax(max.w, d3.w) //[0,26]
 ptr_x0 = add(ptr_x0, #128) //[WIDTH,P]advance 4 columns of d32
 }:endloop1 //end width
/* --------------------------------------------------------------------------- */
/* Depth-loop epilogue: flush the last output vector, advance to next slice. */
 { d3d2.h = vpack(d3.w, d2.w):sat //[WIDTH, E]
 ptr_w0 += asl(filt_size, #8) //[DEPTH,E]advance weights by filt_size*256 bytes
 ptr_xin = add(ptr_xin, next_in_width_32) //[DEPTH]next 32-channel slice
 next_out_width_32 = memw(sp+#16<<2) //arg: output slice pitch
 } {
 depth_cnt = add(depth_cnt, #-1) //[DEPTH,E]
 out_buf = add(out_buf, next_out_width_32) //[DEPTH]
 loop1(.L_width, out_width4) //[DEPTH]re-arm width loop
 } {
 p0 = cmp.eq(depth_cnt, #0) //[DEPTH,E]all slices done?
 d3210.ub = vpack(d3d2.h, d1d0.h):sat //[WIDTH, E]
 recip_level = memw(sp+#52) //restore advanced recip ptr
 } {
 vmemu(ptr_y+#0) = d3210 //[WIDTH, E]store final output vector
 if(!p0) jump .L_depth //[DEPTH,E]
 if(p0) out_height = add(out_height, #-1) //[HEIGHT]
 }//end depth
/* ----------------------------------------------------------------------------- */
 { p0 = cmp.eq(out_height, #0) //height done?
 depth_cnt = memw(sp+#17<<2) //reload depth/32
 in_buf=add(in_buf,next_in_width_depth_stride)//advance input by one stride row
 if(!p0.new) jump:nt .L_height //next output row
 }//end height
/* ----------------------------------------------------------------------------- */
/* Epilogue: write back min/max, restore callee-saved pairs, return. */
 { ptr_max = memw(sp+#23<<2) //
 } {
 r17:16 = memd(sp+#0) //restore
 vmem(ptr_max+#0) = max //
 } {
 r19:18 = memd(sp+#8) //restore
 vmem(ptr_max+#1) = min //
 } {
 r21:20 = memd(sp+#16) //restore
 r23:22 = memd(sp+#24) //restore
 } {
 r25:24 = memd(sp+#32) //restore
 r27:26 = memd(sp+#40) //restore
 } {
 dealloc_return //return
 }
/* ----------------------------------------------------------------------------- */
.L_end:
 .size dwconv2dbbb_s1_7xN_asm, .L_end-dwconv2dbbb_s1_7xN_asm
/* ----------------------------------------------------------------------------- */
|
XiaoMi/nnlib | 20,085 | hexagon/asm_src/gsum_h.S | /*
* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
 Description
 Perform a 1-d integral (sum) over activation inputs of size in_depth. Each
 in_depth block is collapsed to a single int.
 Requirements
 Input must be aligned to 128 bytes; the data length need not be a multiple of
 128 bytes. The final store writes only the amount needed. If multithreaded, the
 data must be split into multiples of 32 inputs, with the final call taking the
 non-multiple-of-32 remainder, e.g. 35*35 split across 4 threads as
 3 * 320 + 1 * 265.
 Codesize
 256 bytes, no stack
 */
#if 0
/* Reference model for gsum_asm: for each output row and column, sum the
 * in_depth activation bytes under the window, scale by -filt_offset, and
 * emit one int per column.  Kept only for documentation/verification. */
void gvsum_cn(uint8_t * in_data, int * out_sum, int in_width, int in_depth, int out_height,
              int stride_vert, int filt_offset)
{
    int row, col, blk, ch;
    for (row = 0; row < out_height; row++) {
        for (col = 0; col < in_width; col++) {
            int acc = 0;                 /* negated running sum for this column */
            for (blk = 0; blk < in_depth / 32; blk++) {
                /* d32 layout: 32 consecutive channels per (column, block) */
                for (ch = 0; ch < 32; ch++) {
                    acc -= filt_offset *
                           in_data[in_depth * in_width * stride_vert * row
                                   + 32 * in_width * blk + 32 * col + ch];
                }
            }
            *out_sum++ = acc;
        }
    }
}
/* ------------------------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------------------------ */
/* First-generation gsum_asm.  This whole section is compiled out by the
 * surrounding #if 0 and retained only as reference; the active version
 * follows under #else. */
 .text
 .global gsum_asm
 .balign 32
 .type gsum_asm, @function
gsum_asm:
/* ------------------------------------------------------------------------------------------ */
#define in_ptr r0 //ptr to activation bytes
#define out_ptr r1 //ptr to output int sums
#define width r2 //in == out width of image, logically % 4
#define depth r3 //in depth, multiple of 32
#define out_height r4 //number of logical output lines
#define stride_vert r5 //vertical stride
#define filt_offset r6 //offset for weights (stack arg)
#define PV(VSRC) .word (0x1DFFE020+VSRC) //debug print vector reg
/* ------------------------------------------------------------------------------------------ */
#define c32 r9 //const 32
#define c16 r8 //const 16
#define c32c16 r9:8 //packed consts
#define cm4 r7 //-4 for dealing out ints
#define width_depth r5 //width * depth
#define in_ptr0 r10 //temp input ptr
#define in_ptr1 r4 //temp input ptr
#define col_count r12 //count out width
#define store_count r13 //store every 8 or 10 iterations
#define store_col_count r13:12 //packed counters
#define next_d32_row r13 //width of sum image in bytes
#define area r4 //total number of computations
#define cm1cm4 r15:14 //packed constants
#define total_count r16 //num bytes left to store
#define mdst r4 //alignment scratch for final store
#define dalign r12 //distance to end of data for final store
/* ------------------------------------------------------------------------------------------ */
#define qprolog q1 //mask for first partial store
#define qepilog q2 //mask for second partial store
#define d3d2d1d0 v11 //data from activations
#define a8a8a8a8 v12 //packed sum of activations 4 to 1
#define s8s8s8s8 v13 //sum of activations across depths 32
#define s8s8s8s8h_s8s8s8s8l v17:16 //dealt activations odd and even
#define s8s8s8s8h v17 //dealt activations odd
#define s8s8s8s8l v16 //dealt activations even
#define acc31_00 v10 //fifo containing history of sums of depths
#define aout v14 //temp output
#define zero v15 //zero-valued reg
/* --------------------------------------------------------------------------------------- */
 {
 allocframe(#16)
 filt_offset = memw(sp+#0<<2) //7th arg; reads via pre-allocframe sp (packet semantics)
 } {
 memd(sp+#0) = r17:16 //save callee-saved pair
 cm4 = #-4 //constant -4
 p1 = and(p1, !p1) //p1 = 0
 next_d32_row = asl(width, #5) //width of row in bytes (width*32)
 } {
 c32c16 = combine(#32, #16) //setup consts
 M0 = next_d32_row //set up modifier reg
 } {
 q1 = vsetq(c16) //--|___ first 16 bytes
 area = mpyi(width, out_height) //total number of sums
 filt_offset = vsplatb(filt_offset) //pack into bytes
 } {
 width_depth = mpy(stride_vert.L, depth.L) //create vertical stride
 total_count = asl(area, #2) //output length in bytes
 r14 = #0x7f //look at low 7 bits
 q0 = vsetq(c32) //----|____ first 32 bytes
 } {
 p0 = bitsclr(total_count, r14) //are low 7 bits 0? (128B multiple)
 width_depth = mpyi(width_depth, width) //vertical stride in bytes
 cm1cm4 = combine(#-1, #-4) //pack constants
 } {
 area = lsr(area, #2) //4 pts per inner loop
 depth = lsr(depth, #5) //how many 32-byte chunks to sum
 in_ptr0 = in_ptr //temp in ptr
 } {
 area = add(area, #2) //pipeline flush: 2 extra iterations
 in_ptr = add(in_ptr, width_depth) //update input ptr to next row
 } {
 loop1(.L_while, area) //number of computations
 q0 = and(q0, !q1) //__|--|__ bytes 16..31 only
 store_col_count = combine(#10, width) //first store after 10 iterations
 p0 = and(p0, !p0) //p0 = 0
 } {
 loop0(.L_sum, depth) //set up inner loop of horz sum
 in_ptr1 = in_ptr0 //temp ptr
 p1 = and(p1, !p1) //p1 = 0
 zero = #0 //#0
 }
/* --------------------------------------------------------------------------------------- */
/* Outer loop: one 4-column group per iteration; sums are rotated through the
 * acc31_00 fifo and flushed 32 ints (128B) at a time. */
 .balign 32
.L_while:
 { a8a8a8a8 = #0 //clear accumulator
 in_ptr0 = add(in_ptr0, #128) //update to next 4 depth
 acc31_00 = vmux(q0, s8s8s8s8, acc31_00) //merge in 4 new ints
 if(p0) in_ptr = add(in_ptr, width_depth) //if at end of row, next row
 }
.L_sum:
 { d3d2d1d0.tmp = vmem(in_ptr1++M0) //load 4 new depths
 a8a8a8a8.uw += vrmpy(d3d2d1d0.ub, filt_offset.ub) //32 sums of 4 (negated later)
 if(p1) store_count = #8 //after first 10, store every 8
 if(p0) col_count = width //reset line count
 }:endloop0
 { store_col_count = vaddw(store_col_count, cm1cm4) //decrement counters
 loop0(.L_sum, depth) //set up inner loop of horz sum
 aout= vror(acc31_00, c32) //rotate out of the way 32 bytes
 p0 = cmp.eq(col_count, #4) //if col count expired
 } {
 s8s8s8s8h_s8s8s8s8l = vdeal(a8a8a8a8,s8s8s8s8,cm4) //deal in new outputs
 aout.w = vsub(zero.w, aout.w) //make sum -ve
 if(p0) in_ptr0 = in_ptr //update to next indepth*in_width
 } {
 if(p1) vmemu(out_ptr++#1) = aout //write after 10 then every 8 finished ints
 if(p1) total_count = add(total_count, #-128) //bytes remaining
 } {
 s8s8s8s8.w = vadd(s8s8s8s8h.w, s8s8s8s8l.w) //32 sums of 8: final block into tmp
 acc31_00 = vror(acc31_00, c16) //rotate out of the way 16 bytes
 in_ptr1 = in_ptr0 //update vertical depth count
 p1 = cmp.eq(store_count, #0) //ready to store new set of sums?
 }:endloop1
/* Tail: negate and store the final (possibly partial) 128B, split across two
 * conditionally-masked aligned stores around the unaligned out_ptr. */
 {
 acc31_00 = vmux(q0, s8s8s8s8, acc31_00) //merge in 4 new ints
 mdst = and(out_ptr, #127) //ptr mod 128
 qprolog = vsetq(out_ptr) //mask up to ptr offset
 c32 = sub(#32, total_count) //compute how many values left
 } {
 dalign = add(mdst, total_count) //dist to end of data
 mdst = sub(c32, out_ptr) //create left rotate amount
 } {
 aout = vror(acc31_00, mdst) //rotate into store position
 } {
 qepilog = vsetq(dalign) //mask for 2nd store
 p1 = cmp.gt(dalign, #127) //is it a double store?
 if(p1.new) jump:nt .L_gt1280 //skip single-store mask fixup
 aout.w = vsub(zero.w, aout.w) //make sum negative
 } {
 qprolog = or(qprolog, !qepilog) //compound 2 masks for single store
 qepilog = and(qprolog, !qprolog) //cancel 2nd mask (qepilog = 0)
 }
.L_gt1280:
 { if(!qprolog) vmem(out_ptr+#0) = aout //store 1st part, in ptr
 } {
 if( qepilog) vmem(out_ptr+#1) = aout //cond store 2nd part
 r17:16 = memd(sp+#0) //restore callee-saved pair
 } {
 dealloc_return //return
 }
.L_end:
/*=============================================================================*/
 .size gsum_asm, .L_end-gsum_asm
/*=============================================================================*/
#else
/* ------------------------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------------------------ */
/* Active gsum_asm: sums in_depth activation bytes per column, negated and
 * scaled by filt_offset, one int out per column.  Leaf function: no frame,
 * args beyond r0..r5 read directly off the caller's stack. */
 .text
 .global gsum_asm
 .balign 32
 .type gsum_asm, @function
gsum_asm:
/* ------------------------------------------------------------------------------------------ */
#define in_ptr r0 //ptr to activation bytes
#define out_ptr r1 //ptr to output int sums
#define width r2 //in == out width of image, logically % 4
#define depth r3 //in depth, multiple of 32
#define out_height r4 //number of logical output lines
#define stride_vert r5 //vertical stride
#define PV(VSRC) .word (0x1DFFE020+VSRC) //debug print vector reg
/* ------------------------------------------------------------------------------------------ */
#define width_depth r5 //width * depth
#define cm4 r6 //-4 for dealing out ints
#define cm1cm4 r7:6 //packed constants
#define c32 r9 //const 32
#define c16 r8 //const 16
#define c32c16 r9:8 //packed consts
#define in_ptr0 r10 //temp input ptr
#define in_ptr1 r11 //temp input ptr
#define col_count r12 //count out width
#define store_count r13 //store every 8 or 10 iterations
#define store_col_count r13:12 //packed counters
#define next_d32_row r13 //width of sum image in bytes
#define area r14 //total number of computations
#define out_count r14 //columns remaining in current row
#define filt_offset r15 //offset for weights (stack arg)
#define offset r28 //store-count reload for row tails
/* ------------------------------------------------------------------------------------------ */
#define d3d2d1d0 v11 //data from activations
#define a8a8a8a8 v12 //packed sum of activations 4 to 1
#define s8s8s8s8 v13 //sum of activations across depths 32
#define s8s8s8s8h_s8s8s8s8l v17:16 //dealt activations odd and even
#define s8s8s8s8h v17 //dealt activations odd
#define s8s8s8s8l v16 //dealt activations even
#define acc31_00 v10 //fifo containing history of sums of depths
#define aout v14 //temp output
#define zero v15 //zero-valued reg
/* --------------------------------------------------------------------------------------- */
 { filt_offset = memw(sp+#0<<2) //7th arg: filter offset
 c32c16 = combine(#32, #16) //setup consts
 next_d32_row = asl(width, #5) //width of row in bytes (width*32)
 } {
 M0 = next_d32_row //set up modifier reg
 area = mpyi(width, out_height) //total number of sums
 q1 = vsetq(c16) //--|___ first 16 bytes
 in_ptr0 = in_ptr //temp in ptr
 } {
 filt_offset = vsplatb(filt_offset) //pack into bytes
 width_depth = mpy(stride_vert.L, depth.L) //create vertical stride
 q0 = vsetq(c32) //----|____ first 32 bytes
 zero = #0 //#0
 } {
 width_depth = mpyi(width_depth, width) //vertical stride in bytes
 cm1cm4 = combine(#-1, #-4) //pack constants
 offset = extractu(width,#3,#2) //(width/4) mod 8: row-tail store count
 q0 = and(q0, !q1) //__|--|__ bytes 16..31 only
 } {
 area = lsr(area, #2) //4 pts per inner loop
 depth = lsr(depth, #5) //how many 32-byte chunks to sum
 offset = add(offset,#-1) //
 } {
 loop1(.L_while, area) //number of computations
 p0 = or(p0, !p0) //p0 = 1: trigger row init on first iteration
 store_count = #10 //first store after 10 iterations (pipeline fill)
 offset = and(offset,#7) //wrap to 0..7
 } {
 p2 = not(p0) //p2 = 0
 loop0(.L_sum, depth) //set up inner loop of horz sum
 }
/* --------------------------------------------------------------------------------------- */
/* Main loop: one 4-column group per iteration; finished sums rotate through
 * the acc31_00 fifo and are flushed 32 ints (128B) at a time in-loop. */
 .balign 32
.L_while:
 { if (!p2) out_count = width //row exhausted: reset column budget
 in_ptr1 = in_ptr0 //update vertical depth count
 in_ptr0 = add(in_ptr0, #128) //update to next 4 depth
 p1 = cmp.eq(store_count, #0) //ready to store new set of sums?
 } {
 a8a8a8a8 = #0 //clear accumulator
 acc31_00 = vmux(q0, s8s8s8s8, acc31_00) //merge in 4 new ints
 if(p0) in_ptr = add(in_ptr, width_depth) //if at end of row, next row
 if(p0) col_count = width //reset line count
 }
.L_sum:
 { d3d2d1d0.tmp = vmem(in_ptr1++M0) //load 4 new depths
 a8a8a8a8.uw += vrmpy(d3d2d1d0.ub, filt_offset.ub) //32 sums of 4 (negated at store)
 nop; nop //pad packet for loop alignment
 }:endloop0
 { store_col_count = vaddw(store_col_count, cm1cm4) //decrement counters
 loop0(.L_sum, depth) //set up inner loop of horz sum
 aout= vror(acc31_00, c32) //rotate out of the way 32 bytes
 p0 = cmp.eq(col_count, #4) //if col count expired
 } {
 s8s8s8s8h_s8s8s8s8l = vdeal(a8a8a8a8,s8s8s8s8,cm4) //deal in new outputs
 aout.w = vsub(zero.w, aout.w) //make sum -ve
 if(p1) vmem(out_ptr++#1) = aout.new //write after 10 then every 8 finished ints
 if(p0) in_ptr0 = in_ptr //update to next indepth*in_width
 } {
 if (p1) store_count = #7 //restart 8-iteration store cadence
 if (p1) out_count = sub(out_count,c32) //32 columns flushed
 acc31_00 = vror(acc31_00, c16) //rotate out of the way 16 bytes
 } {
 p2 = cmp.gt(out_count,#0) //columns left in this row?
 if (!p2.new) store_count = offset //row tail: store after remainder count
 if (!p2.new) out_height = add(out_height,#-1) //row finished
 s8s8s8s8.w = vadd(s8s8s8s8h.w, s8s8s8s8l.w) //32 sums of 8: final block into tmp
 }:endloop1
/* --------------------------------------------------------------------------------------- */
/* Drain loop: same fifo/store cadence as .L_while but without new loads,
 * flushing the sums still in flight until all rows are accounted for. */
 .balign 32
.L_post:
 { p1 = cmp.eq(store_count, #0) //ready to store new set of sums?
 if (!p2) out_count = width //row exhausted: reset column budget
 acc31_00 = vmux(q0, s8s8s8s8, acc31_00) //merge in 4 new ints
 store_count = add(store_count, #-1) //decrement counter
 } {
 s8s8s8s8h_s8s8s8s8l = vdeal(a8a8a8a8,s8s8s8s8,cm4) //deal in new outputs
 if (p1) store_count = #7 //restart 8-iteration store cadence
 if (p1) out_count = sub(out_count,c32) //32 columns flushed
 } {
 aout= vror(acc31_00, c32) //rotate out of the way 32 bytes
 p2 = cmp.gt(out_count,#0) //columns left in this row?
 if (!p2.new) store_count = offset //row tail: store after remainder count
 if (!p2.new) out_height = add(out_height,#-1) //row finished
 } {
 p0 = cmp.gt(out_height,#0) //any rows still unflushed?
 s8s8s8s8.w = vadd(s8s8s8s8h.w, s8s8s8s8l.w) //32 sums of 8: final block into tmp
 } {
 aout.w = vsub(zero.w, aout.w) //make sum -ve
 if(p1) vmem(out_ptr++#1) = aout.new //write after 10 then every 8 finished ints
 acc31_00 = vror(acc31_00, c16) //rotate out of the way 16 bytes
 if (p0) jump .L_post //
 }
/* --------------------------------------------------------------------------------------- */
 { jumpr r31 //return (leaf: no frame to deallocate)
 }
.L_end:
/*=============================================================================*/
 .size gsum_asm, .L_end-gsum_asm
/*=============================================================================*/
#endif
|
XiaoMi/nnlib | 20,025 | hexagon/asm_src/gvconv2dbbw_h.S | /*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*======================================================================*/
/* FUNCTIONS : gvmmpybbw_asm */
/* */
/* DESCRIPTION */
/* Perform gvm vector matrix multiply, result left at */
/* 32bits */
/* */
/* ARCHITECTURE : QDSP6V6 + HVX */
/*======================================================================*/
/* REVISION HISTORY: */
/* ================= */
/* */
/* Author Date Comments */
/* ------------------------------------------------------------- */
/* DJH 03/07/16 created */
/* DJH 05/10/16 added post add for x and y offset*/
/* DJH 07/10/16 rewrote pre-transpose */
/* DJH 09/16/16 fix over prefetch by 16 now 8 */
/*======================================================================*/
/* CYCLE-COUNT: */
/* -> K*N/256+11*N/4+24 */
/* */
/* MEMORY */
/* CODESIZE = 960 bytes */
/* STACK = 48 bytes */
/* ASSUMPTIONS */
/* y and z are 128 byte aligned */
/* x is 8byte aligned */
/* N%4=0 K%16=0 M%32=0 */
/* C MODEL */
/*======================================================================*/
#if 0
/* Reference model for gvmmpybbw_asm: C = A (N x K, u8) * B (K x M, u8),
 * accumulated at 32 bits.  Retained for documentation/verification only. */
void gvmmpybbw_cn(uint8 * a, uint8 * b, int * c, int N, int M, int K) {
    int col, row, kk;
    for (col = 0; col < M; col++) {
        for (row = 0; row < N; row++) {
            int32 acc = 0;                       /* 32-bit dot product */
            for (kk = 0; kk < K; kk++) {
                uint8 av = a[row * K + kk];      /* A is row-major */
                uint8 bv = b[kk * M + col];      /* B is row-major */
                acc += av * bv;
            }
            c[row * M + col] = acc;
        }
    }
    return;
}
#endif
/*=============================================================================*/
.text
.file "gvconv2dbbw_h.S"
.global gvconv2dbbw_asm
.balign 32
.type gvconv2dbbw_asm, @function
gvconv2dbbw_asm:
/*=============================================================================*/
#define ptr_x r0 //data
#define ptr_yi r1 //weights
#define ptr_z r2 //results
#define in_width r3 //(pad_x+in_width) * depth
#define out_width r4 //out_width
#define m r5 //is stride of the output matrix always mult of 32
#define stride_depth r6 //stride|depth between computations
#define filt_width r7 //depth*filt_width
#define filt_height r8  //filter height: lines per filter
#define out_height  r9  //number of vertical output lines to perform
#define ptr_datasum r10 //ptr to per-output input-data sums
#define ptr_weightsum r11 //ptr to precomputed weight sums
#define ptr_max     r12 //ptr to running max vector
#define in_offset   r14 //input zero-point offset
#define PREFETCH    64  //bytes ahead to dcfetch
/*=============================================================================*/
// NOTE(review): several registers below alias the argument registers above;
// the aliased argument is consumed before the alias is first written.
#define filt_skip   r13 //the skip back after filt_width is done, for the next filt_y
#define stride3_1   r1  //3*stride-related pointer correction (see setup code)
#define ptr_x0      r11 //current activation pointer (reuses r11 = ptr_weightsum)
#define stride4     r13 //2*stride_depth (reuses r13 = filt_skip)
#define stride      r25 //vertical stride (upper half of stride_depth arg)
#define depth       r6  //input depth
#define next_outputs r23 //jump to input ptr for next set of outputs
#define ptr_y       r24 //weights pointer
#define col_count   r22 //columns of output remaining
#define xsum        r0  //kernel sum * filt_offset computed externally
#define pre_x       r26 //prefetch pointer into activations
#define fetch_count r27 //prefetch pacing counter
#define c4          r6  //shuffle size for final max reduce (reuses r6 = depth)
#define MSTRIDE     M0  //stride*depth
#define M4STRIDE_1  M1  //3*stride*depth-16  0-1-2-3
//01234567
// Activation double-word aliases: the x2*/x3* names reuse the same register
// pairs as x0*/x1* (software-pipelined double use); the trailing bit pattern
// comments show which pipeline slots each value is live in.
#define x07x04x03x00 r21:20 //11-----1
#define x07x04       r21    //11-----1
#define x03x00       r20    //1------1
#define x0fx0cx0bx08 r15:14 //1111---1
#define x0fx0c       r15    //1111---1
#define x0bx08       r14    //111----1
#define x17x14x13x10 r19:18 //11------
#define x17x14       r19    //11------
#define x13x10       r18    //1-------
#define x1fx1cx1bx18 r17:16 //1111----
#define x1fx1c       r17    //1111----
#define x1bx18       r16    //111-----
#define x27x24x23x20 r21:20 //---111--
#define x27x24       r21    //---111--
#define x23x20       r20    //---11---
#define x2fx2cx2bx28 r19:18 //---1111-
#define x2fx2c       r19    //---11111
#define x2bx28       r18    //---1111-
#define x37x34x33x30 r15:14 //----11--
#define x37x34       r15    //----11--
#define x33x30       r14    //----1---
#define x3fx3cx3bx38 r17:16 //----1111
#define x3fx3c       r17    //----1111
#define x3bx38       r16    //----111-
/*=============================================================================*/
#define z0 v0       //accumulator, output 0
#define z1 v1       //accumulator, output 1
#define z1z0 v1:0   //accumulator pair 0/1
#define z2 v2       //accumulator, output 2
#define z3 v3       //accumulator, output 3
#define z3z2 v3:2   //accumulator pair 2/3
#define x0 v4       //splat of data sum, output 0
#define x1 v5       //splat of data sum, output 1
#define x2 v6       //splat of data sum, output 2
#define x3 v7       //splat of data sum, output 3
#define y0 v8       //weights
#define y1 v9       //weights
#define y2 v10      //weights
#define y3 v11      //weights
#define vwsum v15   //precomputed weight-sum vector
#define maxomaxe v13:12 //running max (odd:even)
#define maxe v12    //running max, even lanes
#define maxo v13    //running max, odd lanes
#define vc8000 v14  //splat of 0x80000001 (max-negative init value)
/*=============================================================================*/
{
stride_depth = memw(sp+#0<<2) //extract stride*depth
filt_width = memw(sp+#1<<2) //extract filt_width*depth
} {
filt_height = memw(sp+#2<<2) //extract filt_height
out_height = memw(sp+#3<<2) //number of output lines
m = asl(m, #2) //in ints
} {
ptr_datasum = memw(sp+#4<<2) //data sum ptr
ptr_weightsum = memw(sp+#5<<2) //ptr pre computed weight sum
} {
ptr_max = memw(sp+#6<<2) //ptr pre computed max value in output
allocframe(#64) //
} {
memd(sp+#32) = r25:24 //
memd(sp+#0) = r17:16 //
stride = lsr(stride_depth, #16) //
} {
memd(sp+#16) = r21:20 //
memd(sp+#24) = r23:22 //
stride_depth = mpy(stride_depth.H, stride_depth.L)
} {
M0 = stride_depth //
memd(sp+#8) = r19:18 //
memd(sp+#40) = r27:26 //
} {
memw(sp+#48) = ptr_x //
memw(sp+#52) = ptr_yi //
} {
vwsum = vmem(ptr_weightsum+#0) //
stride3_1 = addasl(stride_depth, stride_depth,#1) //3*stride
r16 = ##0x80000001 //max negative
} {
stride3_1 = sub(#16, stride3_1) //
next_outputs = mpyi(filt_height, in_width)
vc8000 = vsplat(r16) //
memw(sp+#56) = out_width //
} {
M1 = stride3_1 // add to
stride4= asl(stride_depth, #1) //4-2*stride to corrct for outper pipeline
stride3_1 = add(stride3_1, #16) //used for dc prefetch
p3 = cmp.gt(stride_depth, #96) //is !(D <= 96) heuristic to fix prefetch
} {
memw(sp+#60) = m //
next_outputs = sub(next_outputs, stride4)
filt_skip = sub(filt_width, in_width)
filt_width = lsr(filt_width, #4) //filt_width / 16
} {
maxe= vmem(ptr_max+#0)
in_width = mpyi(in_width, stride) //
filt_width = add(filt_width, #-1)
if(p3) stride3_1 = sub(stride3_1, stride_depth) //used for dc prefetch
}
/*============================================================================*/
.balign 32
.L_height:
{
ptr_x0 = memw(sp+#48) //ptr_x
out_height = add(out_height, #-1) //
} {
col_count = memw(sp+#56) //out_width
memw(sp+#48) += in_width //ptr_x=add(ptr_x,in_width) //ptr_x+=in_width
pre_x = add(ptr_x0, #PREFETCH)
}
.balign 32
.L_width:
{
ptr_y = memw(sp+#52) //ptr_y = ptr_yi initialize filter pointer
fetch_count = #0
} {
loop1(.L_filt_height, filt_height) //[P, 0]for(filt_y=0; filt_y < n; filt_y+=1){
y0 = vmem(ptr_y++#2) //[0, 0]32x4
dcfetch(pre_x)
} {
loop0(.L_filt_width, filt_width) //[P, 0]ki is k1/16 - 1
y1 = vmem(ptr_y+#-1) //[0, 1]32x4
pre_x = add(pre_x, stride_depth)
} {
z1z0 = vcombine(vwsum, vwsum) //[P, 0]
x0fx0cx0bx08 = memd(ptr_x0+#8) //[0, 2]
x07x04x03x00 = memd(ptr_x0++MSTRIDE) //[0, 2]
} {
z3z2 = vcombine(vwsum, vwsum) //[P, 0]
x1fx1cx1bx18 = memd(ptr_x0+#8) //[0, 3]
x17x14x13x10 = memd(ptr_x0++MSTRIDE) //[0, 3]
}
.balign 32
.L_filt_height:
{
z0.uw += vrmpy(y0.ub, x03x00.ub) //[0, 4]
z1.uw += vrmpy(y0.ub, x13x10.ub) //[0, 4]
y2 = vmem(ptr_y++#2) //[0, 4]32x4
dcfetch(pre_x)
} {
z0.uw += vrmpy(y1.ub, x07x04.ub) //[0, 5]
z1.uw += vrmpy(y1.ub, x17x14.ub) //[0, 5]
y3 = vmem(ptr_y+#-1) //[0, 5]32x4
pre_x = add(pre_x, stride_depth)
} {
p3 = cmp.eq(fetch_count, #1) //[0, 6]
fetch_count = add(fetch_count, #1) //[0, 6]
z0.uw += vrmpy(y2.ub, x0bx08.ub) //[0, 6]
z1.uw += vrmpy(y2.ub, x1bx18.ub) //[0, 6]
} {
if(p3) fetch_count = #0 //[0, 6.5]
if(p3) pre_x = add(pre_x, stride3_1) //[0, 6.5]
x2fx2cx2bx28 = memd(ptr_x0+#8) //[0, 6.5]
x27x24x23x20 = memd(ptr_x0++MSTRIDE) //[0, 6.5]
} {
z0.uw += vrmpy(y3.ub, x0fx0c.ub) //[0, 7]
z1.uw += vrmpy(y3.ub, x1fx1c.ub) //[0, 7]
x3fx3cx3bx38 = memd(ptr_x0+#8) //[0, 7]
x37x34x33x30 = memd(ptr_x0++M4STRIDE_1)//[0, 7]
}
.balign 32
.L_filt_width:
{
z2.uw += vrmpy(y0.ub, x23x20.ub) //[0, 8]
z3.uw += vrmpy(y0.ub, x33x30.ub) //[0, 8]
y0 = vmem(ptr_y++#2) //[1, 0]32x4
dcfetch(pre_x)
} {
z2.uw += vrmpy(y1.ub, x27x24.ub) //[0, 9]
z3.uw += vrmpy(y1.ub, x37x34.ub) //[0, 9]
y1 = vmem(ptr_y+#-1) //[1, 1]32x4
pre_x = add(pre_x, stride_depth)
} {
z2.uw += vrmpy(y2.ub, x2bx28.ub) //[0,10]
z3.uw += vrmpy(y2.ub, x3bx38.ub) //[0,10]
x0fx0cx0bx08 = memd(ptr_x0+#8) //[1, 2]
x07x04x03x00 = memd(ptr_x0++MSTRIDE) //[1, 2]
} {
z2.uw += vrmpy(y3.ub, x2fx2c.ub) //[0,11]
z3.uw += vrmpy(y3.ub, x3fx3c.ub) //[0,11]
x1fx1cx1bx18 = memd(ptr_x0+#8) //[1, 3]
x17x14x13x10 = memd(ptr_x0++MSTRIDE) //[1, 3]
} {
z0.uw += vrmpy(y0.ub, x03x00.ub) //[1, 4]
z1.uw += vrmpy(y0.ub, x13x10.ub) //[1, 4]
y2 = vmem(ptr_y++#2) //[1, 4]32x4
dcfetch(pre_x)
} {
z0.uw += vrmpy(y1.ub, x07x04.ub) //[1, 5]
z1.uw += vrmpy(y1.ub, x17x14.ub) //[1, 5]
y3 = vmem(ptr_y+#-1) //[1, 5]32x4
pre_x = add(pre_x, stride_depth)
} {
p3 = cmp.eq(fetch_count, #1) //[1, 6]
fetch_count = add(fetch_count, #1) //[1, 6]
z0.uw += vrmpy(y2.ub, x0bx08.ub) //[1, 6]
x2fx2cx2bx28 = memd(ptr_x0+#8) //[1, 6.5]
} {
z1.uw += vrmpy(y2.ub, x1bx18.ub) //[1, 6]
if(p3) fetch_count = #0 //[0, 6.5]
if(p3) pre_x = add(pre_x, stride3_1) //[0, 6.5]
x27x24x23x20 = memd(ptr_x0++MSTRIDE) //[1, 6.5]
} {
z0.uw += vrmpy(y3.ub, x0fx0c.ub) //[1, 7]
z1.uw += vrmpy(y3.ub, x1fx1c.ub) //[1, 7]
x3fx3cx3bx38 = memd(ptr_x0+#8) //[1, 7]
x37x34x33x30 = memd(ptr_x0++M4STRIDE_1)//[1, 7]
}:endloop0
{
z2.uw += vrmpy(y0.ub, x23x20.ub) //[1, 8]
z3.uw += vrmpy(y0.ub, x33x30.ub) //[1, 8]
ptr_x0 = sub(ptr_x0, filt_skip) //[E, 0]move to next line ptr_y keeps going
y0 = vmem(ptr_y++#2) //[0, 0]32x4
} {
fetch_count = #0
pre_x = add(ptr_x0, #PREFETCH) //
z2.uw += vrmpy(y1.ub, x27x24.ub) //[1, 9]
z3.uw += vrmpy(y1.ub, x37x34.ub) //[1, 9]
} {
loop0(.L_filt_width, filt_width) //[P, 0]ki is k1/16 - 1
y1 = vmem(ptr_y+#-1) //[0, 1]32x4
dcfetch(pre_x)
pre_x = add(pre_x, stride_depth)
} {
z2.uw += vrmpy(y2.ub, x2bx28.ub) //[1,10]
z3.uw += vrmpy(y2.ub, x3bx38.ub) //[1,10]
x0fx0cx0bx08 = memd(ptr_x0+#8) //[0, 2]
x07x04x03x00 = memd(ptr_x0++MSTRIDE) //[0, 2]
} {
z2.uw += vrmpy(y3.ub, x2fx2c.ub) //[1,11]
z3.uw += vrmpy(y3.ub, x3fx3c.ub) //[1,11]
x1fx1cx1bx18 = memd(ptr_x0+#8) //[0, 3]
x17x14x13x10 = memd(ptr_x0++MSTRIDE) //[0, 3]
}:endloop1
{
ptr_x0 = sub(ptr_x0, next_outputs) //
xsum = memw(ptr_datasum++#1<<2) //
} {
x0 = vsplat(xsum) //
m = memw(sp+#60) //
//pre_x = sub(pre_x, next_outputs) //
pre_x = add(ptr_x0, #PREFETCH)
} {
z0.w = vadd(z0.w, x0.w) //
vmem(ptr_z+#0):nt = z0.new //[E, ]
ptr_z = add(ptr_z, m) //
p0 = cmp.gt(col_count, #1) //
} {
maxe.w = vmax(maxe.w, z0.w) //
if(p0) xsum = memw(ptr_datasum++#1<<2) //
} {
dcfetch(ptr_x0)
x1 = vsplat(xsum) //
} {
z1.w = vadd(z1.w, x1.w) //
if(p0)vmem(ptr_z+#0):nt = z1.new //[E, ]
if(p0)ptr_z = add(ptr_z, m) //
p1 = cmp.gt(col_count, #2) //
} {
if(!p0) z1 = vc8000 //
if(p1) xsum = memw(ptr_datasum++#1<<2) //
} {
dcfetch(ptr_x0+#32)
maxe.w = vmax(maxe.w, z1.w) //
x2 = vsplat(xsum) //
} {
z2.w = vadd(z2.w, x2.w) //
if(p1)vmem(ptr_z+#0):nt = z2.new //[E, ]
if(p1)ptr_z = add(ptr_z, m) //
p0 = cmp.gt(col_count, #3) //
} {
if(!p1) z2 = vc8000 //
if(p0) xsum = memw(ptr_datasum++#1<<2) //
} {
maxe.w = vmax(maxe.w, z2.w) //
x3 = vsplat(xsum) //
col_count = add(col_count, #-4) //
} {
z3.w = vadd(z3.w, x3.w) //
if(p0)vmem(ptr_z+#0):nt = z3.new //[E, ]
if(p0)ptr_z = add(ptr_z, m) //
} {
if(!p0) z3 = vc8000 //
} {
maxe.w = vmax(maxe.w, z3.w) //
p2 = cmp.gt(col_count, #0) //
if(p2.new) jump:t .L_width //
}//end cols per line
{
p1 = cmp.eq(out_height, #0) //
if(!p1.new) jump:t .L_height //
}//end lines per block
{
loop0(.L_peak, #5) //[P, 0]
c4 = #4 //
}
.L_peak:
{
maxomaxe=vshuff(maxe,maxe,c4) //[0, 0]
} {
maxe.w = vmax(maxo.w, maxe.w) //[0, 1]
c4 = add(c4, c4) //[0, 1]
}:endloop0
{ vmem(ptr_max+#0) = maxe //[E, 0]
}
/*=============================================================================*/
{ r17:16 = memd(sp+#0) //restore stack
r19:18 = memd(sp+#8) //Q
} {
r21:20 = memd(sp+#16) //Q
r23:22 = memd(sp+#24) //Q
} {
r25:24 = memd(sp+#32) //Q
r27:26 = memd(sp+#40) //Q
} {
dealloc_return //Q
}
.L_end:
/*=============================================================================*/
.size gvconv2dbbw_asm, .L_end-gvconv2dbbw_asm
|
XiaoMi/nnlib | 21,406 | hexagon/asm_src/dwconv2dbbb_s2_7xN_h.S | /*
Behavioral C model
*/
#if 0
/*
 * Scalar reference model of the depthwise 2-d convolution (d32 layout).
 *
 * For every output pixel it accumulates bias + sum(in*filt) over the
 * zero-padded (multiple-of-4) filter width, subtracts the filter-zero
 * correction for the real taps, shifts, scales by recip_level with
 * rounding, updates the running max/min arrays (max[0..31] = max,
 * max[32..63] = min), clamps to u8 and stores.  Outputs are produced
 * four at a time; padded columns past out_width repeat column 0 so the
 * max/min statistics are not polluted.
 *
 * Writes out_buf and max[]; reads everything else.  No return value.
 */
void dwconv2dbbb_MxN_cn(
   uint8_t *in_buf,
   uint8_t  *filt,
   uint8_t  *out_buf,
   int next_in_width,
   int next_out_width,
   int next_in_width_32,
   int next_out_width_32,
   int depth,
   int out_width,
   int out_height,
   int filt_width,
   int filt_height,
   int filt_zero,
   int32_t *bias_sum,
   int32_t *max,
   int recip_level,
   int recip_shift,
   int stride_width,
   int stride_height)
{
   int oy, ox, dd, oz, ur, fy, fx;
   int padded_out_w  = (out_width  + 3) & (~3);   /* width rounded up to 4  */
   int padded_filt_w = (filt_width + 3) & (~3);   /* filter width up to 4   */
   int remain;                                     /* real columns left      */
   int32_t acc, zcorr, acc_first;
   int64_t wide;

   for (oy = 0; oy < out_height; oy++) {
      remain = out_width;
      for (ox = 0; ox < padded_out_w; ox += 4) {
         remain -= 4;
         for (dd = 0; dd < depth / 32; dd++) {
            for (oz = 0; oz < 32; oz++) {
               for (ur = 0; ur < 4; ur++) {
                  acc   = (int32_t)bias_sum[32 * dd + oz];
                  zcorr = 0;
                  for (fy = 0; fy < filt_height; fy++) {
                     /* row base for this filter line / depth slice */
                     int row_base = (oy * stride_height + fy) * next_in_width
                                  + dd * next_in_width_32;
                     for (fx = 0; fx < padded_filt_w; fx++) {
                        int in_val = in_buf[row_base
                                   + ((ox + ur) * stride_width + fx) * 32
                                   + oz];
                        int filt_val = filt[32 * dd * filt_height * padded_filt_w
                                   + (padded_filt_w * fy) * 32
                                   + oz * 4 + 128 * (fx / 4)
                                   + (fx % 4)];
                        acc += (uint32_t)in_val * (int32_t)filt_val;
                        if (fx < filt_width)          /* only real taps */
                           zcorr += (uint32_t)in_val * (int32_t)filt_zero;
                     }
                  }
                  acc -= zcorr;
                  if (ur == 0)
                     acc_first = acc;                 /* remember column 0 */
                  else if (!(remain > ur - 4))
                     acc = acc_first;                 /* padded col: repeat */
                  acc <<= recip_shift;
                  wide = (int64_t)acc * ((int64_t)recip_level) + 0x40000000LL;
                  wide >>= 31;
                  acc = (int)wide;
                  if (acc > max[oz])      max[oz]      = acc;  /* running max */
                  if (acc < max[oz + 32]) max[oz + 32] = acc;  /* running min */
                  if (wide < 0)           wide = 0;            /* clamp to u8 */
                  else if (wide > 0xffll) wide = 0xffll;
                  out_buf[oy * next_out_width
                        + 32 * (ox + ur)
                        + dd * next_out_width_32
                        + oz] = (uint8_t)wide;
               } /* ur */
            } /* oz */
         } /* dd */
      } /* ox */
   } /* oy */
}
#endif
/*
CODESIZE 912 bytes
STACK 1856 bytes
PACKETS = height*(
(depth/32)*(
(width/4)*(
filt_height*12+11
)+
filt_height*6+7
)+2
)+17
*/
/* ----------------------------------------------------------------------------- */
.text
.file "dwconv2dbbb_s2_7xN_h.S"
.global dwconv2dbbb_s2_7xN_asm
.balign 32
.type dwconv2dbbb_s2_7xN_asm, @function
dwconv2dbbb_s2_7xN_asm:
/* ----------------------------------------------------------------------------- */
//I/O registers
#define in_buf               r0  //input activations, d32 format
#define filt                 r1  //rearranged filter weights
#define out_buf              r2  //output activations, d32 format
#define next_in_width_depth  r3  //input row pitch in bytes
#define next_out_width_depth r4  //currently unused
#define next_in_width_32     r5  //pitch between input depth-32 slices
#define next_out_width_32    r10 //pitch between output depth-32 slices
#define depth                r11 //total depth (later depth/32)
#define out_width            r12 //real output columns
#define out_height           r13 //output rows
#define filt_height          r25 //filter rows
#define filt_zero            r7  //filter zero point
#define bias_sum             r14 //ptr to per-channel bias+sum values
#define ptr_max              r15 //ptr to running max/min vectors
#define recip_level          r4  //NOTE: reuses r4 (next_out_width_depth is unused)
#define recip_shift          r8  //pre-scale left shift amount
#define stride_v             r28 //vertical stride
#define sbuf                 r14 //scratch shuffle buffer ptr (reuses r14 after bias_sum read)
//Scalar registers
#define ptr_w0               r16 //filter ptr, start of depth slice
#define ptr_w1               r17 //filter ptr, walking taps
#define col_count            r9  //output columns remaining
#define bias_ptr             r18 //walking bias ptr
#define ptr_x0               r19 //activation ptr, current 4-output group
#define ptr_x1               r20 //activation ptr, walking filter rows
#define ptr_xin              r22 //activation ptr, start of depth slice
#define ptr_y                r23 //output write ptr
#define depth_cnt            r26 //depth/32 countdown
#define filt_size            r8  //filter rows (reuses r8 after recip_shift consumed)
#define next_in_width_depth_stride r28 //row pitch * vertical stride (reuses r28)
#define zzzz                 r7  //filter zero point splatted to 4 bytes
#define _zzz                 r27 //zero point in low 3 bytes (4th tap excluded)
#define out_width4           r6  //ceil(out_width/4) iterations
//Vector registers
#define vrecip               v0  //1/max scale factors
#define vshamt_vec           v1  //splat of recip_shift
#define max                  v2  //running per-channel maxima
#define min                  v3  //running per-channel minima
#define bias_val             v4  //per-channel bias values
#define w_654                v8  //filter taps 4..6
#define w3210                v28 //filter taps 0..3
#define x0                   v30 //raw input load 0
#define x1                   v29 //raw input load 1
#define x2                   v5  //raw input load 2
#define x3                   v11 //raw input load 3
// Shuffle-stage aliases: successive vshuff results reuse the same registers.
#define x3x1x2x0             v30 //
#define x3x2x1x0             v30 //
#define x7x5x6x4             v29 //
#define x7x6x5x4             v10 //
#define xbxax9x8             v5  //
#define xbx9xax8             v5  //
#define xfxexdxc             v11 //
#define x3x2x3x2             v6  //
#define x5x4x3x2             v6  //
#define x7x6x7x6             v9  //
#define x9x8x7x6             v9  //
#define xbxaxbxa             v7  //
#define xdxcxbxa             v7  //
#define s0                   v12 //accumulator, output 0
#define s1                   v13 //accumulator, output 1
#define s2                   v14 //accumulator, output 2
#define s3                   v15 //accumulator, output 3
#define z0                   v16 //zero-point correction, output 0
#define z1                   v17 //zero-point correction, output 1
#define z2                   v18 //zero-point correction, output 2
#define z3                   v19 //zero-point correction, output 3
#define d0                   v20 //scaled result, output 0
#define d1                   v21 //scaled result, output 1
#define d1d0                 v24 //packed halves 1:0
#define d2                   v22 //scaled result, output 2
#define d3                   v23 //scaled result, output 3
#define d3d2                 v25 //packed halves 3:2
#define d3210                v25 //packed bytes 3:2:1:0
#define SSR <<1:rnd:sat:shift                    //simplify mpy instruction
#define PV(VSRC) .word (0x1DFFE020+VSRC)         //debug vec regs
#define PS(SSRC) .word (0x1DFFE100+SSRC)         //debug sca regs
/* =========================================================================== */
/* ===========================================================================
   Body: loop nest is height -> depth/32 -> width (4 outputs per pass) ->
   filter rows.  Activations are byte-shuffled once per depth slice into a
   scratch buffer (sbuf) by .L_init, then reread each width iteration while
   the next group is shuffled in-flight.  s0..s3 accumulate in*filt, z0..z3
   accumulate in*filt_zero for the real taps; the difference is shifted,
   scaled by vrecip with rounding, max/min tracked, packed to u8 and stored.
   =========================================================================== */
    { allocframe(#56)                            //0th entry on stack (56+8)/4=20
    } {
       memd(sp+#0)  = r17:16                     //save callee-saved registers
       memd(sp+#8)  = r19:18                     //
    } {
       memd(sp+#16) = r21:20                     //
       memd(sp+#24) = r23:22                     //
    } {
       memd(sp+#32) = r25:24                     //
       memd(sp+#40) = r27:26                     //
    } {
       next_out_width_32 = memw(sp+#16<<2)       //stack arg 16
       depth = memw(sp+#17<<2)                   //stack arg 17
    } {
       out_width  = memw(sp+#18<<2)              //
       out_height = memw(sp+#19<<2)              //
       depth = lsr(depth, #5)                    //depth/32
    } {
       filt_height = memw(sp+#20<<2)             //
       filt_zero   = memw(sp+#21<<2)             //
       out_width4 = add(out_width, #3)           //round width up for /4
    } {
       ptr_max = memw(sp+#23<<2)                 //
       out_width4 = lsr(out_width4, #2)          //ceil(out_width/4)
       zzzz = vsplatb(filt_zero)                 //zero point in all 4 bytes
    } {
       _zzz = lsr(zzzz, #8)                      //zero point in low 3 bytes only
       recip_shift = memw(sp+#25<<2)             //
       stride_v    = memw(sp+#26<<2)             //
    } {
       max = vmem(ptr_max+#0)                    //running maxima
       memw(sp+#17<<2) = depth                   //store depth/32 back for reload
       vshamt_vec= vsplat(recip_shift)           //
       next_in_width_depth_stride = mpyi(next_in_width_depth,stride_v) //
    } {
       min = vmem(ptr_max+#1)                    //running minima
       depth_cnt = memw(sp+#17<<2)               //depth
       filt_size = filt_height                   //keep full row count
       filt_height = add(filt_height, #-1)       //inner loop runs rows-1 times
    }
/* ----------------------------------------------------------------------------- */
        .balign 32
.L_height:
    { bias_ptr = memw(sp+#22<<2)                 //restart bias walk
      ptr_xin  = in_buf                          //
      ptr_w0   = filt                            //restart filter walk
      recip_level = memw(sp+#24<<2)              //restart recip walk
    }
/* ----------------------------------------------------------------------------- */
.L_depth:
    {
      bias_val = vmem(bias_ptr++#1)              //bias for this depth slice
      loop0(.L_init, filt_size)                  //shuffle one row per iteration
      ptr_x1 = ptr_xin                           //[WIDTH, P]
      sbuf = memw(sp+#27<<2)                     //scratch shuffle buffer
    } {
      vrecip = vmem(recip_level++#1);
      loop1(.L_width, out_width4)                //
    }
/* --------------------------------------------------------------------------- */
        .balign 32
.L_init:
    {
      x0 = vmemu(ptr_x1+#0)                      //[0, 0]
    } {
      x1 = vmemu(ptr_x1+#1)                      //[0, 1]
      ptr_x1 = add(ptr_x1, next_in_width_depth)  //[0, 1]
    } {
      x0.b = vshuff(x0.b)                        //[0, 2]
    } {
      x1.b = vshuff(x1.b)                        //[0, 3]
    } {
      x3x2x1x0.b = vshuff(x0.b)                  //[0, 4]
      vmem(sbuf++#1) = x3x2x1x0.new              //next xb-8
    } {
      x7x6x5x4.b = vshuff(x1.b)                  //[0, 5]
      vmem(sbuf++#1) = x7x6x5x4.new              //next xb-8
    }:endloop0
/* --------------------------------------------------------------------------- */
    { sbuf = memw(sp+#27<<2)                     //[WIDTH]
      w3210 = vmem(ptr_w0+#0)                    //[0, 0]
      ptr_x0 = ptr_xin                           //
      p3 = !cmp.eq(r0, r0)                       //p3=false: skip store 1st pass
    } {
      col_count = out_width                      //
      x3x2x1x0 = vmem(sbuf+#0)                   //[0, 1]
      ptr_y = out_buf                            //
    }
/* --------------------------------------------------------------------------- */
        .balign 32
.L_width:
    { x7x6x5x4 = vmem(sbuf+#1)                   //[0, 2]
    } {
      x2 = vmemu(ptr_x0+#2)                      //[0, 3]
      ptr_w1 = add(ptr_w0, #128)                 //[WIDTH, P]
    } {
      s0.uw = vrmpy(x3x2x1x0.ub, w3210.ub)       //[0, 4]filter even output
      x3x2x3x2.h = vshuffo(x3x2x1x0.h, x3x2x1x0.h) //[0, 4]
      w_654 = vmem(ptr_w1++#1)                   //[0, 4]
      ptr_x1 = ptr_x0                            //[WIDTH, P]
    } {
      s2.uw = vrmpy(x7x6x5x4.ub, w3210.ub)       //[0, 5]filter even output
      xbx9xax8.b = vshuff(x2.b)                  //[0, 5]
      x5x4x3x2.h = vshuffe(x7x6x5x4.h, x3x2x3x2.h) //[0, 5]
      ptr_x0 = add(ptr_x0, #256)                 //[WIDTH,P]+8 32 depths for stride 2
    } {
      z0.uw = vrmpy(x3x2x1x0.ub, zzzz.ub)        //[0, 6]filter even output
      z2.uw = vrmpy(x7x6x5x4.ub, zzzz.ub)        //[0, 6]filter even output
      x3 = vmemu(ptr_x1+#3)                      //[0, 6]
    } {
      s0.uw += vrmpy(x7x6x5x4.ub, w_654.ub)      //[0, 7]filter even output
      x7x6x7x6.h = vshuffo(x7x6x5x4.h, x7x6x5x4.h) //[0, 7]
      xbxax9x8.b = vshuff(xbx9xax8.b)            //[0, 7]
      vmem(sbuf++#1) = xbxax9x8.new              //[0, 7]next xb-8
    } {
      s1.uw = vrmpy(x5x4x3x2.ub, w3210.ub)       //[0, 8]filter even output
      x9x8x7x6.h = vshuffe(xbxax9x8.h, x7x6x7x6.h) //[0, 8]
      s0.w = vadd(s0.w, bias_val.w)              //[WIDTH, P]
      ptr_x1 = add(ptr_x1, next_in_width_depth)  //[0, 8]
    } {
      z0.uw += vrmpy(x7x6x5x4.ub, _zzz.ub)       //[0, 9]filter even output
      z1.uw = vrmpy(x5x4x3x2.ub, zzzz.ub)        //[0, 9]filter even output
      x3.b = vshuff(x3.b)                        //[0, 9]
      s1.w = vadd(s1.w, bias_val.w)              //[WIDTH, P]
    } {
      s1.uw += vrmpy(x9x8x7x6.ub, w_654.ub)      //[0,10]filter even output
      xbxaxbxa.h = vshuffo(xbxax9x8.h, xbxax9x8.h) //[0,10]
      s2.w = vadd(s2.w, bias_val.w)              //[WIDTH, P]
      loop0(.L_vert, filt_height)                //[WIDTH, P]
    } {
      s3.uw = vrmpy(x9x8x7x6.ub, w3210.ub)       //[0,11]filter even output
      xfxexdxc.b = vshuff(x3.b)                  //[0,11]
      vmem(sbuf++#1) = xfxexdxc.new              //[0,11]next xb-8
      z3 = #0                                    //[WIDTH]
    }
/* --------------------------------------------------------------------------- */
        .balign 32
.L_vert:
    { z1.uw+= vrmpy(x9x8x7x6.ub, _zzz.ub)        //[0,12]filter even output
      z3.uw+= vrmpy(x9x8x7x6.ub, zzzz.ub)        //[0,12]filter even output
      xdxcxbxa.h = vshuffe(xfxexdxc.h, xbxaxbxa.h) //[0,12]
      w3210 = vmem(ptr_w1++#1)                   //[1, 0]
    } {
      s2.uw += vrmpy(xbxax9x8.ub, w_654.ub)      //[0,13]filter even output
      x3x2x1x0 = vmem(sbuf+#0)                   //[1, 1]
    } {
      s3.uw += vrmpy(xdxcxbxa.ub, w_654.ub)      //[0,14]filter even output
      x7x6x5x4 = vmem(sbuf+#1)                   //[1, 2]
    } {
      z2.uw += vrmpy(xbxax9x8.ub, _zzz.ub)       //[0,15]filter even output
      z3.uw += vrmpy(xdxcxbxa.ub, _zzz.ub)       //[0,15]filter even output
      x2 = vmemu(ptr_x1+#2)                      //[1, 3]
    } {
      s0.uw += vrmpy(x3x2x1x0.ub, w3210.ub)      //[1, 4]filter even output
      x3x2x3x2.h = vshuffo(x3x2x1x0.h, x3x2x1x0.h) //[1, 4]
      w_654 = vmem(ptr_w1++#1)                   //[1, 4]
    } {
      s2.uw += vrmpy(x7x6x5x4.ub, w3210.ub)      //[1, 5]filter even output
      xbx9xax8.b = vshuff(x2.b)                  //[1, 5]
      x5x4x3x2.h = vshuffe(x7x6x5x4.h, x3x2x3x2.h) //[1, 5]
    } {
      z0.uw += vrmpy(x3x2x1x0.ub, zzzz.ub)       //[1, 6]filter even output
      z2.uw += vrmpy(x7x6x5x4.ub, zzzz.ub)       //[1, 6]filter even output
      x3 = vmemu(ptr_x1+#3)                      //[1, 6]
    } {
      s0.uw += vrmpy(x7x6x5x4.ub, w_654.ub)      //[1, 7]filter even output
      x7x6x7x6.h = vshuffo(x7x6x5x4.h, x7x6x5x4.h) //[1, 7]
      xbxax9x8.b = vshuff(xbx9xax8.b)            //[1, 7]
      vmem(sbuf++#1) = xbxax9x8.new              //[1, 7]next xb-8
    } {
      s1.uw += vrmpy(x5x4x3x2.ub, w3210.ub)      //[1, 8]filter even output
      x9x8x7x6.h = vshuffe(xbxax9x8.h, x7x6x7x6.h) //[1, 8]
      ptr_x1 = add(ptr_x1, next_in_width_depth)  //[1, 8]
    } {
      z0.uw += vrmpy(x7x6x5x4.ub, _zzz.ub)       //[1, 9]filter even output
      z1.uw += vrmpy(x5x4x3x2.ub, zzzz.ub)       //[1, 9]filter even output
      x3.b = vshuff(x3.b)                        //[1, 9]
    } {
      s1.uw += vrmpy(x9x8x7x6.ub, w_654.ub)      //[1,10]filter even output
      xbxaxbxa.h = vshuffo(xbxax9x8.h, xbxax9x8.h) //[1,10]
    } {
      s3.uw += vrmpy(x9x8x7x6.ub, w3210.ub)      //[1,11]filter even output
      xfxexdxc.b = vshuff(x3.b)                  //[1,11]
      vmem(sbuf++#1) = xfxexdxc.new              //[1,11]next xb-8
    }:endloop0
/* --------------------------------------------------------------------------- */
    { z2.uw += vrmpy(xbxax9x8.ub, _zzz.ub)       //[1,15]filter even output
      z3.uw+= vrmpy(x9x8x7x6.ub, zzzz.ub)        //[1,12]filter even output
      xdxcxbxa.h = vshuffe(xfxexdxc.h, xbxaxbxa.h) //[1,12]
      s0.w = vsub(s0.w, z0.w)                    //[WIDTH]remove zero-point bias
    } {
      s2.uw += vrmpy(xbxax9x8.ub, w_654.ub)      //[1,13]filter even output
      s3.w = vadd(s3.w, bias_val.w)              //[WIDTH, P]
      d3d2.h = vpack(d3.w, d2.w):sat             //[WIDTH, E]pack previous pass
      col_count = add(col_count, #-4)            //[WIDTH]
    } {
      z1.uw+= vrmpy(x9x8x7x6.ub, _zzz.ub)        //[1,12]filter even output
      z3.uw += vrmpy(xdxcxbxa.ub, _zzz.ub)       //[1,15]filter even output
      s2.w = vsub(s2.w, z2.w)                    //[WIDTH,E]
      s0.w = vasl(s0.w, vshamt_vec.w)            //[WIDTH,E]pre-scale shift
    } {
      s1.w = vsub(s1.w, z1.w)                    //[WIDTH]
      s3.uw += vrmpy(xdxcxbxa.ub, w_654.ub)      //[1,14]filter even output
      d3210.ub = vpack(d3d2.h, d1d0.h):sat       //[WIDTH, E]previous pass bytes
    } {
      s2.w = vasl(s2.w, vshamt_vec.w)            //[WIDTH]
      d0.w = vmpye(s0.w, vrecip.uh)              //[WIDTH]multiply by 1/max
      s3.w = vsub(s3.w, z3.w)                    //[WIDTH]
      p0 = !cmp.gt(col_count, #-3)               //[WIDTH]
    } {
      d0.w += vmpyo(s0.w, vrecip.h):SSR          //[WIDTH]
      s1.w = vasl(s1.w, vshamt_vec.w)            //[WIDTH]
      if(p3) vmemu(ptr_y++#1) = d3210            //[WIDTH, E]store previous pass
    } {
      d2.w = vmpye(s2.w, vrecip.uh)              //[0,15]multiply by 1/max
      s3.w = vasl(s3.w, vshamt_vec.w)            //[WIDTH]
      if(p0) s1 = s0                             //[WIDTH] if over gen'd write valid val
      p1 = !cmp.gt(col_count, #-1)               //[WIDTH]
    } {
      if(p1) s3 = s0                             //[WIDTH]
      d2.w += vmpyo(s2.w, vrecip.h):SSR          //[0,17]
      p0 = !cmp.gt(col_count, #-2)               //[WIDTH]
      min.w = vmin(min.w, d0.w)                  //[0,22]
    } {
      d1.w = vmpye(s1.w, vrecip.uh)              //[0,22]multiply by 1/max
      max.w = vmax(max.w, d0.w)                  //[0,18]
      if(p0) d2 = d0                             //[WIDTH]
      p3 = cmp.eq(r0, r0)                        //[WIDTH]enable stores from now on
    } {
      d1.w += vmpyo(s1.w, vrecip.h):SSR          //[0,23]
      min.w = vmin(min.w, d2.w)                  //[0,22]
      max.w = vmax(max.w, d2.w)                  //[0,18]
    } {
      d3.w = vmpye(s3.w, vrecip.uh)              //[0,22]multiply by 1/max
      max.w = vmax(max.w, d1.w)                  //[0,26]
      min.w = vmin(min.w, d1.w)                  //[0,27]
      sbuf = memw(sp+#27<<2)                     //[WIDTH]rewind scratch buffer
    } {
      d1d0.h = vpack(d1.w, d0.w):sat             //[0,27]
      d3.w += vmpyo(s3.w, vrecip.h):SSR          //[0,23]
      w3210 = vmem(ptr_w0+#0)                    //[0, 0]
    } {
      max.w = vmax(max.w, d3.w)                  //[0,26]
      min.w = vmin(min.w, d3.w)                  //[0,27]
      x3x2x1x0 = vmem(sbuf+#0)                   //[0, 1]
    }:endloop1                                   //end width
/* --------------------------------------------------------------------------- */
    { d3d2.h = vpack(d3.w, d2.w):sat             //[WIDTH, E]drain final pass
      ptr_w0 += asl(filt_size, #8)               //[DEPTH,E]filt_size=filt_height*256
      ptr_xin = add(ptr_xin, next_in_width_32)   //[DEPTH]next depth slice
    } {
      depth_cnt = add(depth_cnt, #-1)            //[DEPTH,E]
      out_buf = add(out_buf, next_out_width_32)  //[DEPTH]
      loop1(.L_width, out_width4)                //[DEPTH]
    } {
      p0 = cmp.eq(depth_cnt, #0)                 //[DEPTH,E]
      d3210.ub = vpack(d3d2.h, d1d0.h):sat       //[WIDTH, E]
    } {
      vmemu(ptr_y+#0) = d3210                    //[WIDTH, E]final store this row
      if(!p0) jump .L_depth                      //[DEPTH,E]
      if(p0) out_height = add(out_height, #-1)   //
    }//end depth
/* ----------------------------------------------------------------------------- */
    { p0 = cmp.eq(out_height, #0)                //
      depth_cnt = memw(sp+#17<<2)                //depth
      in_buf=add(in_buf,next_in_width_depth_stride)//advance input by vertical stride
      if(!p0.new) jump:nt .L_height              //
    }//end height
/* ----------------------------------------------------------------------------- */
    ptr_max = memw(sp+#23<<2)                    //
    {
      r17:16 = memd(sp+#0)                       //restore
      vmem(ptr_max+#0) = max                     //write back maxima
    } {
      r19:18 = memd(sp+#8)                       //restore
      vmem(ptr_max+#1) = min                     //write back minima
    } {
      r21:20 = memd(sp+#16)                      //restore
      r23:22 = memd(sp+#24)                      //restore
    } {
      r25:24 = memd(sp+#32)                      //restore
      r27:26 = memd(sp+#40)                      //restore
    } {
      dealloc_return                             //return
    }
/* ----------------------------------------------------------------------------- */
.L_end:
.size dwconv2dbbb_s2_7xN_asm, .L_end-dwconv2dbbb_s2_7xN_asm
/* ----------------------------------------------------------------------------- */
|
XiaoMi/nnlib | 27,461 | hexagon/asm_src/gvconv2dbbb_d32_v60_h.S | /*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
* FUNCTIONS : gvconv2dbbb_v60_asm
*
* DESCRIPTION
* Perform 2d convolution using elements of size in_depth. Results are
* scaled and saturated to 8bits. Max and Min accumulations are kept.
*
* ARCHITECTURE : QDSP6V60 + HVX
*
* REVISION HISTORY:
* =================
*
* Author Date Comments
* -------------------------------------------------------------
* DJH 05/11/17 created
*
* CYCLE-COUNT:
*
* MEMORY
* CODESIZE = 928 bytes
* STACK = 80 bytes
* ASSUMPTIONS
*
*
*
* C MODEL
*/
#if 0
#endif
/*=============================================================================*/
.text
.file "gvconv2dbbb_d32_v60_h.S"
.global gvconv2dbbb_v60_asm
.balign 32
.type gvconv2dbbb_v60_asm, @function
gvconv2dbbb_v60_asm:
/*=============================================================================*/
#define ptr_xi                 r0     //data
#define ptr_wi                 r1     //weights
#define ptr_zi                 r2     //results
#define in_width               r3     //(pad_l+in_width+pad_r)
#define out_width_stride_depth r4     //next line amount
#define out_width              r5     //is amount of work to be done
#define stride_h_w             r6     //0 stride|in_depth
#define in_depth               r27    //1 input depth multiples of 32
#define filt_width             r5     //2 horizontal filter width
#define filt_height            r8     //3 filt_height lines per filter
#define out_height             r9     //4 number of vertical lines to perform
#define ptr_filtsum            r10    //5 includes the computation filt_sum * in_offset + biasvec
#define ptr_sumabuf0           r1     //6 filt_offset*in_offset - in_offset*sum(in)
#define next_sumabuf           r26    //7 stride to get to next row of suma values
#define ptr_max                r7     //8 maximum and minimum buffer
#define recip_level_ptr        r14    //9 pointer to 32 of int32, 255 / (MAX - MIN) - used to scale to bytes
#define zshift                 r7     //10 amount to << results (re-use ptr_max)
/*=============================================================================*/
#define ptr_sumabuf            r27    //temp value of suma buffer ptr
#define in_width_stride_depth  r15    //in_width * stride * in_depth for next output
#define fetch_ptr0             r17    //base fetch pointer
#define ptr_x0                 r16    //tmp pointer to activations
#define ptr_x1                 r23    //dynamic pointer to activations
#define fetch_ptr              r14    //dynamic fetch pointer
#define sum_stride             r21    //step between suma values 4 or 8 bytes
#define stride_h               r6     //vertical stride
#define stride3_w              r6     //3*32*stride_w
#define stride_w               r18    //32*stride_w
#define next_outputs           r19    //jump to input ptr for next set of outputs
#define fetch_offset           r21
#define fetch_out              r28
#define ptr_w                  r20    //pointer to weights
#define in_width_32            r22    //width of input image in bytes
#define c4                     r2     //shuffle size in final max and min find
#define ptr_z                  r24    //pointer to outputs
#define col_count              r25    //column count, how much of width used
#define col_count_ptr_z        r25:24 //packed for double word read
#define x07x04_x03x00          r11:10 //8 activations output 0
#define x07x04                 r11    //4 activations output 0
#define x03x00                 r10    //4 activations output 0
#define x17x14_x13x10          r13:12 //8 activations output 1
#define x17x14                 r13    //4 activations output 1
#define x13x10                 r12    //4 activations output 1
#define x27x24_x23x20          r11:10 //8 activations output 2 (reuses output 0 regs)
#define x27x24                 r11    //4 activations output 2
#define x23x20                 r10    //4 activations output 2
#define x37x34_x33x30          r3:2   //8 activations output 3
#define x37x34                 r3     //4 activations output 3
#define x33x30                 r2     //4 activations output 3
#define sum0                   r2     //suma for output 0
#define sum1                   r3     //suma for output 1
#define sum2                   r2     //suma for output 2 (reuses sum0 reg)
#define sum3                   r3     //suma for output 3 (reuses sum1 reg)
/*=============================================================================*/
#define PV(VSRC) .word (0x1DFFE020+VSRC)         //debug vec reg
#define PS(VSRC) nop //.word (0x1DFFE100+VSRC)   //debug scalar reg (disabled)
#define s0 v0       //accumulator for output 0
#define s1 v1       //accumulator for output 1
#define s1s0 v1:0   //accumulator
#define s2 v2       //accumulator for output 2
#define s3 v3       //accumulator for output 3
#define s3s2 v3:2   //accumulator pair 2/3
#define d0 v4       //scaled result 0
#define d1 v5       //scaled result 1
#define d2 v4       //scaled result 2 (reuses v4)
#define d3 v5       //scaled result 3 (reuses v5)
#define y0 v8       //weights
#define y1 v9       //weights
#define y2 v10      //weights
#define y3 v11      //weights
#define yout v17    //packed output bytes
#define y3_prev v16 //copy of previous value
#define wsum v14    //initialized to in_offset*wsum + biasoffset
#define maxomaxe v13:12 //running max (odd:even)
#define maxe v12    //running max, even
#define maxo v13    //running max, odd
#define minomine v19:18 //running min (odd:even)
#define mine v18    //running min, even
#define mino v19    //running min, odd
#define biasvec v16 //bias vector (reuses v16)
#define recipvec v15 //255/(MAX-MIN) scale factors
#define vrnd v20    //rounding constant
#define sk v21      //scratch
#define SSR <<1:rnd:sat:shift                    //simplify mpy instruction
/*=============================================================================*/
{ allocframe(#72) // 0th entry on stack is (72+8)/4 =20 ints
} {
memd(sp+#0) = r17:16 //
memd(sp+#24) = r23:22 //
r23 = ##0x80000001 // init for max
} {
memd(sp+#8) = r19:18 //
memd(sp+#16) = r21:20 //
maxe = vsplat(r23) //
} {
memw(sp+#52) = ptr_wi //
memd(sp+#40) = r27:26 //
mine.w = vabs(maxe.w) // +0x7fffffff
} {
zshift = memw(sp+#30<<2) // amount to << the products
ptr_sumabuf0 = memw(sp+#26<<2) //read in the ptr to the suma buffer value
} {
p3 = cmp.gt(zshift,#0) // do we need to do << on the sums?
memw(sp+#56) = ptr_zi //
memw(sp+#60) = out_width //
} {
stride_h_w = memw(sp+#20<<2) //extract stride*depth
in_depth = memw(sp+#21<<2) //
} {
filt_width = memw(sp+#22<<2) //extract filt_width
filt_height = memw(sp+#23<<2) //extract filt_height
stride_w = zxth(stride_h_w) //
stride_h = lsr(stride_h_w, #16) //
} {
filt_width = asl(filt_width, #2) //
filt_height=mpy(filt_height.L,in_depth.L) //filt_height*in_depth
fetch_out = sub(filt_width,stride_w) // filt_width - stride_w
dcfetch(ptr_sumabuf0+#0) //fetch the suma values
} {
filt_height = lsr(filt_height, #5) //filt_height * in_depth / 32
fetch_out = mpyi(fetch_out,#32*3) //
ptr_filtsum = memw(sp+#25<<2) //ptr pre computed weight sum
out_height = memw(sp+#24<<2) //number of output lines
} {
memd(sp+#32) = r25:24 //
recip_level_ptr = memw(sp+#29<<2) //
filt_width = add(filt_width, #-1) //account for software pipeline
in_width_32 = asl(in_width, #5) //32 * in_width d32 line
} {
sum_stride = asl(stride_w, #2) //stride for th sum data 1 or 2 words
recipvec = vmem(recip_level_ptr+#0) //
next_sumabuf = memw(sp+#27<<2) //stride of ptr pre computed input sum
} {
M0 = sum_stride //allow stride by 4bytes or 8bytes
wsum = vmem(ptr_filtsum+#0) //
in_width = mpyi(in_width, in_depth) //
ptr_sumabuf = ptr_sumabuf0 //
} {
next_outputs=mpyi(filt_height,in_width_32) //filt_height*in_width*in_depth
stride_w = asl(stride_w, #7) //128 or 256
filt_height = add(filt_height, #-1) //peel off 1 itn of outer loop
dcfetch(ptr_xi+#2<<5) //
} {
fetch_offset = #0 //
next_outputs = sub(next_outputs, stride_w) //1*128,2*128
sum0 = memw(ptr_sumabuf++M0) //[P, 0]sum 0
dcfetch(ptr_xi+#3<<5) //
} {
in_width_stride_depth=mpyi(in_width,stride_h)//
d0 = vsplat(sum0) //[P, 1]
sum1 = memw(ptr_sumabuf++M0) //[P, 1]sum 1
} {
stride_w = lsr(stride_w, #2) //32 or 64
s0.w = vadd(wsum.w, d0.w) //[P, 2]
d1 = vsplat(sum1) //[P, 2]
sum2 = memw(ptr_sumabuf++M0) //[P, 2]sum 2
} {
stride3_w = addasl(stride_w, stride_w, #1) //3*stride, 32*3 or 64*3
s1.w = vadd(wsum.w, d1.w) //[P, 3]
d2 = vsplat(sum2) //[P, 3]
sum3 = memw(ptr_sumabuf++M0) //[P, 3]sum 3
}{
fetch_out = max(fetch_out,fetch_offset) //if negative, set to 0
s2.w = vadd(wsum.w, d2.w) //[P, 4]
d3 = vsplat(sum3) //[P, 4]
}
/* ---------------------------------------------------------------------------- */
// .balign 32
.L_height:
{ ptr_x0 = ptr_xi //ptr_xi
col_count_ptr_z = memd(sp+#56<<0) //out_width & ptr_zi on stack //
out_height = add(out_height, #-1) //
fetch_ptr0 = add(ptr_xi, in_width_32) //[P, 4]fetch from next line
} {
memw(sp+#56<<0) += out_width_stride_depth //ptr_zi next output line for depth segmnt
p0 = cmp.eq(out_height,#0) // last iteration?
ptr_x1 = ptr_x0 //[P, 5]
loop1(.L_filt_height, filt_height) //[P, 0]for(fil=0;fil<h*depth/32;fil+=1){
} {
ptr_w = memw(sp+#52<<0) //[P, 0]ptr_wi initialize filter pointer
if (!p0) ptr_sumabuf0=add(ptr_sumabuf0,next_sumabuf)
// ptr_sumabuf += next_sumabuf
if (!p0) ptr_xi = add(ptr_xi, in_width_stride_depth)
//ptr_x+=in_width*stride*in_depth)
if ( p0) fetch_offset = fetch_out //
}
/* ---------------------------------------------------------------------------- */
.balign 32
.L_width:
{ s3.w = vadd(wsum.w, d3.w) //[P, 5]
p0 = cmp.eq(filt_height, #0) //if in_depth = 32 and filt_height = 1
p1 = cmp.gt(col_count,#4) //p1 = !(last iteration of L_width loop)
if (!p1.new) fetch_ptr0 = sub(fetch_ptr0,fetch_offset)
//fetch_offset =0 except last iteration of L_height loop
}{
fetch_ptr = fetch_ptr0 //
col_count = add(col_count, #-4) //
x27x24_x23x20 = memd(ptr_x0+stride_w<<#1) //[0, 0]
if (p0) jump:nt .L_peel1 //peel off 1 iteration
}
/* ---------------------------------------------------------------------------- */
.L_filt_height:
{ loop0(.L_filt_widthN_1, filt_width) //[P, 0]ki is k1/32 - 0
x37x34_x33x30 = memd(ptr_x0+stride3_w<<#0) //[0, 0]
ptr_x0 = add(ptr_x0, in_width_32) //[E, 7]next line ptr_y keeps going
dcfetch(fetch_ptr+#0<<5) //[1, 3]
}{
y0.cur = vmem(ptr_w++#1) //[0, 1]
s2.uw += vrmpy(y0.ub, x23x20.ub) //[0, 1]
s3.uw += vrmpy(y0.ub, x33x30.ub) //[0, 1]
x17x14_x13x10 = memd(ptr_x1+stride_w<<#0) //[0, 1]
}
/* ---------------------------------------------------------------------------- */
.L_filt_widthN_1:
{ y1.cur = vmem(ptr_w++#1) //[0, 2]
s2.uw += vrmpy(y1.ub, x27x24.ub) //[0, 2]
s3.uw += vrmpy(y1.ub, x37x34.ub) //[0, 2]
x07x04_x03x00 = memd(ptr_x1++#1<<3) //[0, 2]
} {
s0.uw += vrmpy(y0.ub, x03x00.ub) //[0, 3]
s1.uw += vrmpy(y0.ub, x13x10.ub) //[0, 3]
dcfetch(fetch_ptr+#1<<5) //[0, 3]prefetch the right line
x37x34_x33x30 = memd(ptr_x1+stride3_w<<#0) //[1, 0]
} {
s0.uw += vrmpy(y1.ub, x07x04.ub) //[0, 4]
s1.uw += vrmpy(y1.ub, x17x14.ub) //[0, 4]
x27x24_x23x20 = memd(ptr_x1+stride_w<<#1) //[1, 0]
fetch_ptr = add(fetch_ptr, #32) //[0, 4]advance fetch by 1 line
} {
y0.cur = vmem(ptr_w++#1) //[1, 1]
s2.uw += vrmpy(y0.ub, x23x20.ub) //[1, 1]
s3.uw += vrmpy(y0.ub, x33x30.ub) //[1, 1]
x17x14_x13x10 = memd(ptr_x1+stride_w<<#0) //[1, 1]
}:endloop0
/* ---------------------------------------------------------------------------- */
{ y1.cur = vmem(ptr_w++#1) //[1, 2]
s2.uw += vrmpy(y1.ub, x27x24.ub) //[1, 2]
s3.uw += vrmpy(y1.ub, x37x34.ub) //[1, 2]
x07x04_x03x00 = memd(ptr_x1+#0<<3) //[1, 2]
} {
s0.uw += vrmpy(y0.ub, x03x00.ub) //[1, 3]
s1.uw += vrmpy(y0.ub, x13x10.ub) //[1, 3]
ptr_x1 = ptr_x0 //[P, 2]
fetch_ptr0 = add(fetch_ptr0, in_width_32) //[P, 4]fetch from next line
} {
s0.uw += vrmpy(y1.ub, x07x04.ub) //[1, 4]
s1.uw += vrmpy(y1.ub, x17x14.ub) //[1, 4]
x27x24_x23x20 = memd(ptr_x0+stride_w<<#1) //[P, 4]
fetch_ptr = fetch_ptr0 //
}:endloop1
/* ----------------------------------------------------------------- */
.L_peel1:
{ loop0(.L_filt_width1, filt_width) //[P, 0]ki is k1/32 - 0
x37x34_x33x30 = memd(ptr_x0+stride3_w<<#0) //[0, 0]
fetch_ptr = sub(fetch_ptr0, next_outputs) //[E, 7]move to column +in_width_32 +next_outputs
}{
if (!p1) fetch_ptr = ptr_xi //if last iteration, move to next row
}
/* ---------------------------------------------------------------------------- */
.balign 32
.L_filt_width1:
{ y0.cur = vmem(ptr_w++#1) //[0, 1]
s2.uw += vrmpy(y0.ub, x23x20.ub) //[0, 1]
s3.uw += vrmpy(y0.ub, x33x30.ub) //[0, 1]
x17x14_x13x10 = memd(ptr_x1+stride_w<<#0) //[0, 1]
} {
y1.cur = vmem(ptr_w++#1) //[0, 2]
s2.uw += vrmpy(y1.ub, x27x24.ub) //[0, 2]
s3.uw += vrmpy(y1.ub, x37x34.ub) //[0, 2]
x07x04_x03x00 = memd(ptr_x1++#1<<3) //[0, 2]
} {
s0.uw += vrmpy(y0.ub, x03x00.ub) //[0, 3]
s1.uw += vrmpy(y0.ub, x13x10.ub) //[0, 3]
dcfetch(fetch_ptr+#0<<5) //[0, 3]final prefetch sequence for this column
x37x34_x33x30 = memd(ptr_x1+stride3_w<<#0) //[1, 0]
} {
s0.uw += vrmpy(y1.ub, x07x04.ub) //[0, 4]
s1.uw += vrmpy(y1.ub, x17x14.ub) //[0, 4]
x27x24_x23x20 = memd(ptr_x1+stride_w<<#1) //[1, 0]
fetch_ptr = add(fetch_ptr, #32) //[0, 3]final fetch advance
}:endloop0
/* ---------------------------------------------------------------------------- */
{ y0.cur = vmem(ptr_w++#1) //[1, 1]
s2.uw += vrmpy(y0.ub, x23x20.ub) //[1, 1]
s3.uw += vrmpy(y0.ub, x33x30.ub) //[1, 1]
x17x14_x13x10 = memd(ptr_x1+stride_w<<#0) //[1, 1]
} {
y1.cur = vmem(ptr_w++#1) //[1, 2]
s2.uw += vrmpy(y1.ub, x27x24.ub) //[1, 2]
s3.uw += vrmpy(y1.ub, x37x34.ub) //[1, 2]
x07x04_x03x00 = memd(ptr_x1+#0<<3) //[1, 2]
} {
s0.uw += vrmpy(y0.ub, x03x00.ub) //[1, 3]
s1.uw += vrmpy(y0.ub, x13x10.ub) //[1, 3]
if (!p1) ptr_sumabuf = ptr_sumabuf0 //
} {
y2.w = vmpye(s2.w, recipvec.uh) //
ptr_x0 = add(ptr_x0, in_width_32) //[E, 7]next line ptr_y keeps going
dcfetch(ptr_sumabuf+#0<<5) //fetch next suma's
if( p3 ) jump:nt .L_do_zshift // go do (s0,s1,s2,s3) <<= zshift
} { //** note ** effect of following packet needs to be duplicated at .L_do_zshift
s0.uw += vrmpy(y1.ub, x07x04.ub) //[1, 4]
s1.uw += vrmpy(y1.ub, x17x14.ub) //[1, 4]
dcfetch(fetch_ptr+#0<<5) //fetch line
ptr_x0 = sub(ptr_x0, next_outputs) //reset data ptr to next 4
}
.L_done_zshift:
/* ---------------------------------------------------------------------------- */
{ y2.w+= vmpyo(s2.w, recipvec.h):SSR
maxe.w = vmax(maxe.w, s0.w) //see if s0 is max
p2 = cmp.gt(col_count,#-3) //should s1 be included ?
sk = s0 //initialize sk with s0
} {
y0.w = vmpye(s0.w, recipvec.uh) //
mine.w = vmin(mine.w, s0.w) //see if s0 is min
if (p2) sk = s1 //inclued s1 if needed
p2 = cmp.gt(col_count,#-2) //should s2 be included ?
} {
y0.w+= vmpyo(s0.w, recipvec.h):SSR //
maxe.w = vmax(maxe.w, sk.w) //see if s1 is max
mine.w = vmin(mine.w, sk.w) //see if s1 is min
ptr_w = memw(sp+#52<<0) //[P, 0]ptr_wi initialize filter pointer
} {
y3.w = vmpye(s3.w, recipvec.uh) //
if (p2) sk = s2 //include s2 if needed
ptr_x1 = ptr_x0 //[P, 5]
//loop1(.L_filt_height, filt_height) //[P, 1]for(fil=0;fil<h*depth/32;fil+=1){
lc1 = filt_height //allow future code movement
} {
y3.w+= vmpyo(s3.w, recipvec.h):SSR //
maxe.w = vmax(maxe.w, sk.w) //see if s2 is max
mine.w = vmin(mine.w, sk.w) //see if s2 is min
p2 = cmp.gt(col_count,#-1) //should s3 be included ?
} {
y1.w = vmpye(s1.w, recipvec.uh) //
if (p2) sk = s3 //include s3 if needed
sum0 = memw(ptr_sumabuf++M0) //[P, 0]sum 0
} {
y1.w+= vmpyo(s1.w, recipvec.h):SSR //
maxe.w = vmax(maxe.w, sk.w) //see if s2 is max
mine.w = vmin(mine.w, sk.w) //see if s3 is min
sum1 = memw(ptr_sumabuf++M0) //[P, 1]sum 1
} {
y3.h = vpack(y3.w, y2.w):sat //#sat8 <0, >255
d0 = vsplat(sum0) //[P, 1]
d1 = vsplat(sum1) //[P, 2]
sum2 = memw(ptr_sumabuf++M0) //[P, 2]sum 2
} {
y1.h = vpack(y1.w, y0.w):sat //#>>16
fetch_ptr0 = add(ptr_x0, in_width_32) //[P, 4]fetch from next line
sum3 = memw(ptr_sumabuf++M0) //[P, 3]sum 3
} {
s0.w = vadd(wsum.w, d0.w) //[P, 2]
s1.w = vadd(wsum.w, d1.w) //[P, 3]
d2 = vsplat(sum2) //[P, 3]
d3 = vsplat(sum3) //[P, 4]
} {
y3.ub = vpack(y3.h, y1.h):sat //#sat8 <0, >255
vmem(ptr_z++#1):nt = y3.new //#[E, ]store 2nd 32bytes
s2.w = vadd(wsum.w, d2.w) //[P, 4]
if (p1) jump:t .L_width //
}//end cols per line
/* ---------------------------------------------------------------------------- */
{ p0 = cmp.eq(out_height, #0) //
if(!p0.new) jump:t .L_height //
}//end lines per block
/* ---------------------------------------------------------------------------- */
// scale mine,maxe according to scales
// find min/max reduced over previous min/max;
// We don't need to << the min/max by zshift, since they were reduced
// from the values taken after the << by zshift.
//
{
y0.w = vmpye(maxe.w, recipvec.uh)
ptr_max = memw(sp+#28<<2) // ptr to existing max/min values
} {
s0 = vmem(ptr_max+#0) // previous max
y0.w+= vmpyo(maxe.w, recipvec.h):SSR
} {
maxe.w = vmax(y0.w, s0.w) // new max
s1 = vmem(ptr_max+#1) // previous min
y1.w = vmpye(mine.w, recipvec.uh)
} {
vmem(ptr_max+#0) = maxe
y1.w+= vmpyo(mine.w, recipvec.h):SSR
}
/*=============================================================================*/
{
vmem(ptr_max+#1) = mine.new //
mine.w = vmin(s1.w, y1.w); // new min
} {
r17:16 = memd(sp+#0) //restore stack
r19:18 = memd(sp+#8) //Q
} {
r21:20 = memd(sp+#16) //Q
r23:22 = memd(sp+#24) //Q
} {
r25:24 = memd(sp+#32) //Q
r27:26 = memd(sp+#40) //Q
} {
dealloc_return //Q
}
// broken-out code to << s0,s1,s2,s3 all by zshift
// But, at time of branch, sums s0,s1 aren't yet completed;
// and there are some side effects from a skipped packet that
// we need to duplicate before going back.
// Also, y2=s2*scale product is partly done when the branch occurs, so that
// needs to be redone with the new s2.
//
.balign 32
.L_do_zshift:
{
s2.w = vasl(s2.w, zshift )
s0.uw += vrmpy(y1.ub, x07x04.ub) //[1, 4]
s1.uw += vrmpy(y1.ub, x17x14.ub) //[1, 4]
} {
s3.w = vasl(s3.w, zshift )
dcfetch(fetch_ptr+#0<<5) //fetch line
ptr_x0 = sub(ptr_x0, next_outputs) //reset data ptr to next 4
} {
s0.w = vasl(s0.w, zshift)
y2.w = vmpye(s2.w, recipvec.uh); // redo the s2->y2 partial calc
} {
s1.w = vasl(s1.w, zshift)
jump .L_done_zshift;
}
.L_end:
/*=============================================================================*/
.size gvconv2dbbb_v60_asm, .L_end-gvconv2dbbb_v60_asm
/*=============================================================================*/
|
XiaoMi/nnlib | 3,981 | hexagon/asm_src/maxpool_aligned_hvx.S | /*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
*/
#if 0
/* C reference for this routine (MAX pooling). NOTE: the pseudocode that was
 * here previously was average-pool code (sum/count) copied from the avgpool
 * template; corrected to match the actual behaviour implemented below. */
for (z = 0; z < out_depth; z++) {
/* foreach window y * foreach window x */
uint8_t max = 0;
for (in_y = start_y; in_y < end_y; in_y++) {
for (in_x = start_x; in_x < end_x; in_x++) {
uint8_t data = in0[z + in_depth * in_x + in_depth * in_width * in_y];
if (data > max) max = data;
}
}
out0[z] = max;
}
#endif
.global maxpool_aligned_hvx
.type maxpool_aligned_hvx, @function
.balign 32
maxpool_aligned_hvx:
/* ============================================================================ */
/* 8-bit max pooling over a win_height x win_width window of a depth-major
 * (depth fastest-varying) image whose data is 128-byte (HVX vector) aligned.
 * Depth is processed 128 bytes per outer pass: each pass folds the whole
 * window into one vector with an elementwise unsigned-byte vmax and stores
 * a single output vector, then moves to the next 128-byte depth slice.
 *
 * Inputs (Hexagon calling convention):
 *   r0 dsto         output pointer, vector aligned
 *   r1 srco         input pointer, vector aligned
 *   r2 image_depth  depth in bytes; assumed a multiple of 128 -- TODO confirm
 *   r3 win_width    pooling window width in pixels
 *   r4 win_height   pooling window height in pixels
 *   r5 image_width  (padded) input image width in pixels
 * Clobbers: r7, r10, M0, both hardware loop register sets, v0, v2, p0.
 */
#define dsto r0 //dest ptr
#define srco r1 //src ptr: base of current 128-byte depth slice
#define image_depth r2 //num bytes of depth; later reused as remaining-byte counter
#define win_width r3 //window width in pixels
#define win_height r4 //window height in pixels
#define image_width r5 //image width in pixels
#define stride r7 //byte skip from end of one window row to start of the next
#define stride0 r8 //unused (left over from the avgpool template)
#define c0101 r9 //unused (left over from the avgpool template)
#define src r10 //roving input pointer walking the window
#define z1z0 v1:0 //unused pair alias
#define z0 v0 //running elementwise byte max
#define z1 v1 //unused
#define x0 v2 //current input vector
#define y0 v3 //unused
#define z2 v4 //unused
#define z3 v5 //unused
#define vzero v6 //unused
/* ============================================================================ */
{
M0 = image_depth //horizontal pixel step in bytes; set once, stays fixed even after r2 is decremented below
stride = sub(image_width, win_width) //pixels to skip at the end of each window row
src = srco
} {
stride = mpyi(stride, image_depth) //convert row skip from pixels to bytes
srco = add(srco, #128) //pre-advance to the next depth slice for the following pass
loop1(.L_vert, win_height) //outer HW loop: window rows
} {
loop0(.L_horz, win_width) //inner HW loop: pixels per window row
z0 = #0 //max accumulator starts at 0 (data is unsigned)
}
/* ============================================================================ */
.balign 32
.L_vert:
.L_horz:
{
x0.tmp = vmem(src++M0) //+in_depth* in_x: load one pixel's 128-byte slice, step right
z0.ub = vmax(x0.ub, z0.ub) //fold into running byte-wise max
}:endloop0
{
src = add(src, stride) //hop to the start of the next window row
loop0(.L_horz, win_width)
}:endloop1
{
src = srco //restart the window walk on the next 128-byte depth slice
srco = add(srco, #128)
image_depth = add(image_depth, #-128) //one slice consumed
loop1(.L_vert, win_height)
} {
p0 = !cmp.eq(image_depth, #0) //more depth slices remaining?
vmem(dsto++#1) = z0 //store the pooled vector for this slice
z0 = #0 //reset accumulator for the next slice
if(p0.new) jump:t .L_vert
}
jumpr r31 //return
.L_end:
/*==============================================================================*/
.size maxpool_aligned_hvx, .L_end-maxpool_aligned_hvx
|
XiaoMi/nnlib | 1,805 | hexagon/asm_src/nothing.S | /*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
*/
.text
// Intentionally (nearly) empty translation unit: emits a single zero word
// under an assembler-local label so the build produces a valid object file.
// Exports no symbols (the .L prefix keeps .Lfoo out of the symbol table).
.Lfoo:
.word 0
|
XiaoMi/nnlib | 5,297 | hexagon/asm_src/avgpool_nonaligned_hvx.S |
/*
* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
*/
#if 0
for (z = 0; z < out_depth; z++) {
/* foreach window y * foreach window x */
sum = 0;
for (in_y = start_y; in_y < end_y; in_y++) {
for (in_x = start_x; in_x < end_x; in_x++) {
uint32_t data = in0[z + in_depth * in_x + in_depth * in_width * in_y];
sum += data;
}
}
out0[z] = (sum / count);
}
#endif
.global avgpool_nonaligned_hvx
.type avgpool_nonaligned_hvx, @function
.balign 32
avgpool_nonaligned_hvx:
/* ============================================================================ */
/* 8-bit average pooling over a win_height x win_width window of a depth-major
 * image; the destination need NOT be 128-byte aligned and the final depth
 * slice may be shorter than 128 bytes.  Per 128-byte depth slice: u8 pixels
 * are widened and accumulated into u16 lanes, multiplied by the caller's
 * reciprocal 'scale' with round/saturate, packed back to u8, and stored
 * through prolog/epilog byte masks plus a rotate so arbitrary destination
 * alignment and partial widths are handled.
 *
 * Inputs (Hexagon calling convention):
 *   r0   dsto        output pointer, any alignment
 *   r1   srco        input pointer
 *   r2   image_depth depth in bytes (last slice may be < 128)
 *   r3   win_width   pooling window width in pixels
 *   r4   win_height  pooling window height in pixels
 *   r5   image_width (padded) image width in pixels
 *   sp+0 scale       fixed-point reciprocal of the window element count,
 *                    low 16 bits significant -- format presumed Q15 given the
 *                    <<1:rnd:sat multiply; confirm against callers
 */
#define dsto r0 //dest ptr
#define srco r1 //src ptr: base of current 128-byte depth slice
#define image_depth r2 //num bytes of depth left; also supplies M0 pixel step at entry
#define win_width r3 //window width, pixels
#define win_height r4 //window height, pixels
#define image_width r5 //image width, pixels
#define scale r6 //reciprocal of window element count (halfword, replicated)
#define stride r7 //byte skip from end of one window row to the next
#define stride0 r8 //unused
#define c0101 r9 //0x01010101: byte multiplier of 1 for the widening vmpy
#define src r10 //roving input pointer walking the window
#define width r11 //write width: valid output bytes in this slice
#define dalign r12 //dsto & 127, later + width = end offset of the store
#define mdsto r13 //-dsto: rotate count that aligns the result to dsto
#define z1z0 v1:0 //u16 sum accumulator pair (even bytes -> z0, odd -> z1)
#define z0 v0
#define z1 v1
#define x0 v2 //current input vector
#define y0 v3 //packed u8 averages
#define z2 v4 //scaled even-lane averages (u16)
#define z3 v5 //scaled odd-lane averages (u16)
#define vzero v6
#define d0 v7 //y0 rotated into store position
#define qprolog q0 //mask of bytes BELOW dsto's offset in the first aligned vector
#define qepilog q1 //mask of bytes below the end offset in the second vector
/* ============================================================================ */
{
scale = memw(sp+#0) //7th argument lives on the stack
M0 = image_depth //horizontal pixel step = depth in bytes
stride = sub(image_width, win_width)
} {
stride = mpyi(stride, image_depth) //row skip: pixels -> bytes
c0101 = ##0x01010101
scale = combine(scale.L, scale.L) //replicate reciprocal into both halfwords
} {
loop1(.L_vert, win_height) //outer HW loop: window rows
vzero = #0
src = srco
srco = add(srco, #128) //pre-advance to next depth slice
} {
loop0(.L_horz, win_width) //inner HW loop: pixels per window row
z1z0 = vcombine(vzero, vzero) //clear the u16 sum accumulators
}
/* ============================================================================ */
.balign 32
.L_vert:
.L_horz:
{
x0 = vmemu(src++M0) //+in_depth* in_x (unaligned load, step one pixel right)
} {
nop //spacer between the vmemu and its use
} {
z1z0.uh += vmpy(x0.ub, c0101.ub) //multiply by 1 to widen u8->u16 and accumulate
}:endloop0
{
src = add(src, stride) //hop to the start of the next window row
loop0(.L_horz, win_width)
}:endloop1
{
z2.h = vmpy(z0.h, scale.h):<<1:rnd:sat //even lanes: sum * 1/count, round/sat
image_depth = add(image_depth, #-128) //160 -- one slice consumed
loop1(.L_vert, win_height)
} {
z3.h = vmpy(z1.h, scale.h):<<1:rnd:sat //odd lanes likewise
p0 = cmp.gt(image_depth, #0) //0 -- more full slices after this one?
width =add(image_depth, #128) //bytes valid in this slice (<=128 on the last)
dalign = and(dsto, #127) //0 -- destination misalignment in bytes
} {
qprolog = vsetq(dsto) //vmem(dsto++#1) = y0 onaligned //76543210
if(p0) width = #128 //128 -- full slice -> full vector
} {
mdsto = sub(#0, dsto) //rotate count to shift data up to dsto's offset
y0.ub = vsat(z3.h, z2.h) //pack odd/even u16 averages back to u8 with sat
dalign = add(dalign, width) //128 -- end offset of the store window
} {
qepilog = vsetq(dalign) //________ lanes below end offset (mod 128)
} {
d0 = vror(y0, mdsto) //54321076 -- rotate result into store position
p1 = cmp.gt(dalign, #127) //0 is block not less than 128 bytes
if(p1.new) jump:nt .L_gt //store spans two aligned vectors: keep both masks
}
// Store fits inside ONE aligned vector: fold the end mask into qprolog.
// Per Hexagon packet semantics both reads of qepilog below see its
// PRE-packet value, so qepilog becomes all-false (no second-vector store)
// while qprolog additionally masks off bytes at/after the end offset.
{
qepilog = and(qepilog, !qepilog) //________ q & !q == all-false
qprolog = or(qprolog, !qepilog) //________ also exclude bytes past the end
}
.L_gt:
{
z0 = #0 //reset accumulators for the next slice
if( qepilog) vmem(dsto+#1) = d0 //________ tail bytes into the second vector
src = srco
} {
z1 = #0
srco = add(srco, #128)
if(!qprolog) vmem(dsto++#1) = d0 //76543210 masked store of the first vector
if(p0) jump .L_vert //more depth slices to process
}
jumpr r31 //return
.L_end:
/*==============================================================================*/
.size avgpool_nonaligned_hvx, .L_end-avgpool_nonaligned_hvx
|
XiaoMi/nnlib | 23,365 | hexagon/asm_src/inconv2dbbb_d32_v60_h.S | /*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
* FUNCTIONS : gvconv2dbbb_v60_asm
*
* DESCRIPTION
* Perform 2d convolution using elements of size in_depth < 32. Results are
* scaled and saturated to 8bits. Max and Min accumulations are kept.
*
* ARCHITECTURE : QDSP6V60 + HVX
*
* REVISION HISTORY:
* =================
*
* Author Date Comments
* -------------------------------------------------------------
* DJH 05/11/17 created
*
* CYCLE-COUNT:
*
* MEMORY
* CODESIZE = 928 bytes
* STACK = 80 bytes
* ASSUMPTIONS
*/
/*=============================================================================*/
.text
.file "inconv2dbbb_d32_v60_h.S"
.global inconv2dbbb_v60_asm
.balign 32
.type inconv2dbbb_v60_asm, @function
/* parameters:
* r0 ( const uint8_t * input,
* r1 const uint8_t * weights,
* r2 uint8_t * output,
* r3 int in_width_pad,
* r4 int next_out_width_row,
* r5 int out_width,
* PARMW(0) int indepth,
* PARMW(1) int filt_width,
* PARMW(2) int filt_height,
* PARMW(3) int num_out_lines,
* PARMW(4) int32_t * minmax_buf,
* PARMW(5) int recip_level,
* PARMW(6) const int32_t *biasbuf,
* PARMW(7) const int32_t *ptr_suma,
* PARMW(8) int next_suma,
* PARMW(9) int stride_height_width,
* PARMW(10) int recip_shamt);
*/
inconv2dbbb_v60_asm:
/*=============================================================================*/
#define ptr_xi r0 //data
#define ptr_wi r1 //weights
#define ptr_zi r2 //results
#define in_width r3 //(pad_l+in_width+pad_r)
#define out_width_stride_depth r4 //next line amount
#define out_width r5 //is amount of work to be done
#define in_depth r26 //0
#define filt_width r17 //1 horizontal fuilter width
#define filt_height r8 //2 filt_height lines per filter
#define out_height r9 //3 number of vertical lines to perform
#define ptr_max r13 //4 maximum and minum buffer
#define recip_level r14 //5 255 / (MAX - MIN) - used to scale to bytes
#define filt_sum r15 //6 gemsumb
#define active_sum r18 //7 gemsuma activations
#define next_suma_buf r1 //8 stride for suma buffer
#define stride_v_h r10 //9 stride_vert | stride_horz ->M0
#define sum0 r19
#define sum1 r19
#define sum2 r19
#define sum3 r19
#define recip_shamt r7
/*=============================================================================*/
#define fetch_ptr r0
#define fetch_ptr0 r2
#define in_depth3 r21
#define in_width_stride_depth r15 //in_width * stride * in_depth for next output
#define ptr_x0 r16 //tmp pointer to activations
#define ptr_x1 r23 //dynamic pointer to activations
#define next_outputs r27 //jump to input ptr for next set of outputs
#define ptr_w r20 //pointer to weights
#define in_width_depth r22 //width of input image in bytes
#define c4 r2 //shuffle size in final max and min find
#define ptr_z r24 //pointer to outputs
#define col_count r25 //column count, how much of width used
#define x07x04_x03x00 r11:10 //8 activations output 0
#define x07x04 r11 //4 activations output 0
#define x03x00 r10 //4 activations output 0
#define x17x14_x13x10 r13:12 //8 activations output 1
#define x17x14 r13 //4 activations output 1
#define x13x10 r12 //4 activations output 1
#define x27x24_x23x20 r11:10 //8 activations output 2
#define x27x24 r11 //4 activations output 2
#define x23x20 r10 //4 activations output 2
#define x37x34_x33x30 r7:6 //8 activations output 3
#define x37x34 r7 //4 activations output 3
#define x33x30 r6 //4 activations output 3
/*=============================================================================*/
#define PV(VSRC) .word (0x1DFFE020+VSRC) //debug vec reg
#define PS(SSRC) .word (0x1DFFE100+SSRC) //debug sca reg
#define s0 v0 //accumulator for output 0
#define s1 v1 //accumulator for output 1
#define s1s0 v1:0 //accumulator
#define s2 v2 //accumulator for output 2
#define s3 v3 //accumulator for output 3
#define s3s2 v3:2 //
#define d0 v4 //
#define d1 v4 //
#define d2 v4 //
#define d3 v5 //
#define y0 v8 //
#define y1 v9 //
#define y2 v10 //
#define y3 v11 //
#define yout v17 //
#define y3_prev v16 //copy of previous value
#define wsum v14 //initialzed to in_offsey*wsum + biasoffset
#define maxomaxe v13:12 //
#define maxe v12 //
#define maxo v13 //
#define minomine v19:18 //
#define mine v18 //
#define mino v19 //
#define biasvec v16 //
#define recipvec v15 //
#define vzero v20
#define SSR <<1:rnd:sat:shift //simplfy mpy instruction
/*=============================================================================*/
#define FRAMESIZE 72
#define PARMW(n) sp+#(8+FRAMESIZE+4*(n))
{ allocframe(#FRAMESIZE) // 0th entry on stack is (72+8)/4 =20 ints
} {
memd(sp+#0) = r17:16 //
memd(sp+#8) = r19:18 //
} {
r16 = #0x7fffffff;
memd(sp+#16) = r21:20 //
memd(sp+#24) = r23:22 //
} {
memd(sp+#32) = r25:24 //
memd(sp+#40) = r27:26 //
mine = vsplat(r16)
} {
memw(sp+#48) = ptr_xi
memw(sp+#52) = ptr_wi
maxe = vnot(mine)
} {
in_depth = memw(PARMW(0))
filt_height = memw(PARMW(2)) //extract filt_height
} {
filt_width = memw(PARMW(1)) //extract filt_width
p0 = cmp.eq(filt_height, #1) //is filt 1xN?
} {
p3 = tstbit(filt_width,#0) //is filter Nx2 ?
memw(sp+#56) = ptr_zi
filt_width = lsr(filt_width, #1) //
out_height = memw(PARMW(3)) //number of output lines
} {
p2 = cmp.eq(filt_width, #0) //is filt width = 1? (now 0)?
recip_level = memw(PARMW(5)) //
filt_sum = memw(PARMW(6))
in_width_depth = mpyi(in_width, in_depth) //in_depth * in_width line
} {
wsum = vmem(filt_sum+#0) //
recipvec = vsplat(recip_level) //
stride_v_h = memw(PARMW(9))
} {
next_suma_buf = memw(PARMW(8))
active_sum = memw(PARMW(7)) //
in_width_stride_depth=mpy(in_width_depth.L,stride_v_h.H)//
stride_v_h = zxth(stride_v_h)
} {
in_depth = mpyi(in_depth, stride_v_h)
stride_v_h = asl(stride_v_h, #2)
} {
next_outputs=mpyi(filt_height,in_width_depth)//filt_height*in_width*in_depth
filt_height = add(filt_height, #-1) //
M0 = stride_v_h
} {
next_outputs += mpyi(stride_v_h, #-4) //
memw(sp+#60) = active_sum //
in_depth3 = addasl(in_depth, in_depth, #1) //
}
/* ---------------------------------------------------------------------------- */
.balign 32
.L_height:
{ active_sum = memw(sp+#60)
} {
ptr_z = memw(sp+#56) //ptr_zi
col_count = out_width
sum0 = memw(active_sum++M0) //stride = 4
} {
memw(sp+#60) += next_suma_buf
out_height = add(out_height, #-1) //
d0 = vsplat(sum0)
sum1 = memw(active_sum++M0) //stride = 4
} {
s0.w = vadd(wsum.w, d0.w)
d1 = vsplat(sum1)
sum2 = memw(active_sum++M0) //stride = 4
} {
s1.w = vadd(wsum.w, d1.w)
d2 = vsplat(sum2)
sum3 = memw(active_sum++M0) //stride = 4
memw(sp+#56) += out_width_stride_depth //ptr_zi += out_width_stride_depth
} {
ptr_x0 = memw(sp+#48) //ptr_xi
memw(sp+#48) += in_width_stride_depth //ptr_xi+=in_width_stride_depth //ptr_x+=in_width*stride*in_depth)
s2.w = vadd(wsum.w, d2.w)
d3 = vsplat(sum3)
} {
fetch_ptr0 = add(ptr_x0, in_width_depth) //l1 fetch from next lin
s3.w = vadd(wsum.w, d3.w)
loop1(.L_filt_height, filt_height) //[P, 0]for(fil=0;fil<h*depth/32;fil+=1){
ptr_w = memw(sp+#52) //[P, 0]ptr_wi initialize filter pointer
}
/* ---------------------------------------------------------------------------- */
.balign 32
.L_width:
{
ptr_x1 = ptr_x0 //[P, 5]
x27x24_x23x20 = memd(ptr_x0+in_depth<<#1) //[0, 0]
x37x34_x33x30 = memd(ptr_x0+in_depth3<<#0) //[0, 0]
if(p0) jump .L_peel1 //[0, 0]
}
/* ---------------------------------------------------------------------------- */
.L_filt_height:
{ loop0(.L_filt_widthN_1, filt_width) //[P, 0]ki is k1/32 - 0
ptr_x0 = add(ptr_x0, in_width_depth) //[E, 7]next line ptr_y keeps going
fetch_ptr = fetch_ptr0
if (p2) jump:nt .L_epi_onlyN_1; // skip loop when filter_width=1
}
/* ---------------------------------------------------------------------------- */
.L_filt_widthN_1:
{
y0.cur = vmem(ptr_w++#1) //[0, 1]
s2.uw += vrmpy(y0.ub, x23x20.ub) //[0, 1]
s3.uw += vrmpy(y0.ub, x33x30.ub) //[0, 1]
x17x14_x13x10 = memd(ptr_x1+in_depth<<#0) //[0, 1]
} {
y1.cur = vmem(ptr_w++#1) //[0, 2]
s2.uw += vrmpy(y1.ub, x27x24.ub) //[0, 2]
s3.uw += vrmpy(y1.ub, x37x34.ub) //[0, 2]
x07x04_x03x00 = memd(ptr_x1++#1<<3) //[0, 2]stride = 4
} {
s0.uw += vrmpy(y0.ub, x03x00.ub) //[0, 3]
s1.uw += vrmpy(y0.ub, x13x10.ub) //[0, 3]
dcfetch(fetch_ptr+#0<<5) //[0, 3]
fetch_ptr = add(fetch_ptr, #32) //[0, 3]
} {
s0.uw += vrmpy(y1.ub, x07x04.ub) //[0, 4]
s1.uw += vrmpy(y1.ub, x17x14.ub) //[0, 4]
x27x24_x23x20 = memd(ptr_x1+in_depth<<#1) //[1, 0]
x37x34_x33x30 = memd(ptr_x1+in_depth3<<#0) //[1, 0]
}:endloop0
/* ---------------------------------------------------------------------------- */
.L_epi_onlyN_1:
// note: p2=1, p3=0 doesn't happen.
{ if(!p3) jump .L_evenN_1 //for even filters
dcfetch(fetch_ptr+#0<<5) //[1, 2]
fetch_ptr0 = add(fetch_ptr0, in_width_depth) //[0, 3]
} {
y0.cur = vmem(ptr_w++#1) //[1, 1]
s2.uw += vrmpy(y0.ub, x23x20.ub) //[1, 1]
s3.uw += vrmpy(y0.ub, x33x30.ub) //[1, 1]
x13x10 = memw(ptr_x1+in_depth<<#0) //[1, 1]
} {
x03x00 = memw(ptr_x1+#0<<3) //[1, 2]
} {
s0.uw += vrmpy(y0.ub, x03x00.ub) //[1, 3]
s1.uw += vrmpy(y0.ub, x13x10.ub) //[1, 3]
}
.L_evenN_1:
{ x27x24_x23x20 = memd(ptr_x0+in_depth<<#1) //[0, 0]
x37x34_x33x30 = memd(ptr_x0+in_depth3<<#0) //[0, 0]
ptr_x1 = ptr_x0 //[P, 2]
}:endloop1
/* ---------------------------------------------------------------------------- */
.balign 32
.L_peel1:
{ loop0(.L_filt_width1, filt_width) //[P, 0]ki is k1/32 - 0
ptr_x0 = add(ptr_x0, in_width_depth) //[E, 7]next line ptr_y keeps going
fetch_ptr0 = sub(fetch_ptr0, next_outputs) //[E, 7]
if (p2) jump:nt .L_epi_only_1; // skip loop when filter_width=1
}
/* ---------------------------------------------------------------------------- */
.L_filt_width1:
{ y0.cur = vmem(ptr_w++#1) //[0, 1]
s2.uw += vrmpy(y0.ub, x23x20.ub) //[0, 1]
s3.uw += vrmpy(y0.ub, x33x30.ub) //[0, 1]
x17x14_x13x10 = memd(ptr_x1+in_depth<<#0) //[0, 1]
} {
y1.cur = vmem(ptr_w++#1) //[0, 2]
s2.uw += vrmpy(y1.ub, x27x24.ub) //[0, 2]
s3.uw += vrmpy(y1.ub, x37x34.ub) //[0, 2]
x07x04_x03x00 = memd(ptr_x1++#1<<3) //[0, 2]stride = 4
} {
s0.uw += vrmpy(y0.ub, x03x00.ub) //[0, 3]
s1.uw += vrmpy(y0.ub, x13x10.ub) //[0, 3]
dcfetch(fetch_ptr+#0<<5) //[0, 3]
fetch_ptr = add(fetch_ptr, #32) //[0, 3]
} {
s0.uw += vrmpy(y1.ub, x07x04.ub) //[0, 4]
s1.uw += vrmpy(y1.ub, x17x14.ub) //[0, 4]
x27x24_x23x20 = memd(ptr_x1+in_depth<<#1) //[1, 0]
x37x34_x33x30 = memd(ptr_x1+in_depth3<<#0) //[1, 0]
}:endloop0
/* ---------------------------------------------------------------------------- */
// note: p2=1, p3=0 doesn't happen.
.L_epi_only_1:
{ if(!p3) jump .L_even1 //for even filters
// recip_shamt uses x37x34 which has just become dead
recip_shamt = memw(PARMW(10)) // get recip_shamt from srack frame
}
{
y0.cur = vmem(ptr_w+#0) //[1, 1]
s2.uw += vrmpy(y0.ub, x23x20.ub) //[1, 1]
s3.uw += vrmpy(y0.ub, x33x30.ub) //[1, 1]
x13x10 = memw(ptr_x1+in_depth<<#0) //[1, 1]
} {
x03x00 = memw(ptr_x1+#0<<3) //[1, 2]
} {
s0.uw += vrmpy(y0.ub, x03x00.ub) //[1, 3]
s1.uw += vrmpy(y0.ub, x13x10.ub) //[1, 3]
}
.L_even1:
{ y2.w = vmpye(s2.w, recipvec.uh) //
p1=cmp.eq(recip_shamt,#0)
mino.w = vmin(mine.w, s2.w) //see if s2 is max
ptr_x0 = sub(ptr_x0, next_outputs) //reset data ptr to next 4
} {
y2.w+= vmpyo(s2.w, recipvec.h):SSR //
col_count = add(col_count, #-4) //
maxo.w = vmax(maxe.w, s2.w) //[E, 4]
if (!p1)jump .L_apply_shamt;
}
//NOTE: the code at .L_apply_shamt must duplicate effects
// of these two bypassed packets
{
y3.w = vmpye(s3.w, recipvec.uh) //#
mino.w = vmin(mino.w, s3.w) //[E, 4]see if s3 is max
} {
maxo.w = vmax(maxe.w, s3.w) //[E, 3]
y3.w+= vmpyo(s3.w, recipvec.h):SSR //
sum0 = memw(active_sum++M0) //#2<<2) //stride = 4
}
.L_back_from_apply_shamt:
{
p1 = cmp.eq(col_count, #0) //
maxe.w = vmax(maxo.w, s0.w) //see if s0 is max
mine.w = vmin(mino.w, s0.w) //see if s0 is max
y0.w = vmpye(s0.w, recipvec.uh) //
} {
d0 =vsplat(sum0)
maxe.w = vmax(maxe.w, s1.w) //
mine.w = vmin(mine.w, s1.w) //see if s1 is max
} {
y3.h = vpack(y3.w, y2.w):sat //#sat8 <0, >255
y0.w+= vmpyo(s0.w, recipvec.h):SSR //
sum1 = memw(active_sum++M0) //stride = 4
} {
loop1(.L_filt_height, filt_height) //[P, 1]for(fil=0;fil<h*depth/32;fil+=1){
s0.w = vadd(wsum.w, d0.w)
y1.w = vmpye(s1.w, recipvec.uh) //
} {
d1 = vsplat(sum1)
sum2 = memw(active_sum++M0) //stride = 4
} {
y1.w+= vmpyo(s1.w, recipvec.h):SSR //
s1.w = vadd(wsum.w, d1.w)
dcfetch(fetch_ptr+#0<<5) //[1, 2]
} {
d2 = vsplat(sum2)
sum3 = memw(active_sum++M0) //stride = 4
} {
s2.w = vadd(wsum.w, d2.w)
y1.h = vpack(y1.w, y0.w):sat //#>>16
ptr_w = memw(sp+#52) //[P, 0]ptr_wi initialize filter pointer
d3 = vsplat(sum3)
} {
s3.w = vadd(wsum.w, d3.w)
y3.ub = vpack(y3.h, y1.h):sat //#sat8 <0, >255
vmem(ptr_z++#1) = y3.new //#[E, ]store 2nd 32bytes
if(!p1) jump:t .L_width //
}//end cols per line
/* ---------------------------------------------------------------------------- */
{
p1 = cmp.eq(out_height, #0) //
if(!p1.new) jump:t .L_height //
}//end lines per block
/* ---------------------------------------------------------------------------- */
/* scale mine, maxe according to recipvec ; apply to overall range */
{
mino.w = vmpye(mine.w, recipvec.uh)
ptr_max = memw(PARMW(4)) //ptr pre computed max value in output
} {
mine = vmem(ptr_max+#1)
mino.w+= vmpyo(mine.w, recipvec.h):SSR
} {
mine.w = vmin( mine.w, mino.w)
vmem(ptr_max+#1) = mine.new //[E, 2]
maxo.w = vmpye(maxe.w, recipvec.uh)
} {
maxo.w+= vmpyo(maxe.w, recipvec.h):SSR
maxe = vmem(ptr_max+#0)
} {
maxe.w = vmax( maxe.w, maxo.w)
vmem(ptr_max+#0) = maxe.new //[E, 1]
}
/*=============================================================================*/
{ r17:16 = memd(sp+#0) //restore stack
r19:18 = memd(sp+#8) //Q
} {
r21:20 = memd(sp+#16) //Q
r23:22 = memd(sp+#24) //Q
} {
r25:24 = memd(sp+#32) //Q
r27:26 = memd(sp+#40) //Q
} {
dealloc_return //Q
}
// * Shift s0,s1,s2,s3 left by recip_shamt
// * do the work which is skipped or invalidated in the main line:
// - update min/max for the *new* s2,s3 (results to mino,maxo);
// - find y2,y3 products for the *new* s2,s3'
// - load sum0
//
.L_apply_shamt:
{
sum0 = memw(active_sum++M0) //#2<<2) //stride = 4
s2.w = vasl(s2.w, recip_shamt)
} {
mino.w = vmin(mine.w, s2.w) //see if s2 is min
maxo.w = vmax(maxe.w, s2.w) //see if s2 is max
s3.w = vasl(s3.w, recip_shamt)
} {
y2.w = vmpye(s2.w, recipvec.uh)
mino.w = vmin(mino.w, s3.w) //see if s3 is min
maxo.w = vmax(maxo.w, s3.w) //see if s2 is max
} {
s0.w = vasl(s0.w, recip_shamt)
y2.w+= vmpyo(s2.w, recipvec.h):SSR //
} {
y3.w = vmpye(s3.w, recipvec.uh) //#
s1.w = vasl(s1.w, recip_shamt)
} {
y3.w+= vmpyo(s3.w, recipvec.h):SSR //
jump .L_back_from_apply_shamt;
}
.L_end:
/*=============================================================================*/
.size inconv2dbbb_v60_asm, .L_end-inconv2dbbb_v60_asm
/*=============================================================================*/
|
XiaoMi/nnlib | 3,936 | hexagon/asm_src/copy3to4_h.S | /*
* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#if 0
void copy3to4(uint8_t * out4, uint8_t * in3, int n, int in_offset)
{
int i;
for(i=0; i < n; i++)
{
out4[4*i+0] = in3[3*i+0];
out4[4*i+1] = in3[3*i+1];
out4[4*i+2] = in3[3*i+2];
out4[4*i+3] = in_offset;
}
return;
}
#endif
/*======================================================================*/
.text
.global copy3to4_asm
.balign 32
.type copy3to4_asm, @function
copy3to4_asm:
/* ------------------------------------------------------------------- */
/* void copy3to4_asm(uint8_t *out4, uint8_t *in3, int n, int in_offset, */
/* const uint8_t *cntrl_tab) */
/* Repacks 3-byte pixels into 4-byte pixels, filling every 4th output */
/* byte with in_offset (see C reference in the #if 0 block above). */
/* In: r0 = out4 dst (vector store, 128B aligned) */
/* r1 = in3 src (unaligned vmemu loads, 96 bytes/iteration) */
/* r2 = loop count passed straight to loop0; each iteration eats */
/* 96 input bytes (32 triples) and emits 128 output bytes, so */
/* the caller presumably passes n/32, not n — TODO confirm */
/* r3 = in_offset byte value */
/* r4 = ptr to vdelta control vector (extra arg vs. C prototype) */
/* Clobbers: r5, r7, v0-v4, q0, M0, loop0 regs. Returns via r31. */
/* ------------------------------------------------------------------- */
#define out4 r0
#define in3 r1
#define elemns r2
#define filt_offset r3
#define cntrl_tab r4
/* ------------------------------------------------------------------- */
#define vpred v0
#define vperm34 v1
#define vin_offset v2
#define d95_d00 v3
#define d127_d000 v4
/* ------------------------------------------------------------------- */
{ r5 = ##0x01000000 //4th byte mux in filt_offset: marks byte 3 of each word
loop0(.L_loop34, elemns)
r7 = #96 //input does 96 bytes
} {
M0 = r7 //M0 = 96: post-increment step for the unaligned input loads
vperm34 = vmem(cntrl_tab+#0) //vdelta control: spreads 96 src bytes into 4-byte lanes
vpred = vsplat(r5) //vmem(cntrl_tab+#1)
} {
filt_offset = vsplatb(filt_offset) //replicate offset byte into all 4 bytes of r3
d95_d00 = vmemu(in3++M0) //increment by 96
r7 = ##0x01010101 //set up vpredicate
} {
q0 = vand(vpred, r7) //q0 true on every 4th byte (the inserted lane)
vin_offset = vsplat(filt_offset) //mux in vector
}
.balign 32
/* ------------------------------------------------------------------- */
.L_loop34:
{ d127_d000 = vdelta(d95_d00, vperm34) //choose 4th byte
} {
d127_d000 = vmux(q0,vin_offset,d127_d000) //insert in_offset at byte 3 of each word
d95_d00 = vmemu(in3++M0) //increment by 96
} {
vmem(out4++#1) = d127_d000 //store 128bytes
}:endloop0
/* ------------------------------------------------------------------- */
{ jumpr r31
}
.L_end:
.size copy3to4_asm, .L_end-copy3to4_asm
|
XiaoMi/nnlib | 31,033 | hexagon/asm_src/gvconv2db2b2b2_d32_h_v66.S | /*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*======================================================================*/
/* FUNCTIONS : gvconv2db2b2b2_d32_asm */
/* */
/* DESCRIPTION */
/* Perform 2d convolution with input depth to output */
/* max, min computed and output scaled to 8bits */
/* */
/* ARCHITECTURE : QDSP6V6 + HVX */
/*======================================================================*/
/* REVISION HISTORY: */
/* ================= */
/* */
/* Author Date Comments */
/* ------------------------------------------------------------- */
/* DJH 04/21/17 created */
/* DJH 05/12/17 update api precomputed filt_offset */
/* DJH 05/16/17 Hoisted loop0 around to prolog and */
/* epilog of loop1 */
/*======================================================================*/
#if 0
#endif
/*=============================================================================*/
.text
.file "gvconv2db2b2b2_d32_h_v66.S"
.global gvconv2db2b2b2_d32_asm
.balign 32
.type gvconv2db2b2b2_d32_asm, @function
/*=============================================================================*/
/* gvconv2db2b2b2_d32_asm — 2-bit-split d32 2d convolution kernel (HVX v66). */
/* Register args (per #defines below): r0/r1 even/odd activation ptrs, */
/* r2 weights, r3/r4 even/odd result ptrs, r5 padded input width. */
/* Remaining params are read from the caller's outgoing stack area at word */
/* offsets (SPVEC+16)..(SPVEC+28) — see the memw(sp+#(SPVEC+n)<<2) loads. */
/* Saves/restores r16-r27 on its own frame; updates the running max/min */
/* vectors at ptr_minmax[0]/[1]; returns via dealloc_return. */
/* Uses the v66 z-register ('z = vmem(...)') + vrmpyz accumulation path. */
/*=============================================================================*/
gvconv2db2b2b2_d32_asm:
/*=============================================================================*/
/*=============================================================================*/
#define ptr_xei r0 //data aligned 128
#define ptr_xoi r1 //data aligned 128
#define ptr_wi r2 //weights aligned 128
#define ptr_zei r3 //results aligned 128
#define ptr_zoi r4 //results aligned 128
#define in_width r5 //(pad_l+in_width+pad_r) => 4 %4
#define out_width in_width
#define out_width_depth r4 //0 value in bytes to get to next full out row
#define col_count r25 //1 out_width_pad
#define stride_h_w r26 //2 stride_height|stride_width
#define in_depth r27 //3 %32
#define filt_width r20 //4 >= 1
#define filt_height r8 //5 >= 1filt_height lines per filter
#define out_height r9 //6 >= 1 number of vertical lines to perform
#define ptr_biasadd r10 //7 aligned 128
#define ptr_minmax r12 //8 aligned 128
#define recip_level r14 //9 recip is 31bit unsigned 0x7f800000000LL / max
#define recip_shift r13 //10 recip is 31bit unsigned 0x7f800000000LL / max
#define out_align r6 //11 0, 32, 64, 96
#define skip_col r21 //12
/*=============================================================================*/
#define actvtn_stride r1 //distance between odd and even activations
#define ptr_wi1 r10 //
#define c8 r3 //
#define prod_cnt r12 //used to count through the 3 products
#define filt_cnt r11 //how many vertical filter rows there are
#define in_width_stride_h_depth r15 //in_width * stride_h * in_depth for next output
#define ptr_x0 r16 //
#define ptr_x1 r7 //
#define stride_w r18 //stride width
#define next_outputs r19 //jump to input ptr for next set of outputs
#define ptr_w r17 //
#define ptr_w_ptr_x0 r17:16//
#define in_width_32 r22 //
#define ptr_x2 r23 //
#define ptr_z0 r24 //
#define ptr_z1 r26 //
#define ptr_xij r27 //ptr_xi + j
#define scratch r0 //
#define scratch0 r1 //
#define prep_ptr r2 //pointer to pre-coded read ptrs for each part of mpy
/*=============================================================================*/
#define PV32(VSRC) .word (0x1DFFE020+VSRC)
#define s0 v0 //
#define s1 v1 //
#define s1s0 v1:0 //
#define s2 v2 //
#define s3 v3 //
#define s3s2 v3:2 //
#define s3s2s1s0 v3:0 //
#define w0 v21 //
#define x0 v4 //
#define x1 v5 //
#define x2 v6 //
#define x3 v7 //
#define x3210e v6 //
#define x3210o v21 //
#define x3210e_prev v16 //previous value
#define x3210o_prev v22 //previous value
#define xout v17 //realigned out
#define y10 v8 //
#define y0 v8 //
#define y1 v9 //
#define y32 v10 //
#define y2 v10 //
#define y3 v11 //
#define wsum v14 //initialized to in_offset*wsum + biasoffset
#define maxe v12 //
#define mine v18 //
#define biasvec v16 //
#define recipvec v15 //
#define vcrnd v20 //contain 0080 in all of the words
#define vzero v23 //
#define sk V24 //NOTE(review): upper-case 'V24' — confirm assembler folds case to v24
#define RSS <<1:rnd:sat:shift //unverbose the instruction
#define SPVEC 704
/*=============================================================================
Scratch-frame layout: (offset, value) pairs read 8 bytes at a time via
prep_ptr for each of the partial products (weight half x activation half).
(sp+#56) 0 # w0*x0
(sp+#60) ptr_wi # w0*x0
(sp+#64) actvtn_stride #0 w0*x1
(sp+#68) ptr_wi #0 w0*x1
(sp+#72) 0 #1 w1*x0
(sp+#76) ptr_wi1 #1 w1*x0
(sp+#80) actvtn_stride #2 w1*x1
(sp+#84) ptr_wi1 #2 w1*x1
(sp+#88) #0 # guard against oob load
(sp+#92) #0 # guard against oob load
(sp+#96) ptr_zi
(sp+#100) ptr_ze
===============================================================================*/
{ allocframe(#(4*SPVEC+56)) //0th entry on stack is (512+56+8)/4 =16 ints
} {
memd(sp+#0) = r17:16 //save 16,17
memd(sp+#8) = r19:18 //save 18,19
ptr_xei = and(ptr_xei, #-2) //guarantee lsb is 0
ptr_xoi = and(ptr_xoi, #-2) //guarantee lsb is 0
} {
memd(sp+#16) = r21:20 //save 20,21
memd(sp+#24) = r23:22 //save 22,23
#if defined(FAST_16B_CONV)
r20 = ##0x00000080 //rounding value for middle products +80)>>8
#else
r20 = ##0x00008080 //rounding value for middle products +80)>>8
#endif
} {
vcrnd = vsplat(r20) //rounding value for >>8
ptr_minmax = memw(sp+#(SPVEC+24)<<2) //ptr pre computed max value in output
memw(sp+#68) = ptr_wi //save weights ptr
vzero = #0 //vector 0
} {
memd(sp+#32) = r25:24 //save 24,25
mine = vmem(ptr_minmax+#1) //get running min
y0 = vcrnd //
} {
memd(sp+#40) = r27:26 //save 26,27
maxe = vmem(ptr_minmax+#0) //get running max
} {
stride_h_w = memw(sp+#(SPVEC+18)<<2) //extract strides h + w
} {
stride_w = zxth(stride_h_w) //extract stride width
memw(sp+#60) = ptr_wi //save weights ptr
} {
stride_w = asl(stride_w, #2) //4*stride_w
} {
p0 = cmp.eq(stride_w, #8) //if stride_w = 2 * 4 modify z_buf pointer lsb
if(p0.new) ptr_xei = add(ptr_xei, #1) //make lsb of ptr 1 for stride = 2
if(p0.new) ptr_xoi = add(ptr_xoi, #1) //make lsb of ptr 1 for stride = 2
} {
memw(sp+#48) = ptr_xei //save ptr to activations
actvtn_stride = sub(ptr_xoi, ptr_xei) //byte distance odd-even activation planes
in_depth = memw(sp+#(SPVEC+19)<<2) //get input depth
} {
filt_width = memw(sp+#(SPVEC+20)<<2) //extract filt_width
filt_height = memw(sp+#(SPVEC+21)<<2) //extract filt_height
} {
filt_height = mpy(filt_height.L,in_depth.L)//filt_height*in_depth
out_height = memw(sp+#(SPVEC+22)<<2) //number of output lines
ptr_biasadd = memw(sp+#(SPVEC+23)<<2) //ptr pre computed weight sum
filt_width = asl(filt_width, #1) //x2 to account for loop of 16bytes
} {
filt_height = lsr(filt_height, #5) //filt_height * in_depth / 32
recip_level = memw(sp+#(SPVEC+25)<<2) //get scalar 32bit recip level
} {
recipvec = vsplat(recip_level) //spread recip_val across 32words
recip_shift = memw(sp+#(SPVEC+26)<<2) //can we flush align to do last col
skip_col = memw(sp+#(SPVEC+28)<<2) //can we flush align to do last col
} {
in_width_32 = asl(in_width, #5) //32 * in_width d32 line
wsum = vmem(ptr_biasadd+#0) //gemsumb + bias offsets
} {
ptr_wi1 = mpyi(filt_height, filt_width) //offset for hi bytes of weights
out_align = memw(sp+#(SPVEC+27)<<2) //output alignment 0,32,64,96
filt_width = add(filt_width, #-1) //account for epilog
} {
next_outputs = asl(stride_w, #5) //1,2 32*stride*4 i.e. 128 or 256
in_width_stride_h_depth= mpy(stride_h_w.H, in_depth.L) //
memw(sp+#96) = ptr_zei //save output ptr on stack
p3 = cmp.eq(out_align, #0) //if no alignment enable store
} {
in_width_stride_h_depth=mpyi(in_width,in_width_stride_h_depth) //total vertical stride bytes
stride_w = mpyi(stride_w, #24) //offset for z buf 96 or 192
memw(sp+#100) = ptr_zoi //save output ptr on stack
c8 = #0 //temp 0
} {
ptr_wi1 = asl(ptr_wi1, #9) //* 512 = 2/32*16*32
stride_w = add(stride_w, #4) //preset offset for z buf
memw(sp+#72) = c8 //d0 x w1 activtn offset
memw(sp+#88) = c8 //
} {
ptr_wi1 = add(ptr_wi, ptr_wi1) //ptr to odd bytes of weights
memw(sp+#64) = actvtn_stride //d1 x w0 activtn offset
memw(sp+#56) = c8 //
if (p3) y0 = vzero //
} {
memw(sp+#76) = ptr_wi1 //save d0 x w1 weight ptr
memw(sp+#84) = ptr_wi1 //save d1 x w1 weight ptr
q1 = vcmp.eq(vzero.w,y0.w) //
} {
memw(sp+#80) = actvtn_stride //d1 x w1 activations
scratch0 = add(sp, #127) //align stack to next 128b
q0 = or(q1, q1) //
} {
col_count = memw(sp+#(SPVEC+17)<<2) //read width of activations
scratch0 = and(scratch0, #-128) //align stack to next 128b
#if !defined(FAST_16B_CONV)
c8 = #8 //
#endif
} {
scratch0 = add(scratch0, #128) //align stack to next 128b
out_width_depth = memw(sp+#(SPVEC+16)<<2) //read width of activations
#if !defined(SPLIT_OUTPUT)
out_align = mux(p3,#0,#64) //
out_width = add(col_count,#-4) //
#endif
}
/*=============================================================================*/
.balign 64
/*=============================================================================*/
/* Per-output-row setup + inner MAC kernel. loop1 walks filter rows * depth, */
/* loop0 walks filter width; vrmpyz accumulates 4 output columns at once */
/* against the z-register activation buffer. After endloop1 the partial */
/* product is shifted down by c8 and the code jumps back to .L_products */
/* until prod_cnt partial products (weight-half x activation-half) are done. */
/*=============================================================================*/
.L_height:
{
#if defined(FAST_16B_CONV)
ptr_w_ptr_x0 = memd(sp+#64) //[Pre-Width]initialize filter pointer & activation offset
#else
ptr_w_ptr_x0 = memd(sp+#56) //[Pre-Width]initialize filter pointer & activation offset
#endif
loop1(.L_filt_height, filt_height) //[Pre-Width]for(filt_y=0;filt_y<height*in_depth/32;filt_y++){
ptr_xij = memw(sp+#48) //initial main actvtn. ptr_xi
filt_cnt = add(filt_height, #-1) //pre-width]initialize filt height cntr
} {
loop0(.L_filt_width, filt_width) //[Pre-Width], 0]ki is k1/32 - 0
ptr_x0 = add(ptr_xij, ptr_x0) //[Pre-Width]odd activations + in_Depth_32
#if defined(FAST_16B_CONV)
prod_cnt = #3 //[Pre-Width]total 3 partial products
#else
prod_cnt = #4 //[Pre-Width]total 4 partial products
#endif
ptr_z0 = memw(sp+#96) //add(ptr_zi, #0)
} {
ptr_x2 = and(ptr_x0, #-128) //[Pre-Width]make loads aligned to 128 zero out bits 0-6
s1s0 = vcombine(vcrnd, vcrnd) //[Pre-Width]accumulator 0,1
#if defined(FAST_16B_CONV)
prep_ptr = add(sp, #(64+8)) //[Pre-Width]ptr to pre computed ptr list
#else
prep_ptr = add(sp, #(56+8)) //[Pre-Width]ptr to pre computed ptr list
#endif
p2 = !cmp.eq(r0,r0) //p2=0
} {
ptr_z1 = memw(sp+#100) //add(ptr_zi, #0)
z = vmem(ptr_x2+#0) //[Pre-Width][Pheight]load 0-127
s3s2 = vcombine(vcrnd,vcrnd) //[Pre-Width]accumulator 2,3
scratch = scratch0 //[Pre-Width]temp accumulator buffer
} {
p3 = cmp.eq(out_align, #0) //if no alignment enable store
z = vmem(ptr_x2+#1) //[Pre-Width]load 128-255
ptr_x1 = add(ptr_x0, stride_w) //[Pre-Width]setup initial pointer
ptr_x0 = add(ptr_x0, in_width_32) //[Pre-Width], 0]move to next even line of filter activations
}
/*=============================================================================*/
.balign 64
.L_width:
.L_products: //d1 * w0,d0 * w1,d1 * w1
.L_filt_height:
.L_filt_width:
{ w0.tmp = vmem(ptr_w++#1) //[0, 0]load weights
s3s2s1s0.w += vrmpyz(w0.b, ptr_x1.ub++) //[0, 0]perform mac across 4 streams with same weights
} {
w0.tmp = vmem(ptr_w++#1) //[0, 1]load weights
s3s2s1s0.w += vrmpyz(w0.b, ptr_x1.ub++) //[0, 1]perform mac across 4 streams with same weights
} {
w0.tmp = vmem(ptr_w++#1) //[0, 2]load weights
s3s2s1s0.w += vrmpyz(w0.b, ptr_x1.ub++) //[0, 2]perform mac across 4 streams with same weights
} {
w0.tmp = vmem(ptr_w++#1) //[0, 3]load weights
s3s2s1s0.w += vrmpyz(w0.b, ptr_x1.ub++) //[0, 3]perform mac across 4 streams with same weights
z = vmem(ptr_x1+#0) //[0, 3]load next stride=1 128 or stride=2 64 bytes
}:endloop0
/*=============================================================================*/
{ w0.tmp = vmem(ptr_w++#1) //[0, 4]load weights
s3s2s1s0.w += vrmpyz(w0.b, ptr_x1.ub++) //[0, 4]perform mac across 4 streams with same weights
p0 = cmp.eq(filt_cnt, #0) //[Kernel]count filt height itns.
if(p0.new) ptr_x0 = memw(prep_ptr++#1<<3) //[Width]initialize activation offset
} {
loop0(.L_filt_width, filt_width) //[P, 0]ki is k1/32 - 0
w0.tmp = vmem(ptr_w++#1) //[0, 5]load weights
s3s2s1s0.w += vrmpyz(w0.b, ptr_x1.ub++) //[0, 5]perform mac across 4 streams with same weights
if(p0) ptr_x0 = add(ptr_xij, ptr_x0) //[Width]create next activation ptr
} {
w0.tmp = vmem(ptr_w++#1) //[0, 6]load weights
s3s2s1s0.w += vrmpyz(w0.b, ptr_x1.ub++) //[0, 6]perform mac across 4 streams with same weights
ptr_x2 = and(ptr_x0, #-128) //[P, 0]make loads aligned to 128 zero out bits 0-6
filt_cnt = add(filt_cnt, #-1) //decrement filt height cnt
} {
w0.tmp = vmem(ptr_w++#1) //[0, 7]load weights
s3s2s1s0.w += vrmpyz(w0.b, ptr_x1.ub ) //[0, 7]perform mac across 4 streams with same weights
z = vmem(ptr_x2+#0) //[P, 3]load 0-127
ptr_x1 = add(ptr_x0, stride_w) //[P, 1]setup initial pointer
} {
ptr_x0 = add(ptr_x0, in_width_32) //[P, 2]move to next even line of filter activations
z = vmem(ptr_x2+#1) //[P, 4]load 128-255
if(p0) ptr_w = memw(prep_ptr+#-1<<2) //[Width]initialize filter pointer
}:endloop1
/*============================================================================*/
{ s0.w = vasr(s0.w, c8) //scale partial product down by c8 (8 or 0)
loop1(.L_filt_height, filt_height) //[Width]for(filt_y=0;filt_y<height*in_depth/32;filt_y++){
prod_cnt = add(prod_cnt, #-1) //[Width]net partial product
p0 = cmp.eq(prod_cnt, #3) //[Width]end of partial products?
} {
s1.w = vasr(s1.w, c8) //
if (!p3) s0 = vzero //out_left_junk, so zero out the computation
} {
s2.w = vasr(s2.w, c8) //
p1 = cmp.eq(prod_cnt, #0) //[Width]end of partial products?
filt_cnt = add(filt_height, #-1) //[Kernel]
} {
s3.w = vasr(s3.w, c8) //
c8 = mux(p0,#8,#0) //
if(!p1) jump:t .L_products //[Width]next product
}
/*=============================================================================*/
.balign 64
/*=============================================================================*/
/* Post-width section: adds wsum (bias + weight-sum offsets), tracks running */
/* max/min, scales by recipvec with rnd:sat, packs 32-bit sums down to bytes, */
/* realigns against the previous vector by out_align, stores 4 columns, and */
/* simultaneously primes the pointers/accumulators for the next width step. */
/*=============================================================================*/
{
#if !defined(FAST_16B_CONV)
c8 = #8 //[Post-Width]8bit shift for lower products
#endif
ptr_xij = add(ptr_xij, next_outputs) //[Post-Width]reset data ptr to next 4
sk = #0 //
#if defined(SPLIT_OUTPUT)
y32.uh = vpack(y3.w, y2.w):sat //[Post-Width-P]pack low 16bits together
#else
x3210o.uh = vpack(y3.w, y2.w):sat //[Post-Width]pack low 16bits together
#endif
} {
#if defined(SPLIT_OUTPUT)
x3210e.b = vpacke(y32.h, y10.h) //[Post-Width-P]
#endif
if (q0) s0.w += wsum.w //[Post-Width]
s1.w = vadd(s1.w, wsum.w) //[Post-Width]
s2.w = vadd(s2.w, wsum.w) //[Post-Width]
} {
#if defined(SPLIT_OUTPUT)
x3210o.b = vpacko(y32.h, y10.h) //[Post-Width-P]
#endif
p0 = cmp.ge(col_count,#2) //
p1 = cmp.ge(col_count,#3) //
col_count=add(col_count,#-4) //[Post-Width]count -=4 ptr_z += 128
} {
s3.w = vadd(s3.w, wsum.w) //[Post-Width]
maxe.w = vmax(maxe.w, s0.w) //[Post-Width]see if s0 is max
mine.w = vmin(mine.w, s0.w) //[Post-Width]see if s0 is min
s0.w = vasl(s0.w, recip_shift) //[Post-Width]
} {
if (p0) sk = s1 //sk holds the sum only if this column is valid
p0 = cmp.ge(col_count,#(4-4)) //
s1.w = vasl(s1.w, recip_shift) //[Post-Width]
#if defined(SPLIT_OUTPUT)
xout = vlalign(x3210e,x3210e_prev,out_align)//[Post-Width-P]
#else
xout = vlalign(x3210e,x3210o_prev,out_align)//[Post-Width-P]
#endif
} {
maxe.w = vmax(maxe.w, sk.w) //[Post-Width]
mine.w = vmin(mine.w, sk.w) //[Post-Width]see if z0 is max
if (p1) sk = s2 //
s2.w = vasl(s2.w, recip_shift) //[Post-Width]
} {
maxe.w = vmax(maxe.w, sk.w) //[Post-Width]
mine.w = vmin(mine.w, sk.w) //[Post-Width]see if z0 is max
if(p2)vmem(ptr_z0++#1):nt = xout //[Post-Width-P]store 2nd 32bytes
if (p0) sk = s3 //
} {
maxe.w = vmax(maxe.w, sk.w) //[Post-Width]
mine.w = vmin(mine.w, sk.w) //[Post-Width]see if z0 is max
s3.w = vasl(s3.w, recip_shift) //[Post-Width]
#if !defined(SPLIT_OUTPUT)
p0 = cmp.eq(col_count,out_width)
#endif
} {
#if defined(SPLIT_OUTPUT)
xout = vlalign(x3210o,x3210o_prev,out_align)//[Post-Width-P]align old and new data
#else
xout = vlalign(x3210o,x3210e,out_align) //[Post-Width]
#endif
y0.w = vmpye(s0.w, recipvec.uh) //[Post-Width](s2 * recip + rnd)>>31
#if defined(FAST_16B_CONV)
ptr_w_ptr_x0 = memd(sp+#64) //[Pre-Width]initialize filter pointer & activation offset
prep_ptr = add(sp, #(64+8)) //[Pre-Width]ptr to pre computed ptr list
#else
ptr_w_ptr_x0 = memd(sp+#56) //[Pre-Width]initialize filter pointer & activation offset
prep_ptr = add(sp, #(56+8)) //[Pre-Width]ptr to pre computed ptr list
#endif
} {
y0.w+= vmpyo(s0.w, recipvec.h):RSS //[Post-Width<<1:rnd:sat:shift
ptr_x0 = add(ptr_xij, ptr_x0) //[Pre-Width]odd activations + in_Depth_32
#if defined(FAST_16B_CONV)
prod_cnt = #3 //[Pre-Width]total 3 partial products
#else
prod_cnt = #4 //[Pre-Width]total 4 partial products
#endif
s0 = vcrnd //[Pre-Width]accumulator 0
} {
#if defined(SPLIT_OUTPUT)
if(p2)vmem(ptr_z1++#1):nt = xout //[Post-Width-P]store 2nd 32bytes
#else
if(!p0)vmem(ptr_z0++#1):nt = xout //[Post-Width-P]store 2nd 32bytes
#endif
x3210e_prev = x3210e //[Post-Width-P]save data for next output align
y1.w = vmpye(s1.w, recipvec.uh) //[Post-Width](s2 * recip + rnd)>>31
ptr_x2 = and(ptr_x0, #-128) //[Pre-Width]make loads aligned to 128 zero out bits 0-6
} {
y1.w+= vmpyo(s1.w, recipvec.h):RSS //[Post-Width<<1:rnd:sat:shift
s1 = vcrnd //[Pre-Width]accumulator 1
z = vmem(ptr_x2+#0) //[Pre-Width][Pheight]load 0-127
p2 = p3 //[Post-Width]
} {
y2.w = vmpye(s2.w, recipvec.uh) //[Post-Width](s2 * recip + rnd)>>31
z = vmem(ptr_x2+#1) //[Pre-Width]load 128-255
ptr_x1 = add(ptr_x0, stride_w) //[Pre-Width]setup initial pointer
scratch = scratch0 //[Pre-Width]temp accumulator buffer
} {
#if defined(SPLIT_OUTPUT)
y10.uh = vpack(y1.w, y0.w):sat //[Post-Width]pack low 16bits together
#else
x3210e.uh = vpack(y1.w, y0.w):sat //[Post-Width]pack low 16bits together
#endif
y2.w+= vmpyo(s2.w, recipvec.h):RSS //[Post-Width]<<1:rnd:sat:shift
ptr_x0 = add(ptr_x0, in_width_32) //[Pre-Width], 0]move to next even line of filter activations
p1 = cmp.gt(col_count, #0) //[Post-Width]compare for branch
} {
x3210o_prev = x3210o //[Post-Width-P]save data for next output align
y3.w = vmpye(s3.w, recipvec.uh) //[Post-Width](s2 * recip + rnd)>>31
q0 = vcmp.eq(x3210o.w,x3210o.w) //q0 = all-true
p3 = cmp.eq(r0, r0) //[Post-Width]set to true
} {
y3.w+= vmpyo(s3.w, recipvec.h):RSS //[Post-Width]<<1:rnd:sat:shift
s2 = vcrnd //[Pre-Width]accumulator 2]
s3 = vcrnd //[Pre-Width]accumulator 3]
if(p1) jump:t .L_width //[Post-Width]
} //cols per line kernel loop width
/*=============================================================================*/
/* Height epilogue: advance the row pointers, flush the last packed/aligned */
/* output vectors for this row, loop back to .L_height; when all rows are */
/* done, write back the running max/min vectors and restore r16-r27. */
{ memw(sp+#48) += in_width_stride_h_depth //[Height]ptr_x+=2*in_width*stride_h*in_depth)
p0 = !cmp.eq(skip_col, #0) //[Height]
p3 = cmp.gt(out_align, #0) //
out_height = add(out_height, #-1) //
} {
#if defined(SPLIT_OUTPUT)
y32.uh = vpack(y3.w, y2.w):sat //[Post-Width]pack low 16bits together
#else
x3210o.uh = vpack(y3.w, y2.w):sat //[Post-Width]pack low 16bits together
#endif
memw(sp+#96) += out_width_depth //[Height]ptr_zi = add(ptr_zi, out_width_depth)
p0 = or(p0, p3) //
} {
#if defined(SPLIT_OUTPUT)
memw(sp+#100) += out_width_depth //[Height]ptr_zi = add(ptr_zi, out_width_depth)
#else
memw(sp+#96) += out_width_depth //[Height]ptr_zi = add(ptr_zi, out_width_depth)
#endif
col_count = memw(sp+#(SPVEC+17)<<2) //read width of activations
} {
#if defined(SPLIT_OUTPUT)
x3210e.b = vpacke(y32.h, y10.h) //[Post-Width]
#endif
q0 = or(q1, q1) //
} {
#if defined(SPLIT_OUTPUT)
x3210o.b = vpacko(y32.h, y10.h) //[Post-Width]
#endif
p1 = cmp.eq(out_height, #0) //[Height]
} {
#if defined(SPLIT_OUTPUT)
xout = vlalign(x3210e,x3210e_prev,out_align)//[Post-Width]
#else
xout = vlalign(x3210e,x3210o_prev,out_align)//[Post-Width]
#endif
if (p2) vmem(ptr_z0++#1):nt = xout.new //[Post-Width]store 2nd 32bytes
} {
#if defined(SPLIT_OUTPUT)
xout = vlalign(x3210o,x3210o_prev,out_align)//[Post-Width]
if (p2) vmem(ptr_z1++#1):nt = xout.new //[Post-Width]store 2nd 32bytes
#else
xout = vlalign(x3210o,x3210e,out_align) //[Post-Width]
vmem(ptr_z0++#1):nt = xout.new //[Post-Width]store 2nd 32bytes
p0 = cmp.eq(out_align, #0) //if no alignment enable store
#endif
} {
#if defined(SPLIT_OUTPUT)
xout = vlalign(x3210e, x3210e, out_align) //[Height]
if( p0) vmem(ptr_z0+#0):nt = xout.new //[Height]store 2nd 32bytes
} {
xout = vlalign(x3210o, x3210o, out_align) //[Height]
if( p0) vmem(ptr_z1+#0):nt = xout.new //[Height]store 2nd 32bytes
#else
xout = vlalign(x3210o,x3210o,out_align) //[Post-Width]
if(!p0) vmem(ptr_z0+#0):nt = xout.new //[Height]store 2nd 32bytes
#endif
if(!p1) jump:t .L_height //[Height]
}//end lines per block
/*=============================================================================*/
{ r17:16 = memd(sp+#0) //restore r16, r17 from stack
ptr_minmax = memw(sp+#(SPVEC+24)<<2) //ptr pre computed max value in output
} {
vmem(ptr_minmax+#0) = maxe //[E, 0]32max
r19:18 = memd(sp+#8) //restore r18,r19
} {
vmem(ptr_minmax+#1) = mine //[E, 0]32min
r21:20 = memd(sp+#16) //restore r20,r21
} {
r23:22 = memd(sp+#24) //restore r22,r23
r25:24 = memd(sp+#32) //restore r24,r25
} {
r27:26 = memd(sp+#40) //restore r26,r27
dealloc_return //restore frame and return
}
.L_end:
/*=============================================================================*/
.size gvconv2db2b2b2_d32_asm, .L_end-gvconv2db2b2b2_d32_asm
/*=============================================================================*/
|
XiaoMi/nnlib | 4,946 | hexagon/asm_src/vrmaxmin_h.S | /*
* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
* FUNCTIONS : gvrmaxmin
*
* DESCRIPTION
* find max/min of one vector
*
* ARCHITECTURE : QDSP6V60 + HVX
*
* REVISION HISTORY:
* =================
*
* Author Date Comments
* -------------------------------------------------------------
*
* CYCLE-COUNT:
*
* MEMORY
* CODESIZE = bytes
* STACK = bytes
* ASSUMPTIONS
*
*
* C MODEL
*/
/*=============================================================================*/
.text
.file "vrmaxmin_h.S"
.global gvrmaxmin
.balign 32
.type gvrmaxmin, @function
/*=============================================================================*/
/* void gvrmaxmin(HVX_Vector *ptr_max) */
/* In-place horizontal reduction of two HVX vectors: */
/* ptr_max[0] : 32 x i32 -> every word replaced by the overall max */
/* ptr_max[1] : 32 x i32 -> every word replaced by the overall min */
/* Works as a log2 rotation tree: 5 vmax/vmin stages over byte rotations */
/* 64,32,16,8,4 (4 loop iterations + the drained epilogue stage). */
/* ptr_max must be vector-aligned (aligned vmem load/store). */
/* Clobbers: r1, v0-v3, loop0 regs. */
/*=============================================================================*/
gvrmaxmin:
/*=============================================================================*/
#define ptr_max r0 //data
#define nrot r1 //constant
/*=============================================================================*/
#define max v0 //
#define max_t v1 //
#define min v2 //
#define min_t v3 //
/*=============================================================================*/
{ min = vmem(ptr_max+#1) //
nrot = #128/2 //first rotation: half the 128B vector
loop0(.L_peak, #4) //
nop //
} {
max.cur = vmem(ptr_max+#0) //
max_t = vror(max,nrot) //[0, 0]
nop; nop //
}
.L_peak:
{ min_t = vror(min,nrot) //[0, 1]
max.w = vmax(max.w, max_t.w) //[0, 1]
} {
nrot = lsr(nrot,#1) //[0, 2]halve rotation each stage: 64,32,16,8
min.w = vmin(min.w, min_t.w) //[0, 2]
} {
max_t = vror(max,nrot) //[1, 0]
}:endloop0
{ min_t = vror(min,nrot) //[1, 1]final stage at rotation 4
max.w = vmax(max.w, max_t.w) //[1, 1]
vmem(ptr_max+#0) = max.new //[E, 1]
} {
min.w = vmin(min.w, min_t.w) //[1, 2]
vmem(ptr_max+#1) = min.new //[E, 2]
}
/*=============================================================================*/
{ jumpr r31 //
}
.L_end:
/*=============================================================================*/
.size gvrmaxmin, .L_end-gvrmaxmin
/*=============================================================================*/
|
XiaoMi/nnlib | 7,081 | hexagon/asm_src/nn_os_fast.S |
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#define PTR r0
#define AMT r1
#define RET r3:2
#define OLDVAL r4
#define NEWVAL r5
#define MASK0 r6
.text
.global nn_sem_add_fastpath
.global nn_sem_add_fastpath_withret
// nn_sem_add_fastpath(r0 = sem ptr, r1 = amount):
// LL/SC fast path that adds 'amount' to the semaphore word. The fast path
// only applies while the upper 16 bits of the word are clear (presumably a
// count of blocked waiters -- confirm against the C-side nn_sem layout);
// otherwise it branches to the slow path so waiters can be woken.
// Both labels alias the same code: the _withret entry additionally returns
// the caller-staged r3:2 in r1:0 (used by nn_pipe_recv_fastpath to return
// the received data through the tail call). Clobbers r4-r6, p0.
nn_sem_add_fastpath:
nn_sem_add_fastpath_withret:
{
OLDVAL = memw_locked(PTR) // load-locked: reserve the semaphore word
MASK0 = ##0xFFFF0000 // mask of the upper (waiter) halfword
}
{
p0 = bitsclr(OLDVAL,MASK0) // fast path only if upper halfword is zero
if (!p0.new) jump:nt ##nn_sem_add_slowpath_withret
NEWVAL = add(OLDVAL,AMT) // bump the count
}
{
memw_locked(PTR,p0) = NEWVAL // store-conditional: attempt atomic update
}
if (!p0) jump:nt nn_sem_add_fastpath // reservation lost: retry from the top
{
jumpr r31
r1:0 = RET // _withret: propagate r3:2 to the caller
}
.size nn_sem_add_fastpath_withret,.-nn_sem_add_fastpath_withret
.size nn_sem_add_fastpath,.-nn_sem_add_fastpath
.text
.global nn_sem_sub_fastpath
// nn_sem_sub_fastpath(r0 = sem ptr, r1 = amount):
// LL/SC fast path that subtracts 'amount' from the semaphore count.
// cmph.gtu compares only the low halfword of the word (the count field),
// so if amount > count the caller must block -> slow path.
// Clobbers r4-r5, p0.
nn_sem_sub_fastpath:
{
OLDVAL = memw_locked(PTR) // load-locked: reserve the semaphore word
}
{
p0 = cmph.gtu(AMT,OLDVAL) // halfword compare: amount > count?
if (p0.new) jump:nt ##nn_sem_sub_slowpath // not enough: block in slow path
NEWVAL = sub(OLDVAL,AMT) // take the count
}
{
memw_locked(PTR,p0) = NEWVAL // store-conditional: attempt atomic update
}
if (!p0) jump:nt nn_sem_sub_fastpath // reservation lost: retry
jumpr r31
.size nn_sem_sub_fastpath,.-nn_sem_sub_fastpath
.text
.global nn_mutex_lock_fastpath
// nn_mutex_lock_fastpath(r0 = mutex ptr):
// LL/SC fast path: 0 -> unlocked, 1 -> locked by us. Any non-zero value
// (contended / has waiters) diverts to the slow path. Clobbers r4-r5, p0.
nn_mutex_lock_fastpath:
{
OLDVAL = memw_locked(PTR) // load-locked: reserve the mutex word
}
{
p0 = cmp.eq(OLDVAL,#0) // free?
if (!p0.new) jump:nt ##nn_mutex_lock_slowpath // contended: block in slow path
NEWVAL = #1 // value that claims the lock
}
{
memw_locked(PTR,p0) = NEWVAL // store-conditional: attempt to take it
}
if (!p0) jump:nt nn_mutex_lock_fastpath // reservation lost: retry
jumpr r31
.size nn_mutex_lock_fastpath,.-nn_mutex_lock_fastpath
.text
.global nn_mutex_unlock_fastpath
// nn_mutex_unlock_fastpath(r0 = mutex ptr):
// LL/SC fast path: only valid when the word is exactly 1 (locked, no
// waiters). Any other value (presumably waiters recorded by the slow
// lock path -- confirm against the C side) goes to the slow path so
// they can be woken. Clobbers r4-r5, p0.
nn_mutex_unlock_fastpath:
{
OLDVAL = memw_locked(PTR) // load-locked: reserve the mutex word
}
{
p0 = cmp.eq(OLDVAL,#1) // locked with no waiters?
if (!p0.new) jump:nt ##nn_mutex_unlock_slowpath // waiters: wake via slow path
NEWVAL = #0 // value that releases the lock
}
{
memw_locked(PTR,p0) = NEWVAL // store-conditional: attempt the release
}
if (!p0) jump:nt nn_mutex_unlock_fastpath // reservation lost: retry
jumpr r31
.size nn_mutex_unlock_fastpath,.-nn_mutex_unlock_fastpath
#undef PTR
#undef AMT
#undef OLDVAL
#undef NEWVAL
#undef MASK0
#define OFFSET_HOWFULL 0
#define OFFSET_RECVIDX 4
#define OFFSET_RECVIDX_HOWFULL 0
#define OFFSET_HOWEMPTY 8
#define OFFSET_MUTEX 12
#define OFFSET_MUTEX_HOWEMPTY 8
#define OFFSET_SENDIDX 16
#define OFFSET_DATAPTR 24
#define OFFSET_ELEMENTS 28
#define OFFSET_ELEMENTS_DATAPTR 24
#define PIPE r0
#define DATAPTR r14
#define ELEMENTS r15
#define ELEMENTS_DATAPTR r15:14
#define HOWFULL r2
#define RECVIDX r3
#define RECVIDX_HOWFULL r3:2
#define NEWFULL r6
#define NEWIDX r7
#define NEWIDX_FULL r7:6
#define DATA r9:8
#define SPINS r28
.text
.global nn_pipe_recv_fastpath
// nn_pipe_recv_fastpath(r0 = pipe): receive one 8-byte element.
// A single 64-bit LL/SC covers the {howfull, recv_idx} pair at offset 0 of
// the pipe struct, so consuming a slot and advancing the read index is one
// atomic step. The element itself is read speculatively under the
// reservation; if the store-conditional fails the read is discarded and we
// retry. On success we tail-call nn_sem_add_fastpath_withret to release an
// "empty" slot and return the data (staged in r3:2 -> r1:0).
// Spins a bounded number of times on an empty pipe, then blocks in the
// slow path. Clobbers r1-r9, r14-r15, r28, p0.
nn_pipe_recv_fastpath:
{
ELEMENTS_DATAPTR = memd(PIPE+#OFFSET_ELEMENTS_DATAPTR) // {elements, data base}
SPINS = #31 // bounded spin budget
}
.Lrecv_busy_spin:
RECVIDX_HOWFULL = memd_locked(PIPE) // LL: {recv_idx, howfull} pair
{
p0 = cmph.gtu(HOWFULL,#0) // anything in the pipe? (halfword compare)
if (!p0.new) jump:nt .Lrecv_busy // empty: back off / slow path
NEWIDX = add(RECVIDX,#1)
NEWFULL = add(HOWFULL,#-1) // consume one element
}
{
DATA=memd(DATAPTR+RECVIDX<<#3) // speculatively read the element
p0 = cmp.eq(NEWIDX,ELEMENTS)
if (p0.new) NEWIDX = #0 // ring-buffer wrap
}
memd_locked(PIPE,p0) = NEWIDX_FULL // SC: claim the slot
if (!p0) jump:nt .Lrecv_busy_spin // not atomic -- SC failed, discard read, retry
// Data received. Now we need to increment howempty.
{
r1 = #1
r0 = add(PIPE,#OFFSET_HOWEMPTY)
r3:2 = DATA // withret returns this in r1:0
jump nn_pipe_recv_slowpath_tail_placeholder_do_not_use // (see below)
}
.Lrecv_busy:
pause(#10) // back off before re-checking
{
SPINS = add(SPINS,#-1)
p0 = cmp.eq(SPINS,#0)
if (!p0.new) jump:t .Lrecv_busy_spin
}
{
jump nn_pipe_recv_slowpath // spun out: block properly
}
#undef RECVIDX_HOWFULL
#undef RECVIDX
#undef HOWFULL
#undef NEWIDX_FULL
#undef NEWIDX
#undef NEWFULL
#define SENDDATA r1
#define N_ITEMS r2
#define ATOMIC_PTR r3
#define HOWEMPTY r4
#define MUTEX r5
#define MUTEX_HOWEMPTY r5:4
#define NEWMUTEX r7
#define NEWEMPTY r6
#define NEWMUTEX_EMPTY r7:6
#define SENDIDX r10
#define NEWIDX r11
.text
.global nn_pipe_send_multi_fastpath
//-----------------------------------------------------------------------------
// nn_pipe_send_multi_fastpath(r0 = pipe, r1 = src, r2 = n_items):
// Send n_items 8-byte elements into the pipe's ring buffer.
// A single 64-bit LL/SC on the {howempty, mutex} pair both takes the pipe
// lock (mutex 0 -> 1) and reserves n_items empty slots in one atomic step.
// While holding the lock we copy the elements and advance send_idx, then a
// second LL/SC releases the lock. Finally howfull is bumped by n_items via
// a tail call to nn_sem_add_fastpath.
// Falls back to the slow paths when the lock is held / there is no room
// (after a bounded spin), or when another thread blocked on the mutex while
// we held it (mutex word no longer exactly 1 -- presumably marked by the
// slow lock path; confirm against the C side).
// Clobbers r1-r11, r14-r15, r28, p0-p1, loop0 state.
//-----------------------------------------------------------------------------
nn_pipe_send_multi_fastpath:
{
ELEMENTS_DATAPTR = memd(PIPE+#OFFSET_ELEMENTS_DATAPTR) // {elements, data base}
SPINS = #31 // bounded spin budget
NEWMUTEX = #1 // value that claims the mutex
ATOMIC_PTR = add(PIPE,#OFFSET_MUTEX_HOWEMPTY) // &{howempty, mutex} pair
}
{
loop0(.Lsend_copy_loop,N_ITEMS) // hardware loop over the elements
p1 = cmp.eq(N_ITEMS,#0) // nothing to send?
if (p1.new) jump:nt .Lsend_done
}
.Lsend_busy_spin:
{
MUTEX_HOWEMPTY = memd_locked(ATOMIC_PTR) // LL: {howempty, mutex}
}
{
p0 = cmp.eq(MUTEX,#0) // mutex free...
p0 = !cmp.gt(N_ITEMS,HOWEMPTY) // ...AND room for n_items
if (!p0.new) jump:nt .Lsend_busy
NEWEMPTY = sub(HOWEMPTY,N_ITEMS) // reserve the slots
}
{
memd_locked(ATOMIC_PTR,p0) = NEWMUTEX_EMPTY // SC: lock + reserve atomically
}
// OK, here we have the lock and there's enough room in the pipe
// Copy in n_items of data
{
if (!p0) jump:nt .Lsend_busy_spin // SC failed: retry
SENDIDX = memw(PIPE+#OFFSET_SENDIDX) // safe: we own the lock now
}
.falign
.Lsend_copy_loop:
{
DATA = memd(SENDDATA++#8) // next source element
dcfetch(SENDDATA+#64) // prefetch source ahead
NEWIDX = add(SENDIDX,#1)
}
{
memd(DATAPTR+SENDIDX<<#3) = DATA // store into ring buffer
p0 = cmp.eq(NEWIDX,ELEMENTS)
if (p0.new) SENDIDX = #0 // wrap
if (!p0.new) SENDIDX = NEWIDX
}:endloop0
// Write send_idx
memw(PIPE+#OFFSET_SENDIDX) = SENDIDX
// Unlock mutex
.Lsend_unlock_spin:
MUTEX_HOWEMPTY = memd_locked(ATOMIC_PTR) // LL again for the release
{
p0 = cmp.eq(MUTEX,#1) // still uncontended?
if (!p0.new) jump:nt .Lsend_someone_blocked_mutex
MUTEX = #0 // clear lock; howempty written back unchanged
}
memd_locked(ATOMIC_PTR,p0) = MUTEX_HOWEMPTY // SC: release
if (!p0) jump:nt .Lsend_unlock_spin
// increment howfull
{
#if OFFSET_HOWFULL != 0
#error fixme: offset assumed
#endif
r1:0 = combine(N_ITEMS,PIPE) // r0 = &howfull (== pipe), r1 = n_items
jump nn_sem_add_fastpath // tail call: howfull += n_items
}
.Lsend_done:
jumpr r31
.Lsend_busy:
pause(#10) // back off before re-checking
{
SPINS = add(SPINS,#-1)
p0 = cmp.eq(SPINS,#0)
if (!p0.new) jump:t .Lsend_busy_spin
}
{
jump nn_pipe_send_multi_slowpath // spun out: block properly
}
.Lsend_someone_blocked_mutex:
// A waiter blocked on the mutex while we held it: release through the
// slow path (which can wake it), preserving {n_items, pipe} across the
// call, then bump howfull as usual.
{
allocframe(#8)
DATA = combine(N_ITEMS,PIPE)
}
memd(r29+#0) = DATA // spill {n_items, pipe}
r0 = add(PIPE,#OFFSET_MUTEX)
call nn_mutex_unlock_slowpath
r1:0 = memd(r29+#0) // r0 = pipe (&howfull), r1 = n_items
deallocframe
jump nn_sem_add_fastpath
// BUGFIX: was ".size nn_pipe_send_multi_fast,.-nn_pipe_send_multi_fast" --
// that symbol does not exist (the label is ...fastpath), so .size referenced
// an undefined symbol instead of sizing this function.
.size nn_pipe_send_multi_fastpath,.-nn_pipe_send_multi_fastpath
|
xiao-tai/ics2021 | 1,243 | abstract-machine/am/src/riscv/nemu/trap.S |
#define concat_temp(x, y) x ## y
#define concat(x, y) concat_temp(x, y)
#define MAP(c, f) c(f)
#if __riscv_xlen == 32
#define LOAD lw
#define STORE sw
#define XLEN 4
#else
#define LOAD ld
#define STORE sd
#define XLEN 8
#endif
#define REGS(f) \
f( 1) f( 3) f( 4) f( 5) f( 6) f( 7) f( 8) f( 9) \
f(10) f(11) f(12) f(13) f(14) f(15) f(16) f(17) f(18) f(19) \
f(20) f(21) f(22) f(23) f(24) f(25) f(26) f(27) f(28) f(29) \
f(30) f(31)
#define PUSH(n) STORE concat(x, n), (n * XLEN)(sp);
#define POP(n) LOAD concat(x, n), (n * XLEN)(sp);
#define CONTEXT_SIZE ((32 + 3 + 1) * XLEN)
#define OFFSET_SP ( 2 * XLEN)
#define OFFSET_CAUSE (32 * XLEN)
#define OFFSET_STATUS (33 * XLEN)
#define OFFSET_EPC (34 * XLEN)
.align 3
.globl __am_asm_trap
__am_asm_trap:
addi sp, sp, -CONTEXT_SIZE
MAP(REGS, PUSH)
csrr t0, mcause
csrr t1, mstatus
csrr t2, mepc
STORE t0, OFFSET_CAUSE(sp)
STORE t1, OFFSET_STATUS(sp)
STORE t2, OFFSET_EPC(sp)
# set mstatus.MPRV to pass difftest
li a0, (1 << 17)
or t1, t1, a0
csrw mstatus, t1
mv a0, sp
jal __am_irq_handle
LOAD t1, OFFSET_STATUS(sp)
LOAD t2, OFFSET_EPC(sp)
csrw mstatus, t1
csrw mepc, t2
MAP(REGS, POP)
addi sp, sp, CONTEXT_SIZE
mret
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.