diff --git a/driver/others/blas_server.c b/driver/others/blas_server.c index e3ebc7a7e8..80c28ebda9 100644 --- a/driver/others/blas_server.c +++ b/driver/others/blas_server.c @@ -1074,13 +1074,10 @@ fprintf(STDERR, "Server[%2ld] Calculation started. Mode = 0x%03x M = %3ld N=%3l main_status[cpu] = MAIN_RUNNING1; #endif -//For Loongson servers, like the 3C5000 (featuring 16 cores), applying an -//offset to the buffer is essential for minimizing cache conflicts and optimizing performance. -#if defined(LOONGSON3R5) && !defined(NO_AFFINITY) - char model_name[128]; - get_cpu_model(model_name); - if ((strstr(model_name, "3C5000") != NULL) || (strstr(model_name, "3D5000") != NULL)) - if (sa == NULL) sa = (void *)((BLASLONG)buffer + (WhereAmI() & 0xf) * GEMM_OFFSET_A); +//For target LOONGSON3R5, applying an offset to the buffer is essential +//for minimizing cache conflicts and optimizing performance. +#if defined(ARCH_LOONGARCH64) && !defined(NO_AFFINITY) + if (sa == NULL) sa = (void *)((BLASLONG)buffer + (WhereAmI() & 0xf) * GEMM_OFFSET_A); #endif if (sa == NULL) sa = (void *)((BLASLONG)buffer + GEMM_OFFSET_A); @@ -1157,4 +1154,4 @@ if (queue -> mode & BLAS_PTHREAD) { } -#endif \ No newline at end of file +#endif diff --git a/interface/gemm.c b/interface/gemm.c index c402836ca3..4537b6a78f 100644 --- a/interface/gemm.c +++ b/interface/gemm.c @@ -521,15 +521,10 @@ void CNAME(enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE TransA, enum CBLAS_TRANS buffer = (XFLOAT *)blas_memory_alloc(0); -//For Loongson servers, like the 3C5000 (featuring 16 cores), applying an -//offset to the buffer is essential for minimizing cache conflicts and optimizing performance. 
-#if defined(LOONGSON3R5) && !defined(NO_AFFINITY) - char model_name[128]; - get_cpu_model(model_name); - if ((strstr(model_name, "3C5000") != NULL) || (strstr(model_name, "3D5000") != NULL)) - sa = (XFLOAT *)((BLASLONG)buffer + (WhereAmI() & 0xf) * GEMM_OFFSET_A); - else - sa = (XFLOAT *)((BLASLONG)buffer + GEMM_OFFSET_A); +//For target LOONGSON3R5, applying an offset to the buffer is essential +//for minimizing cache conflicts and optimizing performance. +#if defined(ARCH_LOONGARCH64) && !defined(NO_AFFINITY) + sa = (XFLOAT *)((BLASLONG)buffer + (WhereAmI() & 0xf) * GEMM_OFFSET_A); #else sa = (XFLOAT *)((BLASLONG)buffer +GEMM_OFFSET_A); #endif diff --git a/kernel/generic/laswp_ncopy_6.c b/kernel/generic/laswp_ncopy_6.c new file mode 100644 index 0000000000..85a17a092f --- /dev/null +++ b/kernel/generic/laswp_ncopy_6.c @@ -0,0 +1,276 @@ + +/*********************************************************************/ +/* Copyright 2009, 2010, 2024 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. 
IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. */ +/*********************************************************************/ + +#include <stdio.h> +#include "common.h" + +#define PREFETCHSIZE 4 + +int CNAME(BLASLONG n, BLASLONG k1, BLASLONG k2, FLOAT *a, BLASLONG lda, blasint *ipiv, FLOAT *buffer){ + + BLASLONG i, j, ip; + blasint *piv; + FLOAT *dx1, *dy1; + FLOAT *dx2, *dy2; + FLOAT *dx3, *dy3; + FLOAT *dx4, *dy4; + FLOAT *dx5, *dy5; + FLOAT *dx6, *dy6; + FLOAT atemp1, btemp1; + FLOAT atemp2, btemp2; + FLOAT atemp3, btemp3; + FLOAT atemp4, btemp4; + FLOAT atemp5, btemp5; + FLOAT atemp6, btemp6; + + a--; + ipiv += k1 - 1; + + if (n <= 0) return 0; + if (k1 > k2) return 0; + + j = (n / 6); + if (j > 0) { + do { + piv = ipiv; + i = k1; + + do { + ip = *piv; + piv ++; + + dx1 = a + i; + dy1 = a + ip; + dx2 = a + i + lda * 1; + dy2 = a + ip + lda * 1; + dx3 = a + i + lda * 2; + dy3 = a + ip + lda * 2; + dx4 = a + i + lda * 3; + dy4 = a + ip + lda * 3; + dx5 = a + i + lda * 4; + dy5 = a + ip + lda * 4; + dx6 = a + i + lda * 5; + dy6 = a + ip + lda * 5; + +#ifdef __GNUC__ + __builtin_prefetch(dx1 + PREFETCHSIZE, 0, 1); + __builtin_prefetch(dx2 + PREFETCHSIZE, 0, 1); + __builtin_prefetch(dx3 + 
PREFETCHSIZE, 0, 1); + __builtin_prefetch(dx4 + PREFETCHSIZE, 0, 1); + __builtin_prefetch(dx5 + PREFETCHSIZE, 0, 1); + __builtin_prefetch(dx6 + PREFETCHSIZE, 0, 1); +#endif + + atemp1 = *dx1; + btemp1 = *dy1; + atemp2 = *dx2; + btemp2 = *dy2; + atemp3 = *dx3; + btemp3 = *dy3; + atemp4 = *dx4; + btemp4 = *dy4; + + atemp5 = *dx5; + btemp5 = *dy5; + atemp6 = *dx6; + btemp6 = *dy6; + + if (ip != i) { + *dy1 = atemp1; + *dy2 = atemp2; + *dy3 = atemp3; + *dy4 = atemp4; + *dy5 = atemp5; + *dy6 = atemp6; + *(buffer + 0) = btemp1; + *(buffer + 1) = btemp2; + *(buffer + 2) = btemp3; + *(buffer + 3) = btemp4; + *(buffer + 4) = btemp5; + *(buffer + 5) = btemp6; + } else { + *(buffer + 0) = atemp1; + *(buffer + 1) = atemp2; + *(buffer + 2) = atemp3; + *(buffer + 3) = atemp4; + *(buffer + 4) = atemp5; + *(buffer + 5) = atemp6; + } + + buffer += 6; + + i++; + } while (i <= k2); + + a += 6 * lda; + j --; + } while (j > 0); + } + + if ((n % 6) & 4) { + piv = ipiv; + + ip = *piv; + piv ++; + + dx1 = a + k1; + dy1 = a + ip; + dx2 = a + k1 + lda * 1; + dy2 = a + ip + lda * 1; + dx3 = a + k1 + lda * 2; + dy3 = a + ip + lda * 2; + dx4 = a + k1 + lda * 3; + dy4 = a + ip + lda * 3; + + i = k1; + + do { + atemp1 = *dx1; + atemp2 = *dx2; + atemp3 = *dx3; + atemp4 = *dx4; + + btemp1 = *dy1; + btemp2 = *dy2; + btemp3 = *dy3; + btemp4 = *dy4; + + if (ip != i) { + *dy1 = atemp1; + *dy2 = atemp2; + *dy3 = atemp3; + *dy4 = atemp4; + *(buffer + 0) = btemp1; + *(buffer + 1) = btemp2; + *(buffer + 2) = btemp3; + *(buffer + 3) = btemp4; + } else { + *(buffer + 0) = atemp1; + *(buffer + 1) = atemp2; + *(buffer + 2) = atemp3; + *(buffer + 3) = atemp4; + } + + ip = *piv; + piv ++; + + i++; + dx1 = a + i; + dy1 = a + ip; + dx2 = a + i + lda * 1; + dy2 = a + ip + lda * 1; + dx3 = a + i + lda * 2; + dy3 = a + ip + lda * 2; + dx4 = a + i + lda * 3; + dy4 = a + ip + lda * 3; + + buffer += 4; + + } while (i <= k2); + + a += 4 * lda; + } + + if ((n % 6) & 2) { + piv = ipiv; + + i = k1; + do { + ip = *piv; + 
piv ++; + + dx1 = a + i; + dy1 = a + ip; + dx2 = a + i + lda; + dy2 = a + ip + lda; + + atemp1 = *dx1; + btemp1 = *dy1; + atemp2 = *dx2; + btemp2 = *dy2; + + if (ip != i) { + *dy1 = atemp1; + *dy2 = atemp2; + *(buffer + 0) = btemp1; + *(buffer + 1) = btemp2; + } else { + *(buffer + 0) = atemp1; + *(buffer + 1) = atemp2; + } + + buffer += 2; + + i++; + } while (i <= k2); + + a += 2 * lda; + } + + + if ((n % 6) & 1) { + piv = ipiv; + + i = k1; + do { + ip = *piv; + piv ++; + + dx1 = a + i; + dy1 = a + ip; + atemp1 = *dx1; + btemp1 = *dy1; + + if (ip != i) { + *dy1 = atemp1; + *buffer = btemp1; + } else { + *buffer = atemp1; + } + + buffer ++; + + i++; + } while (i <= k2); + + // a += lda; + } + + return 0; +} \ No newline at end of file diff --git a/kernel/generic/symm_lcopy_6.c b/kernel/generic/symm_lcopy_6.c index ca730e1eef..3a3e2d5b2f 100644 --- a/kernel/generic/symm_lcopy_6.c +++ b/kernel/generic/symm_lcopy_6.c @@ -41,98 +41,141 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLONG posY, FLOAT *b){ - BLASLONG i, js, offset; - - FLOAT data01, data02, data03, data04; - FLOAT *ao1, *ao2, *ao3, *ao4; + BLASLONG i, js, offset; + + FLOAT data01, data02, data03, data04, data05, data06; + FLOAT *ao1, *ao2, *ao3, *ao4, *ao5, *ao6; + + js = (n / 6); + while (js > 0){ + + offset = posX - posY; + + if (offset > 0) ao1 = a + posX + 0 + posY * lda; else ao1 = a + posY + (posX + 0) * lda; + if (offset > -1) ao2 = a + posX + 1 + posY * lda; else ao2 = a + posY + (posX + 1) * lda; + if (offset > -2) ao3 = a + posX + 2 + posY * lda; else ao3 = a + posY + (posX + 2) * lda; + if (offset > -3) ao4 = a + posX + 3 + posY * lda; else ao4 = a + posY + (posX + 3) * lda; + if (offset > -4) ao5 = a + posX + 4 + posY * lda; else ao5 = a + posY + (posX + 4) * lda; + if (offset > -5) ao6 = a + posX + 5 + posY * lda; else ao6 = a + posY + (posX + 5) * lda; + + i = m; + + while (i > 0) { + data01 = *(ao1 + 0); + data02 = *(ao2 + 0); + data03 = *(ao3 + 0); + data04 
= *(ao4 + 0); + data05 = *(ao5 + 0); + data06 = *(ao6 + 0); + + if (offset > 0) ao1 += lda; else ao1 ++; + if (offset > -1) ao2 += lda; else ao2 ++; + if (offset > -2) ao3 += lda; else ao3 ++; + if (offset > -3) ao4 += lda; else ao4 ++; + if (offset > -4) ao5 += lda; else ao5 ++; + if (offset > -5) ao6 += lda; else ao6 ++; + + b[ 0] = data01; + b[ 1] = data02; + b[ 2] = data03; + b[ 3] = data04; + b[ 4] = data05; + b[ 5] = data06; + + b += 6; + + offset --; + i --; + } + + posX += 6; + js --; + } - js = (n >> 2); - while (js > 0){ + if ((n - n/6) & 4) { + offset = posX - posY; - offset = posX - posY; + if (offset > 0) ao1 = a + posX + 0 + posY * lda; else ao1 = a + posY + (posX + 0) * lda; + if (offset > -1) ao2 = a + posX + 1 + posY * lda; else ao2 = a + posY + (posX + 1) * lda; + if (offset > -2) ao3 = a + posX + 2 + posY * lda; else ao3 = a + posY + (posX + 2) * lda; + if (offset > -3) ao4 = a + posX + 3 + posY * lda; else ao4 = a + posY + (posX + 3) * lda; - if (offset > 0) ao1 = a + posX + 0 + posY * lda; else ao1 = a + posY + (posX + 0) * lda; - if (offset > -1) ao2 = a + posX + 1 + posY * lda; else ao2 = a + posY + (posX + 1) * lda; - if (offset > -2) ao3 = a + posX + 2 + posY * lda; else ao3 = a + posY + (posX + 2) * lda; - if (offset > -3) ao4 = a + posX + 3 + posY * lda; else ao4 = a + posY + (posX + 3) * lda; + i = m; - i = m; + while (i > 0) { + data01 = *(ao1 + 0); + data02 = *(ao2 + 0); + data03 = *(ao3 + 0); + data04 = *(ao4 + 0); - while (i > 0) { - data01 = *(ao1 + 0); - data02 = *(ao2 + 0); - data03 = *(ao3 + 0); - data04 = *(ao4 + 0); + if (offset > 0) ao1 += lda; else ao1 ++; + if (offset > -1) ao2 += lda; else ao2 ++; + if (offset > -2) ao3 += lda; else ao3 ++; + if (offset > -3) ao4 += lda; else ao4 ++; - if (offset > 0) ao1 += lda; else ao1 ++; - if (offset > -1) ao2 += lda; else ao2 ++; - if (offset > -2) ao3 += lda; else ao3 ++; - if (offset > -3) ao4 += lda; else ao4 ++; + b[ 0] = data01; + b[ 1] = data02; + b[ 2] = data03; + b[ 3] = 
data04; - b[ 0] = data01; - b[ 1] = data02; - b[ 2] = data03; - b[ 3] = data04; + b += 4; - b += 4; + offset --; + i --; + } - offset --; - i --; + posX += 4; } - posX += 4; - js --; - } + if ((n - n/6) & 2) { - if (n & 2) { + offset = posX - posY; - offset = posX - posY; + if (offset > 0) ao1 = a + posX + 0 + posY * lda; else ao1 = a + posY + (posX + 0) * lda; + if (offset > -1) ao2 = a + posX + 1 + posY * lda; else ao2 = a + posY + (posX + 1) * lda; - if (offset > 0) ao1 = a + posX + 0 + posY * lda; else ao1 = a + posY + (posX + 0) * lda; - if (offset > -1) ao2 = a + posX + 1 + posY * lda; else ao2 = a + posY + (posX + 1) * lda; + i = m; - i = m; + while (i > 0) { + data01 = *(ao1 + 0); + data02 = *(ao2 + 0); - while (i > 0) { - data01 = *(ao1 + 0); - data02 = *(ao2 + 0); + if (offset > 0) ao1 += lda; else ao1 ++; + if (offset > -1) ao2 += lda; else ao2 ++; - if (offset > 0) ao1 += lda; else ao1 ++; - if (offset > -1) ao2 += lda; else ao2 ++; + b[ 0] = data01; + b[ 1] = data02; - b[ 0] = data01; - b[ 1] = data02; + b += 2; - b += 2; + offset --; + i --; + } - offset --; - i --; + posX += 2; } - posX += 2; - } - - if (n & 1) { + if ((n - n/6) & 1) { - offset = posX - posY; + offset = posX - posY; - if (offset > 0) ao1 = a + posX + 0 + posY * lda; else ao1 = a + posY + (posX + 0) * lda; + if (offset > 0) ao1 = a + posX + 0 + posY * lda; else ao1 = a + posY + (posX + 0) * lda; - i = m; + i = m; - while (i > 0) { - data01 = *(ao1 + 0); + while (i > 0) { + data01 = *(ao1 + 0); - if (offset > 0) ao1 += lda; else ao1 ++; + if (offset > 0) ao1 += lda; else ao1 ++; - b[ 0] = data01; + b[ 0] = data01; - b ++; + b ++; - offset --; - i --; + offset --; + i --; + } } - } - return 0; + return 0; } diff --git a/kernel/generic/symm_ucopy_6.c b/kernel/generic/symm_ucopy_6.c index 6dbb861e98..a83d937d0b 100644 --- a/kernel/generic/symm_ucopy_6.c +++ b/kernel/generic/symm_ucopy_6.c @@ -41,96 +41,140 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, 
BLASLONG posY, FLOAT *b){ - BLASLONG i, js, offset; + BLASLONG i, js, offset; + + FLOAT data01, data02, data03, data04, data05, data06; + FLOAT *ao1, *ao2, *ao3, *ao4, *ao5, *ao6; + + js = (n / 6); + while (js > 0){ + + offset = posX - posY; + + if (offset > 0) ao1 = a + posY + (posX + 0) * lda; else ao1 = a + posX + 0 + posY * lda; + if (offset > -1) ao2 = a + posY + (posX + 1) * lda; else ao2 = a + posX + 1 + posY * lda; + if (offset > -2) ao3 = a + posY + (posX + 2) * lda; else ao3 = a + posX + 2 + posY * lda; + if (offset > -3) ao4 = a + posY + (posX + 3) * lda; else ao4 = a + posX + 3 + posY * lda; + if (offset > -4) ao5 = a + posY + (posX + 4) * lda; else ao5 = a + posX + 4 + posY * lda; + if (offset > -5) ao6 = a + posY + (posX + 5) * lda; else ao6 = a + posX + 5 + posY * lda; + + i = m; + + while (i > 0) { + data01 = *(ao1 + 0); + data02 = *(ao2 + 0); + data03 = *(ao3 + 0); + data04 = *(ao4 + 0); + data05 = *(ao5 + 0); + data06 = *(ao6 + 0); + + if (offset > 0) ao1 ++; else ao1 += lda; + if (offset > -1) ao2 ++; else ao2 += lda; + if (offset > -2) ao3 ++; else ao3 += lda; + if (offset > -3) ao4 ++; else ao4 += lda; + if (offset > -4) ao5 ++; else ao5 += lda; + if (offset > -5) ao6 ++; else ao6 += lda; + + b[ 0] = data01; + b[ 1] = data02; + b[ 2] = data03; + b[ 3] = data04; + b[ 4] = data05; + b[ 5] = data06; + + b += 6; + + offset --; + i --; + } + + posX += 6; + js --; + } - FLOAT data01, data02, data03, data04; - FLOAT *ao1, *ao2, *ao3, *ao4; + if ((n - n/6) & 4) { - js = (n >> 2); - while (js > 0){ + offset = posX - posY; - offset = posX - posY; + if (offset > 0) ao1 = a + posY + (posX + 0) * lda; else ao1 = a + posX + 0 + posY * lda; + if (offset > -1) ao2 = a + posY + (posX + 1) * lda; else ao2 = a + posX + 1 + posY * lda; + if (offset > -2) ao3 = a + posY + (posX + 2) * lda; else ao3 = a + posX + 2 + posY * lda; + if (offset > -3) ao4 = a + posY + (posX + 3) * lda; else ao4 = a + posX + 3 + posY * lda; - if (offset > 0) ao1 = a + posY + (posX + 0) * 
lda; else ao1 = a + posX + 0 + posY * lda; - if (offset > -1) ao2 = a + posY + (posX + 1) * lda; else ao2 = a + posX + 1 + posY * lda; - if (offset > -2) ao3 = a + posY + (posX + 2) * lda; else ao3 = a + posX + 2 + posY * lda; - if (offset > -3) ao4 = a + posY + (posX + 3) * lda; else ao4 = a + posX + 3 + posY * lda; + i = m; - i = m; + while (i > 0) { + data01 = *(ao1 + 0); + data02 = *(ao2 + 0); + data03 = *(ao3 + 0); + data04 = *(ao4 + 0); - while (i > 0) { - data01 = *(ao1 + 0); - data02 = *(ao2 + 0); - data03 = *(ao3 + 0); - data04 = *(ao4 + 0); + if (offset > 0) ao1 ++; else ao1 += lda; + if (offset > -1) ao2 ++; else ao2 += lda; + if (offset > -2) ao3 ++; else ao3 += lda; + if (offset > -3) ao4 ++; else ao4 += lda; - if (offset > 0) ao1 ++; else ao1 += lda; - if (offset > -1) ao2 ++; else ao2 += lda; - if (offset > -2) ao3 ++; else ao3 += lda; - if (offset > -3) ao4 ++; else ao4 += lda; + b[ 0] = data01; + b[ 1] = data02; + b[ 2] = data03; + b[ 3] = data04; - b[ 0] = data01; - b[ 1] = data02; - b[ 2] = data03; - b[ 3] = data04; + b += 4; - b += 4; + offset --; + i --; + } - offset --; - i --; + posX += 4; } - posX += 4; - js --; - } + if ((n - n/6) & 2) { + offset = posX - posY; - if (n & 2) { - offset = posX - posY; + if (offset > 0) ao1 = a + posY + (posX + 0) * lda; else ao1 = a + posX + 0 + posY * lda; + if (offset > -1) ao2 = a + posY + (posX + 1) * lda; else ao2 = a + posX + 1 + posY * lda; - if (offset > 0) ao1 = a + posY + (posX + 0) * lda; else ao1 = a + posX + 0 + posY * lda; - if (offset > -1) ao2 = a + posY + (posX + 1) * lda; else ao2 = a + posX + 1 + posY * lda; + i = m; - i = m; + while (i > 0) { + data01 = *(ao1 + 0); + data02 = *(ao2 + 0); - while (i > 0) { - data01 = *(ao1 + 0); - data02 = *(ao2 + 0); + if (offset > 0) ao1 ++; else ao1 += lda; + if (offset > -1) ao2 ++; else ao2 += lda; - if (offset > 0) ao1 ++; else ao1 += lda; - if (offset > -1) ao2 ++; else ao2 += lda; + b[ 0] = data01; + b[ 1] = data02; - b[ 0] = data01; - b[ 1] = 
data02; + b += 2; - b += 2; + offset --; + i --; + } - offset --; - i --; + posX += 2; } - posX += 2; - } - - if (n & 1) { - offset = posX - posY; + if ((n - n/6) & 1) { + offset = posX - posY; - if (offset > 0) ao1 = a + posY + (posX + 0) * lda; else ao1 = a + posX + 0 + posY * lda; + if (offset > 0) ao1 = a + posY + (posX + 0) * lda; else ao1 = a + posX + 0 + posY * lda; - i = m; + i = m; - while (i > 0) { - data01 = *(ao1 + 0); + while (i > 0) { + data01 = *(ao1 + 0); - if (offset > 0) ao1 ++; else ao1 += lda; + if (offset > 0) ao1 ++; else ao1 += lda; - b[ 0] = data01; + b[ 0] = data01; - b ++; + b ++; - offset --; - i --; + offset --; + i --; + } } - } - return 0; + return 0; } diff --git a/kernel/generic/trmm_lncopy_6.c b/kernel/generic/trmm_lncopy_6.c index 0dcfb965ac..999f0d367d 100644 --- a/kernel/generic/trmm_lncopy_6.c +++ b/kernel/generic/trmm_lncopy_6.c @@ -41,444 +41,510 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLONG posY, FLOAT *b){ - BLASLONG i, js; - BLASLONG X; - - FLOAT data01, data02, data03, data04, data05, data06, data07, data08; - FLOAT data09, data10, data11, data12, data13, data14, data15, data16; - FLOAT *ao1, *ao2, *ao3, *ao4; - - js = (n >> 2); - - if (js > 0){ - do { - X = posX; - - if (posX <= posY) { - ao1 = a + posY + (posX + 0) * lda; - ao2 = a + posY + (posX + 1) * lda; - ao3 = a + posY + (posX + 2) * lda; - ao4 = a + posY + (posX + 3) * lda; - } else { - ao1 = a + posX + (posY + 0) * lda; - ao2 = a + posX + (posY + 1) * lda; - ao3 = a + posX + (posY + 2) * lda; - ao4 = a + posX + (posY + 3) * lda; - } - - i = (m >> 2); - if (i > 0) { - do { - if (X > posY) { - data01 = *(ao1 + 0); - data02 = *(ao1 + 1); - data03 = *(ao1 + 2); - data04 = *(ao1 + 3); - - data05 = *(ao2 + 0); - data06 = *(ao2 + 1); - data07 = *(ao2 + 2); - data08 = *(ao2 + 3); - - data09 = *(ao3 + 0); - data10 = *(ao3 + 1); - data11 = *(ao3 + 2); - data12 = *(ao3 + 3); - - data13 = *(ao4 + 0); - data14 = *(ao4 + 1); - data15 = 
*(ao4 + 2); - data16 = *(ao4 + 3); - - b[ 0] = data01; - b[ 1] = data05; - b[ 2] = data09; - b[ 3] = data13; - b[ 4] = data02; - b[ 5] = data06; - b[ 6] = data10; - b[ 7] = data14; - - b[ 8] = data03; - b[ 9] = data07; - b[10] = data11; - b[11] = data15; - b[12] = data04; - b[13] = data08; - b[14] = data12; - b[15] = data16; - - ao1 += 4; - ao2 += 4; - ao3 += 4; - ao4 += 4; - b += 16; - - } else - if (X < posY) { - ao1 += 4 * lda; - ao2 += 4 * lda; - ao3 += 4 * lda; - ao4 += 4 * lda; - b += 16; - - } else { + BLASLONG i, js, ii; + BLASLONG X; + + FLOAT data01, data02, data05, data06; + FLOAT *ao1, *ao2, *ao3, *ao4, *ao5, *ao6; + + js = (n / 6); + + if (js > 0){ + do { + X = posX; + + if (posX <= posY) { + ao1 = a + posY + (posX + 0) * lda; + ao2 = a + posY + (posX + 1) * lda; + ao3 = a + posY + (posX + 2) * lda; + ao4 = a + posY + (posX + 3) * lda; + ao5 = a + posY + (posX + 4) * lda; + ao6 = a + posY + (posX + 5) * lda; + } else { + ao1 = a + posX + (posY + 0) * lda; + ao2 = a + posX + (posY + 1) * lda; + ao3 = a + posX + (posY + 2) * lda; + ao4 = a + posX + (posY + 3) * lda; + ao5 = a + posX + (posY + 4) * lda; + ao6 = a + posX + (posY + 5) * lda; + } + + i = (m / 6); + if (i > 0) { + do { + if (X > posY) { + for (ii = 0; ii < 6; ii++){ + + b[ 0] = *(ao1 + 0); + b[ 1] = *(ao2 + 0); + b[ 2] = *(ao3 + 0); + b[ 3] = *(ao4 + 0); + b[ 4] = *(ao5 + 0); + b[ 5] = *(ao6 + 0); + + ao1 ++; + ao2 ++; + ao3 ++; + ao4 ++; + ao5 ++; + ao6 ++; + b += 6; + } + + } else if (X < posY) { + ao1 += 6 * lda; + ao2 += 6 * lda; + ao3 += 6 * lda; + ao4 += 6 * lda; + ao5 += 6 * lda; + ao6 += 6 * lda; + b += 36; + + } else { #ifdef UNIT - data02 = *(ao1 + 1); - data03 = *(ao1 + 2); - data04 = *(ao1 + 3); - - data07 = *(ao2 + 2); - data08 = *(ao2 + 3); - - data12 = *(ao3 + 3); - - b[ 0] = ONE; - b[ 1] = ZERO; - b[ 2] = ZERO; - b[ 3] = ZERO; - b[ 4] = data02; - b[ 5] = ONE; - b[ 6] = ZERO; - b[ 7] = ZERO; - - b[ 8] = data03; - b[ 9] = data07; - b[10] = ONE; - b[11] = ZERO; - b[12] = data04; 
- b[13] = data08; - b[14] = data12; - b[15] = ONE; + b[ 0] = ONE; #else - data01 = *(ao1 + 0); - data02 = *(ao1 + 1); - data03 = *(ao1 + 2); - data04 = *(ao1 + 3); - - data06 = *(ao2 + 1); - data07 = *(ao2 + 2); - data08 = *(ao2 + 3); - - data11 = *(ao3 + 2); - data12 = *(ao3 + 3); - - data16 = *(ao4 + 3); - - b[ 0] = data01; - b[ 1] = ZERO; - b[ 2] = ZERO; - b[ 3] = ZERO; - b[ 4] = data02; - b[ 5] = data06; - b[ 6] = ZERO; - b[ 7] = ZERO; - - b[ 8] = data03; - b[ 9] = data07; - b[10] = data11; - b[11] = ZERO; - b[12] = data04; - b[13] = data08; - b[14] = data12; - b[15] = data16; + b[ 0] = *(ao1 + 0); #endif - ao1 += 4; - ao2 += 4; - ao3 += 4; - ao4 += 4; - b += 16; - } - - X += 4; - i --; - } while (i > 0); - } - - i = (m & 3); - if (i) { - - if (X > posY) { - - if (m & 2) { - data01 = *(ao1 + 0); - data02 = *(ao1 + 1); - data03 = *(ao2 + 0); - data04 = *(ao2 + 1); - data05 = *(ao3 + 0); - data06 = *(ao3 + 1); - data07 = *(ao4 + 0); - data08 = *(ao4 + 1); - - b[ 0] = data01; - b[ 1] = data03; - b[ 2] = data05; - b[ 3] = data07; - b[ 4] = data02; - b[ 5] = data04; - b[ 6] = data06; - b[ 7] = data08; - - ao1 += 2; - ao2 += 2; - ao3 += 2; - ao4 += 2; - b += 8; - } - - if (m & 1) { - data01 = *(ao1 + 0); - data02 = *(ao2 + 0); - data03 = *(ao3 + 0); - data04 = *(ao4 + 0); - - b[ 0] = data01; - b[ 1] = data02; - b[ 2] = data03; - b[ 3] = data04; - - ao1 += 1; - ao2 += 1; - ao3 += 1; - ao4 += 1; - b += 4; - } - - } else - if (X < posY) { - if (m & 2) { - ao1 += 2 * lda; - ao2 += 2 * lda; - - b += 8; - } - - if (m & 1) { - ao1 += lda; - b += 4; - } - - } else { + b[ 1] = ZERO; + b[ 2] = ZERO; + b[ 3] = ZERO; + b[ 4] = ZERO; + b[ 5] = ZERO; + + b[ 6] = *(ao1 + 1); +#ifdef UNIT + b[ 7] = ONE; +#else + b[ 7] = *(ao2 + 1); +#endif + b[ 8] = ZERO; + b[ 9] = ZERO; + b[10] = ZERO; + b[11] = ZERO; + + b[12] = *(ao1 + 2); + b[13] = *(ao2 + 2); +#ifdef UNIT + b[14] = ONE; +#else + b[14] = *(ao3 + 2); +#endif + b[15] = ZERO; + b[16] = ZERO; + b[17] = ZERO; + + b[18] = *(ao1 + 3); 
+ b[19] = *(ao2 + 3); + b[20] = *(ao3 + 3); +#ifdef UNIT + b[21] = ONE; +#else + b[21] = *(ao4 + 3); +#endif + b[22] = ZERO; + b[23] = ZERO; + + b[24] = *(ao1 + 4); + b[25] = *(ao2 + 4); + b[26] = *(ao3 + 4); + b[27] = *(ao4 + 4); +#ifdef UNIT + b[28] = ONE; +#else + b[28] = *(ao5 + 4); +#endif + b[29] = ZERO; + + b[30] = *(ao1 + 5); + b[31] = *(ao2 + 5); + b[32] = *(ao3 + 5); + b[33] = *(ao4 + 5); + b[34] = *(ao5 + 5); +#ifdef UNIT + b[35] = ONE; +#else + b[35] = *(ao6 + 5); +#endif + ao1 += 6; + ao2 += 6; + ao3 += 6; + ao4 += 6; + ao5 += 6; + ao6 += 6; + b += 36; + } + + X += 6; + i --; + } while (i > 0); + } + + i = (m % 6); + if (i) { + + if (X > posY) { + for (ii = 0; ii < i; ii++){ + b[ 0] = *(ao1 + 0); + b[ 1] = *(ao2 + 0); + b[ 2] = *(ao3 + 0); + b[ 3] = *(ao4 + 0); + b[ 4] = *(ao5 + 0); + b[ 5] = *(ao6 + 0); + + ao1 ++; + ao2 ++; + ao3 ++; + ao4 ++; + ao5 ++; + ao6 ++; + b += 6; + } + + } else if (X < posY) { + + b += 6 * i; + + } else { +#ifdef UNIT + b[ 0] = ONE; +#else + b[ 0] = *(ao1 + 0); +#endif + b[ 1] = ZERO; + b[ 2] = ZERO; + b[ 3] = ZERO; + b[ 4] = ZERO; + b[ 5] = ZERO; + b += 6; + + if (i >= 2) { + b[ 0] = *(ao1 + 1); +#ifdef UNIT + b[ 1] = ONE; +#else + b[ 1] = *(ao2 + 1); +#endif + b[ 2] = ZERO; + b[ 3] = ZERO; + b[ 4] = ZERO; + b[ 5] = ZERO; + b += 6; + } + + if (i >= 3) { + b[ 0] = *(ao1 + 2); + b[ 1] = *(ao2 + 2); +#ifdef UNIT + b[ 2] = ONE; +#else + b[ 2] = *(ao3 + 2); +#endif + b[ 3] = ZERO; + b[ 4] = ZERO; + b[ 5] = ZERO; + b += 6; + } + + if (i >= 4) { + b[ 0] = *(ao1 + 3); + b[ 1] = *(ao2 + 3); + b[ 2] = *(ao3 + 3); +#ifdef UNIT + b[ 3] = ONE; +#else + b[ 3] = *(ao4 + 3); +#endif + b[ 4] = ZERO; + b[ 5] = ZERO; + b += 6; + } + + if (i >= 5) { + b[ 0] = *(ao1 + 4); + b[ 1] = *(ao2 + 4); + b[ 2] = *(ao3 + 4); + b[ 3] = *(ao4 + 4); +#ifdef UNIT + b[ 4] = ONE; +#else + b[ 4] = *(ao5 + 4); +#endif + b[ 5] = ZERO; + b += 6; + } + } + } + + posY += 6; + js --; + } while (js > 0); + } /* End of main loop */ + + if ((n % 6) & 4){ + X = posX; + 
+ if (posX <= posY) { + ao1 = a + posY + (posX + 0) * lda; + ao2 = a + posY + (posX + 1) * lda; + ao3 = a + posY + (posX + 2) * lda; + ao4 = a + posY + (posX + 3) * lda; + } else { + ao1 = a + posX + (posY + 0) * lda; + ao2 = a + posX + (posY + 1) * lda; + ao3 = a + posX + (posY + 2) * lda; + ao4 = a + posX + (posY + 3) * lda; + } + + i = (m >> 1); + if (i > 0) { + do { + if (X > posY) { + for (ii = 0; ii < 2; ii++){ + + b[ 0] = *(ao1 + 0); + b[ 1] = *(ao2 + 0); + b[ 2] = *(ao3 + 0); + b[ 3] = *(ao4 + 0); + + ao1 ++; + ao2 ++; + ao3 ++; + ao4 ++; + b += 4; + } + } else if (X < posY) { + ao1 += 2 * lda; + ao2 += 2 * lda; + ao3 += 2 * lda; + ao4 += 2 * lda; + b += 8; + } else { +#ifdef UNIT + b[ 0] = ONE; +#else + b[ 0] = *(ao1 + 0); +#endif + b[ 1] = ZERO; + b[ 2] = ZERO; + b[ 3] = ZERO; + + b[ 4] = *(ao1 + 1); +#ifdef UNIT + b[ 5] = ONE; +#else + b[ 5] = *(ao2 + 1); +#endif + b[ 6] = ZERO; + b[ 7] = ZERO; + + b[ 8] = *(ao1 + 2); + b[ 9] = *(ao2 + 2); +#ifdef UNIT + b[ 10] = ONE; +#else + b[ 10] = *(ao3 + 2); +#endif + b[ 11] = ZERO; + + b[ 12] = *(ao1 + 3); + b[ 13] = *(ao2 + 3); + b[ 14] = *(ao3 + 3); +#ifdef UNIT + b[ 15] = ONE; +#else + b[ 15] = *(ao4 + 3); +#endif + + ao1 += 4; + ao2 += 4; + ao3 += 4; + ao4 += 4; + b += 16; + X += 4; + i -= 2; + continue; + } + + X += 2; + i --; + } while (i > 0); + } + + i = (m & 1); + if (i) { + + if (X > posY) { + for (ii = 0; ii < i; ii++){ + + b[ 0] = *(ao1 + 0); + b[ 1] = *(ao2 + 0); + b[ 2] = *(ao3 + 0); + b[ 3] = *(ao4 + 0); + + ao1 ++; + ao2 ++; + ao3 ++; + ao4 ++; + b += 4; + } + } else if (X < posY) { + /* ao1 += i * lda; + ao2 += i * lda; + ao3 += i * lda; + ao4 += i * lda; */ + b += 4 * i; + } else { #ifdef UNIT - data05 = *(ao2 + 0); - data09 = *(ao3 + 0); - data13 = *(ao4 + 0); - - if (i >= 2) { - data10 = *(ao3 + 1); - data14 = *(ao4 + 1); - } - - if (i >= 3) { - data15 = *(ao4 + 2); - } - - b[ 0] = ONE; - b[ 1] = data05; - b[ 2] = data09; - b[ 3] = data13; - b += 4; - - if(i >= 2) { - b[ 0] = ZERO; - b[ 1] = 
ONE; - b[ 2] = data10; - b[ 3] = data14; - b += 4; - } - - if (i >= 3) { - b[ 0] = ZERO; - b[ 1] = ZERO; - b[ 2] = ONE; - b[ 3] = data15; - b += 4; - } + b[ 0] = ONE; #else - data01 = *(ao1 + 0); - data05 = *(ao2 + 0); - data09 = *(ao3 + 0); - data13 = *(ao4 + 0); - - if (i >= 2) { - data06 = *(ao2 + 1); - data10 = *(ao3 + 1); - data14 = *(ao4 + 1); - } - - if (i >= 3) { - data11 = *(ao3 + 2); - data15 = *(ao4 + 2); - } - - b[ 0] = data01; - b[ 1] = data05; - b[ 2] = data09; - b[ 3] = data13; - b += 4; - - if(i >= 2) { - b[ 0] = ZERO; - b[ 1] = data06; - b[ 2] = data10; - b[ 3] = data14; - b += 4; - } - - if (i >= 3) { - b[ 0] = ZERO; - b[ 1] = ZERO; - b[ 2] = data11; - b[ 3] = data15; - b += 4; - } + b[ 0] = *(ao1 + 0); #endif - } - } - - posY += 4; - js --; - } while (js > 0); - } /* End of main loop */ - - - if (n & 2){ - X = posX; - - if (posX <= posY) { - ao1 = a + posY + (posX + 0) * lda; - ao2 = a + posY + (posX + 1) * lda; - } else { - ao1 = a + posX + (posY + 0) * lda; - ao2 = a + posX + (posY + 1) * lda; - } - - i = (m >> 1); - if (i > 0) { - do { - if (X > posY) { - data01 = *(ao1 + 0); - data02 = *(ao1 + 1); - data05 = *(ao2 + 0); - data06 = *(ao2 + 1); - - b[ 0] = data01; - b[ 1] = data05; - b[ 2] = data02; - b[ 3] = data06; - - ao1 += 2; - ao2 += 2; - b += 4; - - } else - if (X < posY) { - ao1 += 2 * lda; - ao2 += 2 * lda; - b += 4; - } else { + b[ 1] = ZERO; + b[ 2] = ZERO; + b[ 3] = ZERO; + b += 4; + } + } + + posY += 4; + } + + + if ((n % 6) & 2){ + X = posX; + + if (posX <= posY) { + ao1 = a + posY + (posX + 0) * lda; + ao2 = a + posY + (posX + 1) * lda; + } else { + ao1 = a + posX + (posY + 0) * lda; + ao2 = a + posX + (posY + 1) * lda; + } + + i = (m >> 1); + if (i > 0) { + do { + if (X > posY) { + data01 = *(ao1 + 0); + data02 = *(ao1 + 1); + data05 = *(ao2 + 0); + data06 = *(ao2 + 1); + + b[ 0] = data01; + b[ 1] = data05; + b[ 2] = data02; + b[ 3] = data06; + + ao1 += 2; + ao2 += 2; + b += 4; + + } else if (X < posY) { + ao1 += 2 * lda; + ao2 
+= 2 * lda; + b += 4; + } else { #ifdef UNIT - data02 = *(ao1 + 1); + data02 = *(ao1 + 1); - b[ 0] = ONE; - b[ 1] = ZERO; - b[ 2] = data02; - b[ 3] = ONE; + b[ 0] = ONE; + b[ 1] = ZERO; + b[ 2] = data02; + b[ 3] = ONE; #else - data01 = *(ao1 + 0); - data02 = *(ao1 + 1); - data06 = *(ao2 + 1); - - b[ 0] = data01; - b[ 1] = ZERO; - b[ 2] = data02; - b[ 3] = data06; + data01 = *(ao1 + 0); + data02 = *(ao1 + 1); + data06 = *(ao2 + 1); + + b[ 0] = data01; + b[ 1] = ZERO; + b[ 2] = data02; + b[ 3] = data06; #endif - ao1 += 2; - ao2 += 2; - - b += 4; - } - - X += 2; - i --; - } while (i > 0); - } - - i = (m & 1); - if (i) { - - if (X > posY) { - data01 = *(ao1 + 0); - data02 = *(ao2 + 0); - b[ 0] = data01; - b[ 1] = data02; - - ao1 += 1; - ao2 += 1; - b += 2; - } else - if (X < posY) { - ao1 += lda; - b += 2; - } else { + ao1 += 2; + ao2 += 2; + + b += 4; + } + + X += 2; + i --; + } while (i > 0); + } + + i = (m & 1); + if (i) { + + if (X > posY) { + data01 = *(ao1 + 0); + data02 = *(ao2 + 0); + b[ 0] = data01; + b[ 1] = data02; + + ao1 += 1; + ao2 += 1; + b += 2; + } else if (X < posY) { + ao1 += lda; + b += 2; + } else { #ifdef UNIT - data05 = *(ao2 + 0); + data05 = *(ao2 + 0); - b[ 0] = ONE; - b[ 1] = data05; + b[ 0] = ONE; + b[ 1] = data05; #else - data01 = *(ao1 + 0); - data05 = *(ao2 + 0); + data01 = *(ao1 + 0); + data05 = *(ao2 + 0); - b[ 0] = data01; - b[ 1] = data05; + b[ 0] = data01; + b[ 1] = data05; #endif - b += 2; - } - } - posY += 2; - } - - if (n & 1){ - X = posX; - - if (posX <= posY) { - ao1 = a + posY + (posX + 0) * lda; - } else { - ao1 = a + posX + (posY + 0) * lda; - } - - i = m; - if (i > 0) { - do { - if (X > posY) { - data01 = *(ao1 + 0); - b[ 0] = data01; - b += 1; - ao1 += 1; - } else - if (X < posY) { - b += 1; - ao1 += lda; - } else { + b += 2; + } + } + posY += 2; + } + + if ((n % 6) & 1){ + X = posX; + + if (posX <= posY) { + ao1 = a + posY + (posX + 0) * lda; + } else { + ao1 = a + posX + (posY + 0) * lda; + } + + i = m; + if (i > 0) { + do 
{ + if (X > posY) { + data01 = *(ao1 + 0); + b[ 0] = data01; + b += 1; + ao1 += 1; + } else if (X < posY) { + b += 1; + ao1 += lda; + } else { #ifdef UNIT - b[ 0] = ONE; + b[ 0] = ONE; #else - data01 = *(ao1 + 0); - b[ 0] = data01; + data01 = *(ao1 + 0); + b[ 0] = data01; #endif - b += 1; - ao1 += 1; - } + b += 1; + ao1 += 1; + } - X ++; - i --; - } while (i > 0); - } + X ++; + i --; + } while (i > 0); + } - posY += 1; - } + posY += 1; + } - return 0; + return 0; } diff --git a/kernel/generic/trmm_ltcopy_6.c b/kernel/generic/trmm_ltcopy_6.c index 66a7325bb1..7c22450369 100644 --- a/kernel/generic/trmm_ltcopy_6.c +++ b/kernel/generic/trmm_ltcopy_6.c @@ -41,448 +41,511 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLONG posY, FLOAT *b){ - BLASLONG i, js; - BLASLONG X; - - FLOAT data01, data02, data03, data04, data05, data06, data07, data08; - FLOAT data09, data10, data11, data12, data13, data14, data15, data16; - FLOAT *ao1, *ao2, *ao3, *ao4; - - js = (n >> 2); - - if (js > 0){ - do { - X = posX; - - if (posX <= posY) { - ao1 = a + posY + (posX + 0) * lda; - ao2 = a + posY + (posX + 1) * lda; - ao3 = a + posY + (posX + 2) * lda; - ao4 = a + posY + (posX + 3) * lda; - } else { - ao1 = a + posX + (posY + 0) * lda; - ao2 = a + posX + (posY + 1) * lda; - ao3 = a + posX + (posY + 2) * lda; - ao4 = a + posX + (posY + 3) * lda; - } - - i = (m >> 2); - if (i > 0) { - do { - if (X > posY) { - ao1 += 4; - ao2 += 4; - ao3 += 4; - ao4 += 4; - b += 16; - - } else - if (X < posY) { - data01 = *(ao1 + 0); - data02 = *(ao1 + 1); - data03 = *(ao1 + 2); - data04 = *(ao1 + 3); - - data05 = *(ao2 + 0); - data06 = *(ao2 + 1); - data07 = *(ao2 + 2); - data08 = *(ao2 + 3); - - data09 = *(ao3 + 0); - data10 = *(ao3 + 1); - data11 = *(ao3 + 2); - data12 = *(ao3 + 3); - - data13 = *(ao4 + 0); - data14 = *(ao4 + 1); - data15 = *(ao4 + 2); - data16 = *(ao4 + 3); - - b[ 0] = data01; - b[ 1] = data02; - b[ 2] = data03; - b[ 3] = data04; - b[ 4] = data05; - b[ 5] = 
data06; - b[ 6] = data07; - b[ 7] = data08; - - b[ 8] = data09; - b[ 9] = data10; - b[10] = data11; - b[11] = data12; - b[12] = data13; - b[13] = data14; - b[14] = data15; - b[15] = data16; - - ao1 += 4 * lda; - ao2 += 4 * lda; - ao3 += 4 * lda; - ao4 += 4 * lda; - b += 16; - - } else { + BLASLONG i, js, ii; + BLASLONG X; + + FLOAT data01, data02, data05, data06; + FLOAT *ao1, *ao2, *ao3, *ao4, *ao5, *ao6; + + js = (n / 6); + + if (js > 0){ + do { + X = posX; + + if (posX <= posY) { + ao1 = a + posY + (posX + 0) * lda; + ao2 = a + posY + (posX + 1) * lda; + ao3 = a + posY + (posX + 2) * lda; + ao4 = a + posY + (posX + 3) * lda; + ao5 = a + posY + (posX + 4) * lda; + ao6 = a + posY + (posX + 5) * lda; + } else { + ao1 = a + posX + (posY + 0) * lda; + ao2 = a + posX + (posY + 1) * lda; + ao3 = a + posX + (posY + 2) * lda; + ao4 = a + posX + (posY + 3) * lda; + ao5 = a + posX + (posY + 4) * lda; + ao6 = a + posX + (posY + 5) * lda; + } + + i = (m / 6); + if (i > 0) { + do { + if (X > posY) { + ao1 += 6; + ao2 += 6; + ao3 += 6; + ao4 += 6; + ao5 += 6; + ao6 += 6; + b += 36; + + } else if (X < posY) { + for (ii = 0; ii < 6; ii++){ + + b[ 0] = *(ao1 + 0); + b[ 1] = *(ao1 + 1); + b[ 2] = *(ao1 + 2); + b[ 3] = *(ao1 + 3); + b[ 4] = *(ao1 + 4); + b[ 5] = *(ao1 + 5); + + ao1 += lda; + b += 6; + } + + ao2 += 6 * lda; + ao3 += 6 * lda; + ao4 += 6 * lda; + ao5 += 6 * lda; + ao6 += 6 * lda; + + } else { +#ifdef UNIT + b[ 0] = ONE; +#else + b[ 0] = *(ao1 + 0); +#endif + b[ 1] = *(ao1 + 1); + b[ 2] = *(ao1 + 2); + b[ 3] = *(ao1 + 3); + b[ 4] = *(ao1 + 4); + b[ 5] = *(ao1 + 5); + + b[ 6] = ZERO; +#ifdef UNIT + b[ 7] = ONE; +#else + b[ 7] = *(ao2 + 1); +#endif + b[ 8] = *(ao2 + 2); + b[ 9] = *(ao2 + 3); + b[10] = *(ao2 + 4); + b[11] = *(ao2 + 5); + + b[12] = ZERO; + b[13] = ZERO; +#ifdef UNIT + b[14] = ONE; +#else + b[14] = *(ao3 + 2); +#endif + b[15] = *(ao3 + 3); + b[16] = *(ao3 + 4); + b[17] = *(ao3 + 5); + + b[18] = ZERO; + b[19] = ZERO; + b[20] = ZERO; +#ifdef UNIT + b[21] = 
ONE; +#else + b[21] = *(ao4 + 3); +#endif + b[22] = *(ao4 + 4); + b[23] = *(ao4 + 5); + + b[24] = ZERO; + b[25] = ZERO; + b[26] = ZERO; + b[27] = ZERO; +#ifdef UNIT + b[28] = ONE; +#else + b[28] = *(ao5 + 4); +#endif + b[29] = *(ao5 + 5); + + b[30] = ZERO; + b[31] = ZERO; + b[32] = ZERO; + b[33] = ZERO; + b[34] = ZERO; +#ifdef UNIT + b[35] = ONE; +#else + b[35] = *(ao6 + 5); +#endif + + ao1 += 6; + ao2 += 6; + ao3 += 6; + ao4 += 6; + ao5 += 6; + ao6 += 6; + b += 36; + } + + X += 6; + i --; + } while (i > 0); + } + + i = (m % 6); + if (i) { + + if (X > posY) { + + b += 6 * i; + + } else if (X < posY) { + for (ii = 0; ii < i; ii++){ + + b[ 0] = *(ao1 + 0); + b[ 1] = *(ao1 + 1); + b[ 2] = *(ao1 + 2); + b[ 3] = *(ao1 + 3); + b[ 4] = *(ao1 + 4); + b[ 5] = *(ao1 + 5); + + ao1 += lda; + ao2 += lda; + ao3 += lda; + ao4 += lda; + ao5 += lda; + ao6 += lda; + b += 6; + } + + } else { +#ifdef UNIT + b[ 0] = ONE; +#else + b[ 0] = *(ao1 + 0); +#endif + b[ 1] = *(ao1 + 1); + b[ 2] = *(ao1 + 2); + b[ 3] = *(ao1 + 3); + b[ 4] = *(ao1 + 4); + b[ 5] = *(ao1 + 5); + b += 6; + + if (i >= 2) { + b[ 0] = ZERO; +#ifdef UNIT + b[ 1] = ONE; +#else + b[ 1] = *(ao2 + 1); +#endif + b[ 2] = *(ao2 + 2); + b[ 3] = *(ao2 + 3); + b[ 4] = *(ao2 + 4); + b[ 5] = *(ao2 + 5); + b += 6; + } + + if (i >= 3) { + b[ 0] = ZERO; + b[ 1] = ZERO; +#ifdef UNIT + b[ 2] = ONE; +#else + b[ 2] = *(ao3 + 2); +#endif + b[ 3] = *(ao3 + 3); + b[ 4] = *(ao3 + 4); + b[ 5] = *(ao3 + 5); + b += 6; + } + + if (i >= 4) { + b[ 0] = ZERO; + b[ 1] = ZERO; + b[ 2] = ZERO; +#ifdef UNIT + b[ 3] = ONE; +#else + b[ 3] = *(ao4 + 3); +#endif + b[ 4] = *(ao4 + 4); + b[ 5] = *(ao4 + 5); + b += 6; + } + + if (i >= 5) { + b[ 0] = ZERO; + b[ 1] = ZERO; + b[ 2] = ZERO; + b[ 3] = ZERO; +#ifdef UNIT + b[ 4] = ONE; +#else + b[ 4] = *(ao5 + 4); +#endif + b[ 5] = *(ao5 + 5); + b += 6; + } + } + } + + posY += 6; + js --; + } while (js > 0); + } /* End of main loop */ + + if ((n % 6) & 4){ + X = posX; + + if (posX <= posY) { + ao1 = a + posY + 
(posX + 0) * lda; + ao2 = a + posY + (posX + 1) * lda; + ao3 = a + posY + (posX + 2) * lda; + ao4 = a + posY + (posX + 3) * lda; + } else { + ao1 = a + posX + (posY + 0) * lda; + ao2 = a + posX + (posY + 1) * lda; + ao3 = a + posX + (posY + 2) * lda; + ao4 = a + posX + (posY + 3) * lda; + } + + i = (m >> 1); + if (i > 0) { + do { + if (X > posY) { + ao1 += 2; + ao2 += 2; + ao3 += 2; + ao4 += 2; + b += 8; + } else if (X < posY) { + + for (ii = 0; ii < 2; ii++){ + b[ 0] = *(ao1 + 0); + b[ 1] = *(ao1 + 1); + b[ 2] = *(ao1 + 2); + b[ 3] = *(ao1 + 3); + ao1 += lda; + b += 4; + } + + ao2 += 2 * lda; + ao3 += 2 * lda; + ao4 += 2 * lda; + } else { + +#ifdef UNIT + b[ 0] = ONE; +#else + b[ 0] = *(ao1 + 0); +#endif + b[ 1] = *(ao1 + 1); + b[ 2] = *(ao1 + 2); + b[ 3] = *(ao1 + 3); + + b[ 4] = ZERO; +#ifdef UNIT + b[ 5] = ONE; +#else + b[ 5] = *(ao2 + 1); +#endif + b[ 6] = *(ao2 + 2); + b[ 7] = *(ao2 + 3); + b[ 8] = ZERO; + b[ 9] = ZERO; #ifdef UNIT - data02 = *(ao1 + 1); - data03 = *(ao1 + 2); - data04 = *(ao1 + 3); - - data07 = *(ao2 + 2); - data08 = *(ao2 + 3); - - data12 = *(ao3 + 3); - - b[ 0] = ONE; - b[ 1] = data02; - b[ 2] = data03; - b[ 3] = data04; - - b[ 4] = ZERO; - b[ 5] = ONE; - b[ 6] = data07; - b[ 7] = data08; - - b[ 8] = ZERO; - b[ 9] = ZERO; - b[10] = ONE; - b[11] = data12; - - b[12] = ZERO; - b[13] = ZERO; - b[14] = ZERO; - b[15] = ONE; + b[ 10] = ONE; #else - data01 = *(ao1 + 0); - data02 = *(ao1 + 1); - data03 = *(ao1 + 2); - data04 = *(ao1 + 3); - - data06 = *(ao2 + 1); - data07 = *(ao2 + 2); - data08 = *(ao2 + 3); - - data11 = *(ao3 + 2); - data12 = *(ao3 + 3); - - data16 = *(ao4 + 3); - - b[ 0] = data01; - b[ 1] = data02; - b[ 2] = data03; - b[ 3] = data04; - b[ 4] = ZERO; - b[ 5] = data06; - b[ 6] = data07; - b[ 7] = data08; - - b[ 8] = ZERO; - b[ 9] = ZERO; - b[10] = data11; - b[11] = data12; - b[12] = ZERO; - b[13] = ZERO; - b[14] = ZERO; - b[15] = data16; + b[ 10] = *(ao3 + 2); #endif - ao1 += 4; - ao2 += 4; - ao3 += 4; - ao4 += 4; - b += 16; - } - 
- X += 4; - i --; - } while (i > 0); - } - - i = (m & 3); - if (i) { - - if (X > posY) { - - if (m & 2) { - ao1 += 2; - ao2 += 2; - ao3 += 2; - ao4 += 2; - b += 8; - } - - if (m & 1) { - ao1 += 1; - ao2 += 1; - ao3 += 1; - ao4 += 1; - b += 4; - } - - } else - if (X < posY) { - if (m & 2) { - data01 = *(ao1 + 0); - data02 = *(ao1 + 1); - data03 = *(ao1 + 2); - data04 = *(ao1 + 3); - data05 = *(ao2 + 0); - data06 = *(ao2 + 1); - data07 = *(ao2 + 2); - data08 = *(ao2 + 3); - - b[ 0] = data01; - b[ 1] = data02; - b[ 2] = data03; - b[ 3] = data04; - b[ 4] = data05; - b[ 5] = data06; - b[ 6] = data07; - b[ 7] = data08; - - ao1 += 2 * lda; - ao2 += 2 * lda; - - b += 8; - } - - if (m & 1) { - data01 = *(ao1 + 0); - data02 = *(ao1 + 1); - data03 = *(ao1 + 2); - data04 = *(ao1 + 3); - - b[ 0] = data01; - b[ 1] = data02; - b[ 2] = data03; - b[ 3] = data04; - - ao1 += lda; - b += 4; - } - - } else { + b[ 11] = *(ao3 + 3); + b[ 12] = ZERO; + b[ 13] = ZERO; + b[ 14] = ZERO; +#ifdef UNIT + b[ 15] = ONE; +#else + b[ 15] = *(ao4 + 3); +#endif + ao1 += 4; + ao2 += 4; + ao3 += 4; + ao4 += 4; + b += 16; + X += 4; + i -= 2; + continue; + } + X += 2; + i --; + } while (i > 0); + } + + i = (m & 1); + if (i > 0) { + if (X > posY) { + /* ao1 += i; + ao2 += i; + ao3 += i; + ao4 += i; */ + b += 4 * i; + } else if (X < posY) { + + for (ii = 0; ii < i; ii++){ + + b[ 0] = *(ao1 + 0); + b[ 1] = *(ao1 + 1); + b[ 2] = *(ao1 + 2); + b[ 3] = *(ao1 + 3); + + // ao1 += lda; + // ao2 += lda; + // ao3 += lda; + // ao4 += lda; + b += 4; + } + } else { #ifdef UNIT - data02 = *(ao1 + 1); - data03 = *(ao1 + 2); - data04 = *(ao1 + 3); - - if (i >= 2) { - data07 = *(ao2 + 2); - data08 = *(ao2 + 3); - } - - if (i >= 3) { - data12 = *(ao3 + 3); - } - - b[ 0] = ONE; - b[ 1] = data02; - b[ 2] = data03; - b[ 3] = data04; - b += 4; - - if(i >= 2) { - b[ 0] = ZERO; - b[ 1] = ONE; - b[ 2] = data07; - b[ 3] = data08; - b += 4; - } - - if (i >= 3) { - b[ 0] = ZERO; - b[ 1] = ZERO; - b[ 2] = ONE; - b[ 3] = data12; - b 
+= 4; - } + b[ 0] = ONE; #else - data01 = *(ao1 + 0); - data02 = *(ao1 + 1); - data03 = *(ao1 + 2); - data04 = *(ao1 + 3); - - if (i >= 2) { - data06 = *(ao2 + 1); - data07 = *(ao2 + 2); - data08 = *(ao2 + 3); - } - - if (i >= 3) { - data11 = *(ao3 + 2); - data12 = *(ao3 + 3); - } - - b[ 0] = data01; - b[ 1] = data02; - b[ 2] = data03; - b[ 3] = data04; - b += 4; - - if(i >= 2) { - b[ 0] = ZERO; - b[ 1] = data06; - b[ 2] = data07; - b[ 3] = data08; - b += 4; - } - - if (i >= 3) { - b[ 0] = ZERO; - b[ 1] = ZERO; - b[ 2] = data11; - b[ 3] = data12; - b += 4; - } + b[ 0] = *(ao1 + 0); #endif - } - } - - posY += 4; - js --; - } while (js > 0); - } /* End of main loop */ - - - if (n & 2){ - X = posX; - - if (posX <= posY) { - ao1 = a + posY + (posX + 0) * lda; - ao2 = a + posY + (posX + 1) * lda; - } else { - ao1 = a + posX + (posY + 0) * lda; - ao2 = a + posX + (posY + 1) * lda; - } - - i = (m >> 1); - if (i > 0) { - do { - if (X > posY) { - ao1 += 2; - ao2 += 2; - b += 4; - - } else - if (X < posY) { - data01 = *(ao1 + 0); - data02 = *(ao1 + 1); - data05 = *(ao2 + 0); - data06 = *(ao2 + 1); - - b[ 0] = data01; - b[ 1] = data02; - b[ 2] = data05; - b[ 3] = data06; - - ao1 += 2 * lda; - ao2 += 2 * lda; - b += 4; - } else { + b[ 1] = *(ao1 + 1); + b[ 2] = *(ao1 + 2); + b[ 3] = *(ao1 + 3); + b += 4; + } + } + posY += 4; + } + + + if ((n % 6) & 2){ + X = posX; + + if (posX <= posY) { + ao1 = a + posY + (posX + 0) * lda; + ao2 = a + posY + (posX + 1) * lda; + } else { + ao1 = a + posX + (posY + 0) * lda; + ao2 = a + posX + (posY + 1) * lda; + } + + i = (m >> 1); + if (i > 0) { + do { + if (X > posY) { + ao1 += 2; + ao2 += 2; + b += 4; + + } else if (X < posY) { + data01 = *(ao1 + 0); + data02 = *(ao1 + 1); + data05 = *(ao2 + 0); + data06 = *(ao2 + 1); + + b[ 0] = data01; + b[ 1] = data02; + b[ 2] = data05; + b[ 3] = data06; + + ao1 += 2 * lda; + ao2 += 2 * lda; + b += 4; + } else { #ifdef UNIT - data02 = *(ao1 + 1); + data02 = *(ao1 + 1); - b[ 0] = ONE; - b[ 1] = data02; - 
b[ 2] = ZERO; - b[ 3] = ONE; + b[ 0] = ONE; + b[ 1] = data02; + b[ 2] = ZERO; + b[ 3] = ONE; #else - data01 = *(ao1 + 0); - data02 = *(ao1 + 1); - data06 = *(ao2 + 1); - - b[ 0] = data01; - b[ 1] = data02; - b[ 2] = ZERO; - b[ 3] = data06; + data01 = *(ao1 + 0); + data02 = *(ao1 + 1); + data06 = *(ao2 + 1); + + b[ 0] = data01; + b[ 1] = data02; + b[ 2] = ZERO; + b[ 3] = data06; #endif - ao1 += 2; - ao2 += 2; - b += 4; - } - - X += 2; - i --; - } while (i > 0); - } - - i = (m & 1); - if (i) { - - if (X > posY) { - ao1 += 1; - ao2 += 1; - - b += 2; - } else - if (X < posY) { - data01 = *(ao1 + 0); - data02 = *(ao1 + 1); - - b[ 0] = data01; - b[ 1] = data02; - ao1 += lda; - b += 2; - } else { + ao1 += 2; + ao2 += 2; + b += 4; + } + + X += 2; + i --; + } while (i > 0); + } + + i = (m & 1); + if (i) { + + if (X > posY) { + ao1 += 1; + ao2 += 1; + + b += 2; + } else if (X < posY) { + data01 = *(ao1 + 0); + data02 = *(ao1 + 1); + + b[ 0] = data01; + b[ 1] = data02; + ao1 += lda; + b += 2; + } else { #ifdef UNIT - data02 = *(ao1 + 1); + data02 = *(ao1 + 1); - b[ 0] = ONE; - b[ 1] = data02; + b[ 0] = ONE; + b[ 1] = data02; #else - data01 = *(ao1 + 0); - data02 = *(ao1 + 1); + data01 = *(ao1 + 0); + data02 = *(ao1 + 1); - b[ 0] = data01; - b[ 1] = data02; + b[ 0] = data01; + b[ 1] = data02; #endif - b += 2; - } - } - posY += 2; - } - - if (n & 1){ - X = posX; - - if (posX <= posY) { - ao1 = a + posY + (posX + 0) * lda; - } else { - ao1 = a + posX + (posY + 0) * lda; - } - - i = m; - if (i > 0) { - do { - if (X > posY) { - b += 1; - ao1 += 1; - } else - if (X < posY) { - data01 = *(ao1 + 0); - b[ 0] = data01; - ao1 += lda; - b += 1; - } else { + b += 2; + } + } + posY += 2; + } + + if ((n % 6) & 1){ + X = posX; + + if (posX <= posY) { + ao1 = a + posY + (posX + 0) * lda; + } else { + ao1 = a + posX + (posY + 0) * lda; + } + + i = m; + if (i > 0) { + do { + if (X > posY) { + b += 1; + ao1 += 1; + } else if (X < posY) { + data01 = *(ao1 + 0); + b[ 0] = data01; + ao1 += lda; + b 
+= 1; + } else { #ifdef UNIT - b[ 0] = ONE; + b[ 0] = ONE; #else - data01 = *(ao1 + 0); - b[ 0] = data01; + data01 = *(ao1 + 0); + b[ 0] = data01; #endif - ao1 += 1; - b += 1; - } + ao1 += 1; + b += 1; + } - X ++; - i --; - } while (i > 0); - } + X ++; + i --; + } while (i > 0); + } - posY += 1; - } + posY += 1; + } - return 0; + return 0; } diff --git a/kernel/generic/trmm_uncopy_6.c b/kernel/generic/trmm_uncopy_6.c index 4878f3f530..9521cc7243 100644 --- a/kernel/generic/trmm_uncopy_6.c +++ b/kernel/generic/trmm_uncopy_6.c @@ -41,745 +41,544 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLONG posY, FLOAT *b){ - BLASLONG i, js; - BLASLONG X, mm; - - FLOAT data01, data02, data03, data04, data05, data06; - FLOAT data07, data08, data09, data10, data11, data12; - FLOAT data13, data14, data15, data16, data17, data18; - FLOAT data19, data20, data21, data22, data23, data24; - FLOAT data25, data26, data27, data28, data29, data30; - FLOAT data31, data32, data33, data34, data35, data36; - - FLOAT *ao1, *ao2, *ao3, *ao4, *ao5, *ao6; - - //js = (n >> 2); - js = n/6; - if (js > 0){ - do { - X = posX; - - if (posX <= posY) { - ao1 = a + posX + (posY + 0) * lda; - ao2 = a + posX + (posY + 1) * lda; - ao3 = a + posX + (posY + 2) * lda; - ao4 = a + posX + (posY + 3) * lda; - ao5 = a + posX + (posY + 4) * lda; - ao6 = a + posX + (posY + 5) * lda; - } else { - ao1 = a + posY + (posX + 0) * lda; - ao2 = a + posY + (posX + 1) * lda; - ao3 = a + posY + (posX + 2) * lda; - ao4 = a + posY + (posX + 3) * lda; - ao5 = a + posY + (posX + 4) * lda; - ao6 = a + posY + (posX + 5) * lda; - } - - i = m/6; - if (i > 0) { - do { - if (X < posY) { - data01 = *(ao1 + 0); - data02 = *(ao1 + 1); - data03 = *(ao1 + 2); - data04 = *(ao1 + 3); - data05 = *(ao1 + 4); - data06 = *(ao1 + 5); - - data07 = *(ao2 + 0); - data08 = *(ao2 + 1); - data09 = *(ao2 + 2); - data10 = *(ao2 + 3); - data11 = *(ao2 + 4); - data12 = *(ao2 + 5); - - data13 = *(ao3 + 0); - data14 = *(ao3 + 
1); - data15 = *(ao3 + 2); - data16 = *(ao3 + 3); - data17 = *(ao3 + 4); - data18 = *(ao3 + 5); - - data19 = *(ao4 + 0); - data20 = *(ao4 + 1); - data21 = *(ao4 + 2); - data22 = *(ao4 + 3); - data23 = *(ao4 + 4); - data24 = *(ao4 + 5); - - data25 = *(ao5 + 0); - data26 = *(ao5 + 1); - data27 = *(ao5 + 2); - data28 = *(ao5 + 3); - data29 = *(ao5 + 4); - data30 = *(ao5 + 5); - - data31 = *(ao6 + 0); - data32 = *(ao6 + 1); - data33 = *(ao6 + 2); - data34 = *(ao6 + 3); - data35 = *(ao6 + 4); - data36 = *(ao6 + 5); - - b[ 0] = data01; - b[ 1] = data07; - b[ 2] = data13; - b[ 3] = data19; - b[ 4] = data25; - b[ 5] = data31; - - b[ 6] = data02; - b[ 7] = data08; - b[ 8] = data14; - b[ 9] = data20; - b[10] = data26; - b[11] = data32; - - b[12] = data03; - b[13] = data09; - b[14] = data15; - b[15] = data21; - b[16] = data27; - b[17] = data33; - - b[18] = data04; - b[19] = data10; - b[20] = data16; - b[21] = data22; - b[22] = data28; - b[23] = data34; - - b[24] = data05; - b[25] = data11; - b[26] = data17; - b[27] = data23; - b[28] = data29; - b[29] = data35; - - b[30] = data06; - b[31] = data12; - b[32] = data18; - b[33] = data24; - b[34] = data30; - b[35] = data36; - - ao1 += 6; - ao2 += 6; - ao3 += 6; - ao4 += 6; - ao5 += 6; - ao6 += 6; - b += 36; - } else - if (X > posY) { - b[ 0] = ZERO; - b[ 1] = ZERO; - b[ 2] = ZERO; - b[ 3] = ZERO; - b[ 4] = ZERO; - b[ 5] = ZERO; - b[ 6] = ZERO; - b[ 7] = ZERO; - b[ 8] = ZERO; - b[ 9] = ZERO; - b[10] = ZERO; - b[11] = ZERO; - b[12] = ZERO; - b[13] = ZERO; - b[14] = ZERO; - b[15] = ZERO; - b[16] = ZERO; - b[17] = ZERO; - b[18] = ZERO; - b[19] = ZERO; - b[20] = ZERO; - b[21] = ZERO; - b[22] = ZERO; - b[23] = ZERO; - b[24] = ZERO; - b[25] = ZERO; - b[26] = ZERO; - b[27] = ZERO; - b[28] = ZERO; - b[29] = ZERO; - b[30] = ZERO; - b[31] = ZERO; - b[32] = ZERO; - b[33] = ZERO; - b[34] = ZERO; - b[35] = ZERO; - - ao1 += 6 * lda; - ao2 += 6 * lda; - ao3 += 6 * lda; - ao4 += 6 * lda; - ao5 += 6 * lda; - ao6 += 6 * lda; - - b += 36; - } else { - 
data01 = *(ao1 + 0); - data07 = *(ao2 + 0); - data13 = *(ao3 + 0); - data19 = *(ao4 + 0); - data25 = *(ao5 + 0); - data31 = *(ao6 + 0); - - data08 = *(ao2 + 1); - data14 = *(ao3 + 1); - data20 = *(ao4 + 1); - data26 = *(ao5 + 1); - data32 = *(ao6 + 1); - - data15 = *(ao3 + 2); - data21 = *(ao4 + 2); - data27 = *(ao5 + 2); - data33 = *(ao6 + 2); - - data22 = *(ao4 + 3); - data28 = *(ao5 + 3); - data34 = *(ao6 + 3); - - data29 = *(ao5 + 4); - data35 = *(ao6 + 4); - - data36 = *(ao6 + 5); + BLASLONG i, js, ii; + BLASLONG X; + + FLOAT data01, data02, data05, data06; + + FLOAT *ao1, *ao2, *ao3, *ao4, *ao5, *ao6; + + js = n/6; + if (js > 0){ + do { + X = posX; + + if (posX <= posY) { + ao1 = a + posX + (posY + 0) * lda; + ao2 = a + posX + (posY + 1) * lda; + ao3 = a + posX + (posY + 2) * lda; + ao4 = a + posX + (posY + 3) * lda; + ao5 = a + posX + (posY + 4) * lda; + ao6 = a + posX + (posY + 5) * lda; + } else { + ao1 = a + posY + (posX + 0) * lda; + ao2 = a + posY + (posX + 1) * lda; + ao3 = a + posY + (posX + 2) * lda; + ao4 = a + posY + (posX + 3) * lda; + ao5 = a + posY + (posX + 4) * lda; + ao6 = a + posY + (posX + 5) * lda; + } + + i = m/6; + if (i > 0) { + do { + if (X < posY) { + for (ii = 0; ii < 6; ii++){ + + b[ 0] = *(ao1 + 0); + b[ 1] = *(ao2 + 0); + b[ 2] = *(ao3 + 0); + b[ 3] = *(ao4 + 0); + b[ 4] = *(ao5 + 0); + b[ 5] = *(ao6 + 0); + + ao1 ++; + ao2 ++; + ao3 ++; + ao4 ++; + ao5 ++; + ao6 ++; + b += 6; + } + } else if (X > posY) { + // b[ 0] = ZERO; + // b[ 1] = ZERO; + // b[ 2] = ZERO; + // b[ 3] = ZERO; + // b[ 4] = ZERO; + // b[ 5] = ZERO; + // b[ 6] = ZERO; + // b[ 7] = ZERO; + // b[ 8] = ZERO; + // b[ 9] = ZERO; + // b[10] = ZERO; + // b[11] = ZERO; + // b[12] = ZERO; + // b[13] = ZERO; + // b[14] = ZERO; + // b[15] = ZERO; + // b[16] = ZERO; + // b[17] = ZERO; + // b[18] = ZERO; + // b[19] = ZERO; + // b[20] = ZERO; + // b[21] = ZERO; + // b[22] = ZERO; + // b[23] = ZERO; + // b[24] = ZERO; + // b[25] = ZERO; + // b[26] = ZERO; + // b[27] = ZERO; + 
// b[28] = ZERO; + // b[29] = ZERO; + // b[30] = ZERO; + // b[31] = ZERO; + // b[32] = ZERO; + // b[33] = ZERO; + // b[34] = ZERO; + // b[35] = ZERO; + + ao1 += 6 * lda; + ao2 += 6 * lda; + ao3 += 6 * lda; + ao4 += 6 * lda; + ao5 += 6 * lda; + ao6 += 6 * lda; + + b += 36; + } else { +#ifdef UNIT + b[ 0] = ONE; +#else + b[ 0] = *(ao1 + 0); +#endif + b[ 1] = *(ao2 + 0); + b[ 2] = *(ao3 + 0); + b[ 3] = *(ao4 + 0); + b[ 4] = *(ao5 + 0); + b[ 5] = *(ao6 + 0); + + b[ 6] = ZERO; +#ifdef UNIT + b[ 7] = ONE; +#else + b[ 7] = *(ao2 + 1); +#endif + b[ 8] = *(ao3 + 1); + b[ 9] = *(ao4 + 1); + b[10] = *(ao5 + 1); + b[11] = *(ao6 + 1); + + b[ 12] = ZERO; + b[ 13] = ZERO; +#ifdef UNIT + b[ 14] = ONE; +#else + b[ 14] = *(ao3 + 2); +#endif + b[ 15] = *(ao4 + 2); + b[ 16] = *(ao5 + 2); + b[ 17] = *(ao6 + 2); + + b[ 18] = ZERO; + b[ 19] = ZERO; + b[ 20] = ZERO; +#ifdef UNIT + b[ 21] = ONE; +#else + b[ 21] = *(ao4 + 3); +#endif + b[ 22] = *(ao5 + 3); + b[ 23] = *(ao6 + 3); + + b[ 24] = ZERO; + b[ 25] = ZERO; + b[ 26] = ZERO; + b[ 27] = ZERO; +#ifdef UNIT + b[ 28] = ONE; +#else + b[ 28] = *(ao5 + 4); +#endif + b[ 29] = *(ao6 + 4); + + b[ 30] = ZERO; + b[ 31] = ZERO; + b[ 32] = ZERO; + b[ 33] = ZERO; + b[ 34] = ZERO; +#ifdef UNIT + b[ 35] = ONE; +#else + b[ 35] = *(ao6 + 5); +#endif + ao1 += 6 * lda; + ao2 += 6 * lda; + ao3 += 6 * lda; + ao4 += 6 * lda; + ao5 += 6 * lda; + ao6 += 6 * lda; + + b += 36; + } + X += 6; + i --; + } while (i > 0); + } + i = m % 6; + if (i) { + if (X < posY) { + for (ii = 0; ii < i; ii++){ + + b[ 0] = *(ao1 + 0); + b[ 1] = *(ao2 + 0); + b[ 2] = *(ao3 + 0); + b[ 3] = *(ao4 + 0); + b[ 4] = *(ao5 + 0); + b[ 5] = *(ao6 + 0); + + ao1 ++; + ao2 ++; + ao3 ++; + ao4 ++; + ao5 ++; + ao6 ++; + b += 6; + } + } else if (X > posY) { + b += 6 * i; + } else { +#ifdef UNIT + b[ 0] = ONE; +#else + b[ 0] = *(ao1 + 0); +#endif + b[ 1] = *(ao2 + 0); + b[ 2] = *(ao3 + 0); + b[ 3] = *(ao4 + 0); + b[ 4] = *(ao5 + 0); + b[ 5] = *(ao6 + 0); + b += 6; + + if (i >= 2) { + b[ 0] = ZERO; 
+#ifdef UNIT + b[ 1] = ONE; +#else + b[ 1] = *(ao2 + 1); +#endif + b[ 2] = *(ao3 + 1); + b[ 3] = *(ao4 + 1); + b[ 4] = *(ao5 + 1); + b[ 5] = *(ao6 + 1); + b += 6; + } + + if (i >= 3) { + b[ 0] = ZERO; + b[ 1] = ZERO; +#ifdef UNIT + b[ 2] = ONE; +#else + b[ 2] = *(ao3 + 2); +#endif + b[ 3] = *(ao4 + 2); + b[ 4] = *(ao5 + 2); + b[ 5] = *(ao6 + 2); + b += 6; + } + + if (i >= 4) { + b[ 0] = ZERO; + b[ 1] = ZERO; + b[ 2] = ZERO; +#ifdef UNIT + b[ 3] = ONE; +#else + b[ 3] = *(ao4 + 3); +#endif + b[ 4] = *(ao5 + 3); + b[ 5] = *(ao6 + 3); + b += 6; + } + + if (i >= 5) { + b[ 0] = ZERO; + b[ 1] = ZERO; + b[ 2] = ZERO; + b[ 3] = ZERO; +#ifdef UNIT + b[ 4] = ONE; +#else + b[ 4] = *(ao5 + 4); +#endif + b[ 5] = *(ao6 + 4); + b += 6; + } + } + } + posY += 6; + js --; + } while (js > 0); + } /* End of main loop */ + + + if ((n % 6) & 4){ + X = posX; + + if (posX <= posY) { + ao1 = a + posX + (posY + 0) * lda; + ao2 = a + posX + (posY + 1) * lda; + ao3 = a + posX + (posY + 2) * lda; + ao4 = a + posX + (posY + 3) * lda; + } else { + ao1 = a + posY + (posX + 0) * lda; + ao2 = a + posY + (posX + 1) * lda; + ao3 = a + posY + (posX + 2) * lda; + ao4 = a + posY + (posX + 3) * lda; + } + + i = (m >> 1); + if (i > 0) { + do { + if (X < posY) { + for (ii = 0; ii < 2; ii++){ + + b[ 0] = *(ao1 + 0); + b[ 1] = *(ao2 + 0); + b[ 2] = *(ao3 + 0); + b[ 3] = *(ao4 + 0); + + ao1 ++; + ao2 ++; + ao3 ++; + ao4 ++; + b += 4; + } + } else if (X > posY) { + ao1 += 2 * lda; + ao2 += 2 * lda; + ao3 += 2 * lda; + ao4 += 2 * lda; + b += 8; + } else { +#ifdef UNIT + b[ 0] = ONE; +#else + b[ 0] = *(ao1 + 0); +#endif + b[ 1] = *(ao2 + 0); + b[ 2] = *(ao3 + 0); + b[ 3] = *(ao4 + 0); + b[ 4] = ZERO; #ifdef UNIT - b[ 0] = ONE; - b[ 1] = data07; - b[ 2] = data13; - b[ 3] = data19; - b[ 4] = data25; - b[ 5] = data31; - - b[ 6] = ZERO; - b[ 7] = ONE; - b[ 8] = data14; - b[ 9] = data20; - b[10] = data26; - b[11] = data32; - - b[12] = ZERO; - b[13] = ZERO; - b[14] = ONE; - b[15] = data21; - b[16] = data27; - b[17] = 
data33; - - b[18] = ZERO; - b[19] = ZERO; - b[20] = ZERO; - b[21] = ONE; - b[22] = data28; - b[23] = data34; - - b[24] = ZERO; - b[25] = ZERO; - b[26] = ZERO; - b[27] = ZERO; - b[28] = ONE; - b[29] = data35; - - b[30] = ZERO; - b[31] = ZERO; - b[32] = ZERO; - b[33] = ZERO; - b[34] = ZERO; - b[35] = ONE; + b[ 5] = ONE; #else - b[ 0] = data01; - b[ 1] = data07; - b[ 2] = data13; - b[ 3] = data19; - b[ 4] = data25; - b[ 5] = data31; - - b[ 6] = ZERO; - b[ 7] = data08; - b[ 8] = data14; - b[ 9] = data20; - b[10] = data26; - b[11] = data32; - - b[12] = ZERO; - b[13] = ZERO; - b[14] = data15; - b[15] = data21; - b[16] = data27; - b[17] = data33; - - b[18] = ZERO; - b[19] = ZERO; - b[20] = ZERO; - b[21] = data22; - b[22] = data28; - b[23] = data34; - - b[24] = ZERO; - b[25] = ZERO; - b[26] = ZERO; - b[27] = ZERO; - b[28] = data29; - b[29] = data35; - - b[30] = ZERO; - b[31] = ZERO; - b[32] = ZERO; - b[33] = ZERO; - b[34] = ZERO; - b[35] = data36; + b[ 5] = *(ao2 + 1); #endif + b[ 6] = *(ao3 + 1); + b[ 7] = *(ao4 + 1); - ao1 += 6; - ao2 += 6; - ao3 += 6; - ao4 += 6; - ao5 += 6; - ao6 += 7; - - b += 36; - } - X += 6; - i --; - } while (i > 0); - } - mm = m - m/6; - if (mm & 4) { - if (X < posY) { - data01 = *(ao1 + 0); - data02 = *(ao1 + 1); - data03 = *(ao1 + 2); - data04 = *(ao1 + 3); - - data05 = *(ao2 + 0); - data06 = *(ao2 + 1); - data07 = *(ao2 + 2); - data08 = *(ao2 + 3); - - data09 = *(ao3 + 0); - data10 = *(ao3 + 1); - data11 = *(ao3 + 2); - data12 = *(ao3 + 3); - - data13 = *(ao4 + 0); - data14 = *(ao4 + 1); - data15 = *(ao4 + 2); - data16 = *(ao4 + 3); - - b[ 0] = data01; - b[ 1] = data05; - b[ 2] = data09; - b[ 3] = data13; - b[ 4] = data02; - b[ 5] = data06; - b[ 6] = data10; - b[ 7] = data14; - - b[ 8] = data03; - b[ 9] = data07; - b[10] = data11; - b[11] = data15; - b[12] = data04; - b[13] = data08; - b[14] = data12; - b[15] = data16; - - ao1 += 4; - ao2 += 4; - ao3 += 4; - ao4 += 4; - b += 16; - } else - if (X > posY) { - b[ 0] = ZERO; - b[ 1] = ZERO; - b[ 
2] = ZERO; - b[ 3] = ZERO; - b[ 4] = ZERO; - b[ 5] = ZERO; - b[ 6] = ZERO; - b[ 7] = ZERO; - b[ 8] = ZERO; - b[ 9] = ZERO; - b[10] = ZERO; - b[11] = ZERO; - b[12] = ZERO; - b[13] = ZERO; - b[14] = ZERO; - b[15] = ZERO; - b[16] = ZERO; - b[17] = ZERO; - b[18] = ZERO; - b[19] = ZERO; - b[20] = ZERO; - b[21] = ZERO; - b[22] = ZERO; - b[23] = ZERO; - - ao1 += 4 * lda; - ao2 += 4 * lda; - ao3 += 4 * lda; - ao4 += 4 * lda; - - b += 16; - } else { + b[ 8] = ZERO; + b[ 9] = ZERO; #ifdef UNIT - data05 = *(ao2 + 0); - - data09 = *(ao3 + 0); - data10 = *(ao3 + 1); - - data13 = *(ao4 + 0); - data14 = *(ao4 + 1); - data15 = *(ao4 + 2); - - b[ 0] = ONE; - b[ 1] = data05; - b[ 2] = data09; - b[ 3] = data13; - - b[ 4] = ZERO; - b[ 5] = ONE; - b[ 6] = data10; - b[ 7] = data14; - - b[ 8] = ZERO; - b[ 9] = ZERO; - b[10] = ONE; - b[11] = data15; - - b[12] = ZERO; - b[13] = ZERO; - b[14] = ZERO; - b[15] = ONE; + b[ 10] = ONE; #else - data01 = *(ao1 + 0); - - data05 = *(ao2 + 0); - data06 = *(ao2 + 1); - - data09 = *(ao3 + 0); - data10 = *(ao3 + 1); - data11 = *(ao3 + 2); - - data13 = *(ao4 + 0); - data14 = *(ao4 + 1); - data15 = *(ao4 + 2); - data16 = *(ao4 + 3); - - b[ 0] = data01; - b[ 1] = data05; - b[ 2] = data09; - b[ 3] = data13; - - b[ 4] = ZERO; - b[ 5] = data06; - b[ 6] = data10; - b[ 7] = data14; - - b[ 8] = ZERO; - b[ 9] = ZERO; - b[10] = data11; - b[11] = data15; - - b[12] = ZERO; - b[13] = ZERO; - b[14] = ZERO; - b[15] = data16; + b[ 10] = *(ao3 + 2); #endif - ao1 += 4; - ao2 += 4; - ao3 += 4; - ao4 += 4; - - b += 16; - } - X += 4; - } - - if (mm & 3) { - if (X < posY) { - if (mm & 2) { - data01 = *(ao1 + 0); - data02 = *(ao1 + 1); - data03 = *(ao2 + 0); - data04 = *(ao2 + 1); - data05 = *(ao3 + 0); - data06 = *(ao3 + 1); - data07 = *(ao4 + 0); - data08 = *(ao4 + 1); - - b[ 0] = data01; - b[ 1] = data03; - b[ 2] = data05; - b[ 3] = data07; - b[ 4] = data02; - b[ 5] = data04; - b[ 6] = data06; - b[ 7] = data08; - - ao1 += 2; - ao2 += 2; - ao3 += 2; - ao4 += 2; - b += 8; - } 
- - if (mm & 1) { - data01 = *(ao1 + 0); - data03 = *(ao2 + 0); - data05 = *(ao3 + 0); - data07 = *(ao4 + 0); - - b[ 0] = data01; - b[ 1] = data03; - b[ 2] = data05; - b[ 3] = data07; - - ao1 += 1; - ao2 += 1; - ao3 += 1; - ao4 += 1; - b += 4; - } - - } else - if (X > posY) { - if (m & 2) { - ao1 += 2 * lda; - ao2 += 2 * lda; - b += 8; - } - - if (m & 1) { - ao1 += lda; - b += 4; - } - - } else { + b[ 11] = *(ao4 + 2); + + b[ 12] = ZERO; + b[ 13] = ZERO; + b[ 14] = ZERO; +#ifdef UNIT + b[ 15] = ONE; +#else + b[ 15] = *(ao4 + 3); +#endif + + ao1 += 4 * lda; + ao2 += 4 * lda; + ao3 += 4 * lda; + ao4 += 4 * lda; + b += 16; + X += 4; + i -= 2; + continue; + } + + X += 2; + i --; + } while (i > 0); + } + + i = (m & 1); + if (i) { + + if (X < posY) { + for (ii = 0; ii < i; ii++){ + + b[ 0] = *(ao1 + 0); + b[ 1] = *(ao2 + 0); + b[ 2] = *(ao3 + 0); + b[ 3] = *(ao4 + 0); + + ao1 ++; + ao2 ++; + ao3 ++; + ao4 ++; + b += 4; + } + } else if (X > posY) { + /* ao1 += i * lda; + ao2 += i * lda; + ao3 += i * lda; + ao4 += i * lda; */ + b += 4 * i; + } else { #ifdef UNIT - data05 = *(ao2 + 0); - data09 = *(ao3 + 0); - data13 = *(ao4 + 0); - - if (i >= 2) { - data10 = *(ao3 + 1); - data14 = *(ao4 + 1); - } - - if (i >= 3) { - data15 = *(ao4 + 2); - } - - b[ 0] = ONE; - b[ 1] = data05; - b[ 2] = data09; - b[ 3] = data13; - b += 4; - - if(i >= 2) { - b[ 0] = ZERO; - b[ 1] = ONE; - b[ 2] = data10; - b[ 3] = data14; - b += 4; - } - - if (i >= 3) { - b[ 0] = ZERO; - b[ 1] = ZERO; - b[ 2] = ONE; - b[ 3] = data15; - b += 4; - } + b[ 0] = ONE; #else - data01 = *(ao1 + 0); - data05 = *(ao2 + 0); - data09 = *(ao3 + 0); - data13 = *(ao4 + 0); - - if (i >= 2) { - data06 = *(ao2 + 1); - data10 = *(ao3 + 1); - data14 = *(ao4 + 1); - } - - if (i >= 3) { - data11 = *(ao3 + 2); - data15 = *(ao4 + 2); - } - - b[ 0] = data01; - b[ 1] = data05; - b[ 2] = data09; - b[ 3] = data13; - b += 4; - - if(i >= 2) { - b[ 0] = ZERO; - b[ 1] = data06; - b[ 2] = data10; - b[ 3] = data14; - b += 4; - } - - if (i >= 
3) { - b[ 0] = ZERO; - b[ 1] = ZERO; - b[ 2] = data11; - b[ 3] = data15; - b += 4; - } + b[ 0] = *(ao1 + 0); #endif - } - } - - posY += 4; - js --; - } while (js > 0); - } /* End of main loop */ - - if (n & 2){ - X = posX; - - if (posX <= posY) { - ao1 = a + posX + (posY + 0) * lda; - ao2 = a + posX + (posY + 1) * lda; - } else { - ao1 = a + posY + (posX + 0) * lda; - ao2 = a + posY + (posX + 1) * lda; - } - - i = (m >> 1); - if (i > 0) { - do { - if (X < posY) { - data01 = *(ao1 + 0); - data02 = *(ao1 + 1); - data05 = *(ao2 + 0); - data06 = *(ao2 + 1); - - b[ 0] = data01; - b[ 1] = data05; - b[ 2] = data02; - b[ 3] = data06; - - ao1 += 2; - ao2 += 2; - b += 4; - - } else - if (X > posY) { - ao1 += 2 * lda; - ao2 += 2 * lda; - b += 4; - - } else { + b[ 1] = *(ao2 + 0); + b[ 2] = *(ao3 + 0); + b[ 3] = *(ao4 + 0); + b += 4; + } + } + + posY += 4; + } + + if ((n % 6) & 2){ + X = posX; + + if (posX <= posY) { + ao1 = a + posX + (posY + 0) * lda; + ao2 = a + posX + (posY + 1) * lda; + } else { + ao1 = a + posY + (posX + 0) * lda; + ao2 = a + posY + (posX + 1) * lda; + } + + i = (m >> 1); + if (i > 0) { + do { + if (X < posY) { + data01 = *(ao1 + 0); + data02 = *(ao1 + 1); + data05 = *(ao2 + 0); + data06 = *(ao2 + 1); + + b[ 0] = data01; + b[ 1] = data05; + b[ 2] = data02; + b[ 3] = data06; + + ao1 += 2; + ao2 += 2; + b += 4; + + } else if (X > posY) { + ao1 += 2 * lda; + ao2 += 2 * lda; + b += 4; + + } else { #ifdef UNIT - data05 = *(ao2 + 0); + data05 = *(ao2 + 0); - b[ 0] = ONE; - b[ 1] = data05; - b[ 2] = ZERO; - b[ 3] = ONE; + b[ 0] = ONE; + b[ 1] = data05; + b[ 2] = ZERO; + b[ 3] = ONE; #else - data01 = *(ao1 + 0); - data05 = *(ao2 + 0); - data06 = *(ao2 + 1); - - b[ 0] = data01; - b[ 1] = data05; - b[ 2] = ZERO; - b[ 3] = data06; + data01 = *(ao1 + 0); + data05 = *(ao2 + 0); + data06 = *(ao2 + 1); + + b[ 0] = data01; + b[ 1] = data05; + b[ 2] = ZERO; + b[ 3] = data06; #endif - ao1 += 2 * lda; - ao2 += 2 * lda; - - b += 4; - } - - X += 2; - i --; - } while (i > 0); 
- } - - i = (m & 1); - if (i) { - - if (X < posY) { - data01 = *(ao1 + 0); - data05 = *(ao2 + 0); - - b[ 0] = data01; - b[ 1] = data05; - ao1 += 1; - ao2 += 1; - b += 2; - } else - if (X > posY) { - ao1 += lda; - ao2 += lda; - b += 2; - } else { + ao1 += 2 * lda; + ao2 += 2 * lda; + + b += 4; + } + + X += 2; + i --; + } while (i > 0); + } + + i = (m & 1); + if (i) { + + if (X < posY) { + data01 = *(ao1 + 0); + data05 = *(ao2 + 0); + + b[ 0] = data01; + b[ 1] = data05; + ao1 += 1; + ao2 += 1; + b += 2; + } else if (X > posY) { + ao1 += lda; + ao2 += lda; + b += 2; + } else { #ifdef UNIT - data05 = *(ao2 + 0); - b[ 0] = ONE; - b[ 1] = data05; + data05 = *(ao2 + 0); + b[ 0] = ONE; + b[ 1] = data05; #else - data01 = *(ao1 + 0); - data05 = *(ao2 + 0); + data01 = *(ao1 + 0); + data05 = *(ao2 + 0); - b[ 0] = data01; - b[ 1] = data05; + b[ 0] = data01; + b[ 1] = data05; #endif - ao1 += lda; - ao2 += lda; - b += 2; - } - } - - posY += 2; - } - - if (n & 1){ - X = posX; - - if (posX <= posY) { - ao1 = a + posX + (posY + 0) * lda; - } else { - ao1 = a + posY + (posX + 0) * lda; - } - - i = m; - if (m > 0) { - do { - if (X < posY) { - data01 = *(ao1 + 0); - b[ 0] = data01; - ao1 += 1; - b += 1; - } else - if (X > posY) { - ao1 += lda; - b += 1; - } else { + ao1 += lda; + ao2 += lda; + b += 2; + } + } + + posY += 2; + } + + if ((n % 6) & 1){ + X = posX; + + if (posX <= posY) { + ao1 = a + posX + (posY + 0) * lda; + } else { + ao1 = a + posY + (posX + 0) * lda; + } + + i = m; + if (m > 0) { + do { + if (X < posY) { + data01 = *(ao1 + 0); + b[ 0] = data01; + ao1 += 1; + b += 1; + } else if (X > posY) { + ao1 += lda; + b += 1; + } else { #ifdef UNIT - b[ 0] = ONE; + b[ 0] = ONE; #else - data01 = *(ao1 + 0); - b[ 0] = data01; + data01 = *(ao1 + 0); + b[ 0] = data01; #endif - ao1 += lda; - b += 1; - } + ao1 += lda; + b += 1; + } - X += 1; - i --; - } while (i > 0); - } - } + X += 1; + i --; + } while (i > 0); + } + } - return 0; + return 0; } diff --git 
a/kernel/generic/trmm_utcopy_6.c b/kernel/generic/trmm_utcopy_6.c index 441f7338b5..e7ec4999f3 100644 --- a/kernel/generic/trmm_utcopy_6.c +++ b/kernel/generic/trmm_utcopy_6.c @@ -41,432 +41,510 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLONG posY, FLOAT *b){ - BLASLONG i, js; - BLASLONG X; - - FLOAT data01, data02, data03, data04, data05, data06, data07, data08; - FLOAT data09, data10, data11, data12, data13, data14, data15, data16; - FLOAT *ao1, *ao2, *ao3, *ao4; - - js = (n >> 2); - - if (js > 0){ - do { - X = posX; - - if (posX <= posY) { - ao1 = a + posX + (posY + 0) * lda; - ao2 = a + posX + (posY + 1) * lda; - ao3 = a + posX + (posY + 2) * lda; - ao4 = a + posX + (posY + 3) * lda; - } else { - ao1 = a + posY + (posX + 0) * lda; - ao2 = a + posY + (posX + 1) * lda; - ao3 = a + posY + (posX + 2) * lda; - ao4 = a + posY + (posX + 3) * lda; - } - - i = (m >> 2); - if (i > 0) { - do { - if (X < posY) { - ao1 += 4; - ao2 += 4; - ao3 += 4; - ao4 += 4; - b += 16; - } else - if (X > posY) { - data01 = *(ao1 + 0); - data02 = *(ao1 + 1); - data03 = *(ao1 + 2); - data04 = *(ao1 + 3); - - data05 = *(ao2 + 0); - data06 = *(ao2 + 1); - data07 = *(ao2 + 2); - data08 = *(ao2 + 3); - - data09 = *(ao3 + 0); - data10 = *(ao3 + 1); - data11 = *(ao3 + 2); - data12 = *(ao3 + 3); - - data13 = *(ao4 + 0); - data14 = *(ao4 + 1); - data15 = *(ao4 + 2); - data16 = *(ao4 + 3); - - b[ 0] = data01; - b[ 1] = data02; - b[ 2] = data03; - b[ 3] = data04; - b[ 4] = data05; - b[ 5] = data06; - b[ 6] = data07; - b[ 7] = data08; - - b[ 8] = data09; - b[ 9] = data10; - b[10] = data11; - b[11] = data12; - b[12] = data13; - b[13] = data14; - b[14] = data15; - b[15] = data16; - - ao1 += 4 * lda; - ao2 += 4 * lda; - ao3 += 4 * lda; - ao4 += 4 * lda; - b += 16; - - } else { + BLASLONG i, js, ii; + BLASLONG X; + + FLOAT data01, data02, data05, data06; + FLOAT *ao1, *ao2, *ao3, *ao4, *ao5, *ao6; + + js = (n / 6); + + if (js > 0){ + do { + X = posX; + + if (posX <= 
posY) { + ao1 = a + posX + (posY + 0) * lda; + ao2 = a + posX + (posY + 1) * lda; + ao3 = a + posX + (posY + 2) * lda; + ao4 = a + posX + (posY + 3) * lda; + ao5 = a + posX + (posY + 4) * lda; + ao6 = a + posX + (posY + 5) * lda; + } else { + ao1 = a + posY + (posX + 0) * lda; + ao2 = a + posY + (posX + 1) * lda; + ao3 = a + posY + (posX + 2) * lda; + ao4 = a + posY + (posX + 3) * lda; + ao5 = a + posY + (posX + 4) * lda; + ao6 = a + posY + (posX + 5) * lda; + } + + i = (m / 6); + if (i > 0) { + do { + if (X < posY) { + ao1 += 6; + ao2 += 6; + ao3 += 6; + ao4 += 6; + ao5 += 6; + ao6 += 6; + + b += 36; + } else if (X > posY) { + for (ii = 0; ii < 6; ii++){ + + b[ 0] = *(ao1 + 0); + b[ 1] = *(ao1 + 1); + b[ 2] = *(ao1 + 2); + b[ 3] = *(ao1 + 3); + b[ 4] = *(ao1 + 4); + b[ 5] = *(ao1 + 5); + + ao1 += lda; + b += 6; + } + ao2 += 6 * lda; + ao3 += 6 * lda; + ao4 += 6 * lda; + ao5 += 6 * lda; + ao6 += 6 * lda; + + } else { #ifdef UNIT - data05 = *(ao2 + 0); - data09 = *(ao3 + 0); - data10 = *(ao3 + 1); - data13 = *(ao4 + 0); - data14 = *(ao4 + 1); - data15 = *(ao4 + 2); - - b[ 0] = ONE; - b[ 1] = ZERO; - b[ 2] = ZERO; - b[ 3] = ZERO; - - b[ 4] = data05; - b[ 5] = ONE; - b[ 6] = ZERO; - b[ 7] = ZERO; - - b[ 8] = data09; - b[ 9] = data10; - b[10] = ONE; - b[11] = ZERO; - - b[12] = data13; - b[13] = data14; - b[14] = data15; - b[15] = ONE; + b[ 0] = ONE; #else - data01 = *(ao1 + 0); - data05 = *(ao2 + 0); - data06 = *(ao2 + 1); - data09 = *(ao3 + 0); - data10 = *(ao3 + 1); - data11 = *(ao3 + 2); - data13 = *(ao4 + 0); - data14 = *(ao4 + 1); - data15 = *(ao4 + 2); - data16 = *(ao4 + 3); - - b[ 0] = data01; - b[ 1] = ZERO; - b[ 2] = ZERO; - b[ 3] = ZERO; - - b[ 4] = data05; - b[ 5] = data06; - b[ 6] = ZERO; - b[ 7] = ZERO; - - b[ 8] = data09; - b[ 9] = data10; - b[10] = data11; - b[11] = ZERO; - - b[12] = data13; - b[13] = data14; - b[14] = data15; - b[15] = data16; + b[ 0] = *(ao1 + 0); #endif + b[ 1] = ZERO; + b[ 2] = ZERO; + b[ 3] = ZERO; + b[ 4] = ZERO; + b[ 5] = ZERO; - 
ao1 += 4 * lda; - ao2 += 4 * lda; - ao3 += 4 * lda; - ao4 += 4 * lda; - - b += 16; - } - - X += 4; - i --; - } while (i > 0); - } - - i = (m & 3); - if (i) { - - if (X < posY) { - - if (m & 2) { - ao1 += 2; - ao2 += 2; - ao3 += 2; - ao4 += 2; - b += 8; - } - - if (m & 1) { - ao1 += 1; - ao2 += 1; - ao3 += 1; - ao4 += 1; - b += 4; - } - - } else - if (X > posY) { - if (m & 2) { - data01 = *(ao1 + 0); - data02 = *(ao1 + 1); - data03 = *(ao1 + 2); - data04 = *(ao1 + 3); - data05 = *(ao2 + 0); - data06 = *(ao2 + 1); - data07 = *(ao2 + 2); - data08 = *(ao2 + 3); - - b[ 0] = data01; - b[ 1] = data02; - b[ 2] = data03; - b[ 3] = data04; - b[ 4] = data05; - b[ 5] = data06; - b[ 6] = data07; - b[ 7] = data08; - - ao1 += 2 * lda; - ao2 += 2 * lda; - b += 8; - } - - if (m & 1) { - data01 = *(ao1 + 0); - data02 = *(ao1 + 1); - data03 = *(ao1 + 2); - data04 = *(ao1 + 3); - - b[ 0] = data01; - b[ 1] = data02; - b[ 2] = data03; - b[ 3] = data04; - - ao1 += lda; - b += 4; - } - - } else { + b[ 6] = *(ao2 + 0); +#ifdef UNIT + b[ 7] = ONE; +#else + b[ 7] = *(ao2 + 1); +#endif + b[ 8] = ZERO; + b[ 9] = ZERO; + b[10] = ZERO; + b[11] = ZERO; + + b[12] = *(ao3 + 0); + b[13] = *(ao3 + 1); +#ifdef UNIT + b[14] = ONE; +#else + b[14] = *(ao3 + 2); +#endif + b[15] = ZERO; + b[16] = ZERO; + b[17] = ZERO; + + b[18] = *(ao4 + 0); + b[19] = *(ao4 + 1); + b[20] = *(ao4 + 2); +#ifdef UNIT + b[21] = ONE; +#else + b[21] = *(ao4 + 3); +#endif + b[22] = ZERO; + b[23] = ZERO; + + b[24] = *(ao5 + 0); + b[25] = *(ao5 + 1); + b[26] = *(ao5 + 2); + b[27] = *(ao5 + 3); +#ifdef UNIT + b[28] = ONE; +#else + b[28] = *(ao5 + 4); +#endif + b[29] = ZERO; + + b[30] = *(ao6 + 0); + b[31] = *(ao6 + 1); + b[32] = *(ao6 + 2); + b[33] = *(ao6 + 3); + b[34] = *(ao6 + 4); +#ifdef UNIT + b[35] = ONE; +#else + b[35] = *(ao6 + 5); +#endif + + ao1 += 6 * lda; + ao2 += 6 * lda; + ao3 += 6 * lda; + ao4 += 6 * lda; + ao5 += 6 * lda; + ao6 += 6 * lda; + + b += 36; + } + + X += 6; + i --; + } while (i > 0); + } + + i = m % 6; + 
if (i > 0) { + if (X < posY) { + + ao1 += i; + ao2 += i; + ao3 += i; + ao4 += i; + ao5 += i; + ao6 += i; + b += 6 * i; + + } else if (X > posY) { + for (ii = 0; ii < i; ii++){ + + b[ 0] = *(ao1 + 0); + b[ 1] = *(ao1 + 1); + b[ 2] = *(ao1 + 2); + b[ 3] = *(ao1 + 3); + b[ 4] = *(ao1 + 4); + b[ 5] = *(ao1 + 5); + + ao1 += lda; + ao2 += lda; + ao3 += lda; + ao4 += lda; + ao5 += lda; + ao6 += lda; + b += 6; + } + } else { + +#ifdef UNIT + b[ 0] = ONE; +#else + b[ 0] = *(ao1 + 0); +#endif + b[ 1] = ZERO; + b[ 2] = ZERO; + b[ 3] = ZERO; + b[ 4] = ZERO; + b[ 5] = ZERO; + + if (i >= 2) { + b[ 0] = *(ao2 + 0); +#ifdef UNIT + b[ 1] = ONE; +#else + b[ 1] = *(ao2 + 1); +#endif + b[ 2] = ZERO; + b[ 3] = ZERO; + b[ 4] = ZERO; + b[ 5] = ZERO; + b += 6; + } + + if (i >= 3) { + b[ 0] = *(ao3 + 0); + b[ 1] = *(ao3 + 1); +#ifdef UNIT + b[ 2] = ONE; +#else + b[ 2] = *(ao3 + 2); +#endif + b[ 3] = ZERO; + b[ 4] = ZERO; + b[ 5] = ZERO; + b += 6; + } + + if (i >= 4) { + b[ 0] = *(ao4 + 0); + b[ 1] = *(ao4 + 1); + b[ 2] = *(ao4 + 2); +#ifdef UNIT + b[ 3] = ONE; +#else + b[ 3] = *(ao4 + 3); +#endif + b[ 4] = ZERO; + b[ 5] = ZERO; + b += 6; + } + + if (i >= 5) { + b[ 0] = *(ao5 + 0); + b[ 1] = *(ao5 + 1); + b[ 2] = *(ao5 + 2); + b[ 3] = *(ao5 + 3); +#ifdef UNIT + b[ 4] = ONE; +#else + b[ 4] = *(ao5 + 4); +#endif + b[ 5] = ZERO; + b += 6; + } + } + } + + posY += 6; + js --; + } while (js > 0); + } /* End of main loop */ + + if ((n % 6) & 4){ + X = posX; + + if (posX <= posY) { + ao1 = a + posX + (posY + 0) * lda; + ao2 = a + posX + (posY + 1) * lda; + ao3 = a + posX + (posY + 2) * lda; + ao4 = a + posX + (posY + 3) * lda; + } else { + ao1 = a + posY + (posX + 0) * lda; + ao2 = a + posY + (posX + 1) * lda; + ao3 = a + posY + (posX + 2) * lda; + ao4 = a + posY + (posX + 3) * lda; + } + + i = (m >> 1); + if (i > 0) { + do { + if (X < posY) { + ao1 += 2; + ao2 += 2; + ao3 += 2; + ao4 += 2; + b += 8; + } else if (X > posY) { + for (ii = 0; ii < 2; ii++){ + + b[ 0] = *(ao1 + 0); + b[ 1] = *(ao1 + 
1); + b[ 2] = *(ao1 + 2); + b[ 3] = *(ao1 + 3); + ao1 += lda; + b += 4; + } + + ao2 += 2 * lda; + ao3 += 2 * lda; + ao4 += 2 * lda; + + } else { +#ifdef UNIT + b[ 0] = ONE; +#else + b[ 0] = *(ao1 + 0); +#endif + b[ 1] = ZERO; + b[ 2] = ZERO; + b[ 3] = ZERO; + + b[ 4] = *(ao2 + 0); +#ifdef UNIT + b[ 5] = ONE; +#else + b[ 5] = *(ao2 + 1); +#endif + b[ 6] = ZERO; + b[ 7] = ZERO; + + b[ 8] = *(ao3 + 0); + b[ 9] = *(ao3 + 1); +#ifdef UNIT + b[ 10] = ONE; +#else + b[ 10] = *(ao3 + 2); +#endif + b[ 11] = ZERO; + + b[ 12] = *(ao4 + 0); + b[ 13] = *(ao4 + 1); + b[ 14] = *(ao4 + 2); +#ifdef UNIT + b[ 15] = ONE; +#else + b[ 15] = *(ao4 + 3); +#endif + + ao1 += 4 * lda; + ao2 += 4 * lda; + ao3 += 4 * lda; + ao4 += 4 * lda; + b += 16; + X += 4; + i -= 2; + continue; + } + + X += 2; + i --; + } while (i > 0); + } + + i = (m & 1); + if (i > 0) { + if (X < posY) { + ao1 += i; + ao2 += i; + ao3 += i; + ao4 += i; + b += 4 * i; + } else if (X > posY) { + for (ii = 0; ii < i; ii++){ + + b[ 0] = *(ao1 + 0); + b[ 1] = *(ao1 + 1); + b[ 2] = *(ao1 + 2); + b[ 3] = *(ao1 + 3); + ao1 += lda; + b += 4; + } + ao2 += lda; + ao3 += lda; + ao4 += lda; + } else { #ifdef UNIT - if (i >= 2) { - data05 = *(ao2 + 0); - } - - if (i >= 3) { - data09 = *(ao3 + 0); - data10 = *(ao3 + 1); - } - - b[ 0] = ONE; - b[ 1] = ZERO; - b[ 2] = ZERO; - b[ 3] = ZERO; - b += 4; - - if(i >= 2) { - b[ 0] = data05; - b[ 1] = ONE; - b[ 2] = ZERO; - b[ 3] = ZERO; - b += 4; - } - - if (i >= 3) { - b[ 0] = data09; - b[ 1] = data10; - b[ 2] = ONE; - b[ 3] = ZERO; - b += 4; - } + b[ 0] = ONE; #else - data01 = *(ao1 + 0); - - if (i >= 2) { - data05 = *(ao2 + 0); - data06 = *(ao2 + 1); - } - - if (i >= 3) { - data09 = *(ao3 + 0); - data10 = *(ao3 + 1); - data11 = *(ao3 + 2); - } - - b[ 0] = data01; - b[ 1] = ZERO; - b[ 2] = ZERO; - b[ 3] = ZERO; - b += 4; - - if(i >= 2) { - b[ 0] = data05; - b[ 1] = data06; - b[ 2] = ZERO; - b[ 3] = ZERO; - b += 4; - } - - if (i >= 3) { - b[ 0] = data09; - b[ 1] = data10; - b[ 2] = data11; - b[ 
3] = ZERO; - b += 4; - } + b[ 0] = *(ao1 + 0); #endif - } - } - - posY += 4; - js --; - } while (js > 0); - } /* End of main loop */ - - if (n & 2){ - X = posX; - - if (posX <= posY) { - ao1 = a + posX + (posY + 0) * lda; - ao2 = a + posX + (posY + 1) * lda; - } else { - ao1 = a + posY + (posX + 0) * lda; - ao2 = a + posY + (posX + 1) * lda; - } - - i = (m >> 1); - if (i > 0) { - do { - if (X < posY) { - ao1 += 2; - ao2 += 2; - b += 4; - - } else - if (X > posY) { - data01 = *(ao1 + 0); - data02 = *(ao1 + 1); - data05 = *(ao2 + 0); - data06 = *(ao2 + 1); - - b[ 0] = data01; - b[ 1] = data02; - b[ 2] = data05; - b[ 3] = data06; - - ao1 += 2 * lda; - ao2 += 2 * lda; - b += 4; - } else { + b[ 1] = ZERO; + b[ 2] = ZERO; + b[ 3] = ZERO; + b += 4; + } + } + posY += 4; + } + + if ((n % 6) & 2){ + X = posX; + + if (posX <= posY) { + ao1 = a + posX + (posY + 0) * lda; + ao2 = a + posX + (posY + 1) * lda; + } else { + ao1 = a + posY + (posX + 0) * lda; + ao2 = a + posY + (posX + 1) * lda; + } + + i = (m >> 1); + if (i > 0) { + do { + if (X < posY) { + ao1 += 2; + ao2 += 2; + b += 4; + + } else if (X > posY) { + data01 = *(ao1 + 0); + data02 = *(ao1 + 1); + data05 = *(ao2 + 0); + data06 = *(ao2 + 1); + + b[ 0] = data01; + b[ 1] = data02; + b[ 2] = data05; + b[ 3] = data06; + + ao1 += 2 * lda; + ao2 += 2 * lda; + b += 4; + } else { #ifdef UNIT - data05 = *(ao2 + 0); + data05 = *(ao2 + 0); - b[ 0] = ONE; - b[ 1] = ZERO; - b[ 2] = data05; - b[ 3] = ONE; + b[ 0] = ONE; + b[ 1] = ZERO; + b[ 2] = data05; + b[ 3] = ONE; #else - data01 = *(ao1 + 0); - data05 = *(ao2 + 0); - data06 = *(ao2 + 1); + data01 = *(ao1 + 0); + data05 = *(ao2 + 0); + data06 = *(ao2 + 1); - b[ 0] = data01; - b[ 1] = ZERO; - b[ 2] = data05; - b[ 3] = data06; + b[ 0] = data01; + b[ 1] = ZERO; + b[ 2] = data05; + b[ 3] = data06; #endif - ao1 += 2 * lda; - ao2 += 2 * lda; - b += 4; - } - - X += 2; - i --; - } while (i > 0); - } - - i = (m & 1); - if (i) { - - if (X < posY) { - ao1 += 2; - b += 2; - } else - if (X 
> posY) { - data01 = *(ao1 + 0); - data02 = *(ao1 + 1); - - b[ 0] = data01; - b[ 1] = data02; - - ao1 += lda; - b += 2; - } else { + ao1 += 2 * lda; + ao2 += 2 * lda; + b += 4; + } + + X += 2; + i --; + } while (i > 0); + } + + i = (m & 1); + if (i) { + + if (X < posY) { + ao1 += 2; + b += 2; + } else if (X > posY) { + data01 = *(ao1 + 0); + data02 = *(ao1 + 1); + + b[ 0] = data01; + b[ 1] = data02; + + ao1 += lda; + b += 2; + } else { #ifdef UNIT - b[ 0] = ONE; - b[ 1] = ZERO; + b[ 0] = ONE; + b[ 1] = ZERO; #else - data01 = *(ao1 + 0); + data01 = *(ao1 + 0); - b[ 0] = data01; - b[ 1] = ZERO; + b[ 0] = data01; + b[ 1] = ZERO; #endif - b += 2; - } - } - posY += 2; - } - - if (n & 1){ - X = posX; - - if (posX <= posY) { - ao1 = a + posX + (posY + 0) * lda; - } else { - ao1 = a + posY + (posX + 0) * lda; - } - - i = m; - if (m > 0) { - do { - - if (X < posY) { - b += 1; - ao1 += 1; - } else - if (X > posY) { - data01 = *(ao1 + 0); - b[ 0] = data01; - ao1 += lda; - b += 1; - } else { + b += 2; + } + } + posY += 2; + } + + if ((n % 6) & 1){ + X = posX; + + if (posX <= posY) { + ao1 = a + posX + (posY + 0) * lda; + } else { + ao1 = a + posY + (posX + 0) * lda; + } + + i = m; + if (m > 0) { + do { + if (X < posY) { + b += 1; + ao1 += 1; + } else if (X > posY) { + data01 = *(ao1 + 0); + b[ 0] = data01; + ao1 += lda; + b += 1; + } else { #ifdef UNIT - b[ 0] = ONE; + b[ 0] = ONE; #else - data01 = *(ao1 + 0); - b[ 0] = data01; + data01 = *(ao1 + 0); + b[ 0] = data01; #endif - ao1 += lda; - b += 1; - } + ao1 += lda; + b += 1; + } - X += 1; - i --; - } while (i > 0); - } - } + X += 1; + i --; + } while (i > 0); + } + } - return 0; + return 0; } diff --git a/kernel/generic/trsm_lncopy_6.c b/kernel/generic/trsm_lncopy_6.c index a37c50d1f8..b0cc7ba40c 100644 --- a/kernel/generic/trsm_lncopy_6.c +++ b/kernel/generic/trsm_lncopy_6.c @@ -49,22 +49,35 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG offset, FLOAT BLASLONG i, ii, j, jj; - FLOAT data01, data02, 
data03, data04, data05, data06, data07, data08; - FLOAT data09, data10, data11, data12, data13, data14, data15, data16; - FLOAT *a1, *a2, *a3, *a4; + FLOAT data01, data02, data03, data04, data05, data06; + FLOAT data09, data10, data11, data12, data13, data14; + FLOAT data17, data18, data19, data20, data21, data22; + FLOAT data25, data26, data27, data28, data29, data30; + FLOAT data33, data34, data35, data36, data37, data38; + FLOAT data41, data42, data43, data44, data45, data46; + + FLOAT *a1, *a2, *a3, *a4, *a5, *a6, *a7, *a8; jj = offset; - j = (n >> 2); + BLASLONG mmod6, nmod6; + mmod6 = m - (m/6)*6 ; + nmod6 = n - (n/6)*6 ; + + // j = (n >> 3); + j = (n / 6); while (j > 0){ a1 = a + 0 * lda; a2 = a + 1 * lda; a3 = a + 2 * lda; a4 = a + 3 * lda; + a5 = a + 4 * lda; + a6 = a + 5 * lda; - i = (m >> 2); ii = 0; + // i = (m >> 3); + i = (m / 6); while (i > 0) { if (ii == jj) { @@ -74,233 +87,562 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG offset, FLOAT data02 = *(a1 + 1); data03 = *(a1 + 2); data04 = *(a1 + 3); + data05 = *(a1 + 4); + data06 = *(a1 + 5); + +#ifndef UNIT + data10 = *(a2 + 1); +#endif + data11 = *(a2 + 2); + data12 = *(a2 + 3); + data13 = *(a2 + 4); + data14 = *(a2 + 5); #ifndef UNIT - data06 = *(a2 + 1); + data19 = *(a3 + 2); #endif - data07 = *(a2 + 2); - data08 = *(a2 + 3); + data20 = *(a3 + 3); + data21 = *(a3 + 4); + data22 = *(a3 + 5); #ifndef UNIT - data11 = *(a3 + 2); + data28 = *(a4 + 3); #endif - data12 = *(a3 + 3); + data29 = *(a4 + 4); + data30 = *(a4 + 5); #ifndef UNIT - data16 = *(a4 + 3); + data37 = *(a5 + 4); +#endif + data38 = *(a5 + 5); + +#ifndef UNIT + data46 = *(a6 + 5); #endif *(b + 0) = INV(data01); - *(b + 4) = data02; - *(b + 5) = INV(data06); + *(b + 6) = data02; + *(b + 7) = INV(data10); - *(b + 8) = data03; - *(b + 9) = data07; - *(b + 10) = INV(data11); + *(b + 12) = data03; + *(b + 13) = data11; + *(b + 14) = INV(data19); + + *(b + 18) = data04; + *(b + 19) = data12; + *(b + 20) = data20; + *(b + 
21) = INV(data28); + + *(b + 24) = data05; + *(b + 25) = data13; + *(b + 26) = data21; + *(b + 27) = data29; + *(b + 28) = INV(data37); + + *(b + 30) = data06; + *(b + 31) = data14; + *(b + 32) = data22; + *(b + 33) = data30; + *(b + 34) = data38; + *(b + 35) = INV(data46); - *(b + 12) = data04; - *(b + 13) = data08; - *(b + 14) = data12; - *(b + 15) = INV(data16); } if (ii > jj) { + data01 = *(a1 + 0); + data02 = *(a1 + 1); + data03 = *(a1 + 2); + data04 = *(a1 + 3); + data05 = *(a1 + 4); + data06 = *(a1 + 5); + + data09 = *(a2 + 0); + data10 = *(a2 + 1); + data11 = *(a2 + 2); + data12 = *(a2 + 3); + data13 = *(a2 + 4); + data14 = *(a2 + 5); + + data17 = *(a3 + 0); + data18 = *(a3 + 1); + data19 = *(a3 + 2); + data20 = *(a3 + 3); + data21 = *(a3 + 4); + data22 = *(a3 + 5); + + data25 = *(a4 + 0); + data26 = *(a4 + 1); + data27 = *(a4 + 2); + data28 = *(a4 + 3); + data29 = *(a4 + 4); + data30 = *(a4 + 5); + + data33 = *(a5 + 0); + data34 = *(a5 + 1); + data35 = *(a5 + 2); + data36 = *(a5 + 3); + data37 = *(a5 + 4); + data38 = *(a5 + 5); + + data41 = *(a6 + 0); + data42 = *(a6 + 1); + data43 = *(a6 + 2); + data44 = *(a6 + 3); + data45 = *(a6 + 4); + data46 = *(a6 + 5); + + *(b + 0) = data01; + *(b + 1) = data09; + *(b + 2) = data17; + *(b + 3) = data25; + *(b + 4) = data33; + *(b + 5) = data41; + + *(b + 6) = data02; + *(b + 7) = data10; + *(b + 8) = data18; + *(b + 9) = data26; + *(b + 10) = data34; + *(b + 11) = data42; + + *(b + 12) = data03; + *(b + 13) = data11; + *(b + 14) = data19; + *(b + 15) = data27; + *(b + 16) = data35; + *(b + 17) = data43; + + *(b + 18) = data04; + *(b + 19) = data12; + *(b + 20) = data20; + *(b + 21) = data28; + *(b + 22) = data36; + *(b + 23) = data44; + + *(b + 24) = data05; + *(b + 25) = data13; + *(b + 26) = data21; + *(b + 27) = data29; + *(b + 28) = data37; + *(b + 29) = data45; + + *(b + 30) = data06; + *(b + 31) = data14; + *(b + 32) = data22; + *(b + 33) = data30; + *(b + 34) = data38; + *(b + 35) = data46; + } + + a1 += 6; + 
a2 += 6; + a3 += 6; + a4 += 6; + a5 += 6; + a6 += 6; + a7 += 6; + a8 += 6; + b += 36; + + i --; + ii += 6; + } + + if (mmod6 & 4) { + if (ii == jj) { +#ifndef UNIT data01 = *(a1 + 0); +#endif data02 = *(a1 + 1); data03 = *(a1 + 2); data04 = *(a1 + 3); - data05 = *(a2 + 0); - data06 = *(a2 + 1); - data07 = *(a2 + 2); - data08 = *(a2 + 3); +#ifndef UNIT + data10 = *(a2 + 1); +#endif + data11 = *(a2 + 2); + data12 = *(a2 + 3); + + +#ifndef UNIT + data19 = *(a3 + 2); +#endif + data20 = *(a3 + 3); - data09 = *(a3 + 0); - data10 = *(a3 + 1); - data11 = *(a3 + 2); - data12 = *(a3 + 3); +#ifndef UNIT + data28 = *(a4 + 3); +#endif + + *(b + 0) = INV(data01); - data13 = *(a4 + 0); - data14 = *(a4 + 1); - data15 = *(a4 + 2); - data16 = *(a4 + 3); + *(b + 6) = data02; + *(b + 7) = INV(data10); + + *(b + 12) = data03; + *(b + 13) = data11; + *(b + 14) = INV(data19); + + *(b + 18) = data04; + *(b + 19) = data12; + *(b + 20) = data20; + *(b + 21) = INV(data28); + } + + if (ii > jj) { + data01 = *(a1 + 0); + data02 = *(a1 + 1); + data03 = *(a1 + 2); + data04 = *(a1 + 3); + data09 = *(a2 + 0); + data10 = *(a2 + 1); + data11 = *(a2 + 2); + data12 = *(a2 + 3); + + data17 = *(a3 + 0); + data18 = *(a3 + 1); + data19 = *(a3 + 2); + data20 = *(a3 + 3); + data25 = *(a4 + 0); + data26 = *(a4 + 1); + data27 = *(a4 + 2); + data28 = *(a4 + 3); + + data33 = *(a5 + 0); + data34 = *(a5 + 1); + data35 = *(a5 + 2); + data36 = *(a5 + 3); + data41 = *(a6 + 0); + data42 = *(a6 + 1); + data43 = *(a6 + 2); + data44 = *(a6 + 3); *(b + 0) = data01; - *(b + 1) = data05; - *(b + 2) = data09; - *(b + 3) = data13; - *(b + 4) = data02; - *(b + 5) = data06; - *(b + 6) = data10; - *(b + 7) = data14; + *(b + 1) = data09; + *(b + 2) = data17; + *(b + 3) = data25; + *(b + 4) = data33; + *(b + 5) = data41; + + *(b + 6) = data02; + *(b + 7) = data10; + *(b + 8) = data18; + *(b + 9) = data26; + *(b + 10) = data34; + *(b + 11) = data42; + + *(b + 12) = data03; + *(b + 13) = data11; + *(b + 14) = data19; + *(b + 15) = 
data27; + *(b + 16) = data35; + *(b + 17) = data43; + + *(b + 18) = data04; + *(b + 19) = data12; + *(b + 20) = data20; + *(b + 21) = data28; + *(b + 22) = data36; + *(b + 23) = data44; - *(b + 8) = data03; - *(b + 9) = data07; - *(b + 10) = data11; - *(b + 11) = data15; - *(b + 12) = data04; - *(b + 13) = data08; - *(b + 14) = data12; - *(b + 15) = data16; } a1 += 4; a2 += 4; a3 += 4; a4 += 4; - b += 16; - - i --; + a5 += 4; + a6 += 4; + a7 += 4; + a8 += 4; + b += 24; ii += 4; } - if ((m & 2) != 0) { - - if (ii== jj) { + if (mmod6 & 2) { + if (ii == jj) { #ifndef UNIT data01 = *(a1 + 0); #endif data02 = *(a1 + 1); #ifndef UNIT - data06 = *(a2 + 1); + data10 = *(a2 + 1); #endif *(b + 0) = INV(data01); - *(b + 4) = data02; - *(b + 5) = INV(data06); + *(b + 6) = data02; + *(b + 7) = INV(data10); } if (ii > jj) { data01 = *(a1 + 0); data02 = *(a1 + 1); - data03 = *(a2 + 0); - data04 = *(a2 + 1); - data05 = *(a3 + 0); - data06 = *(a3 + 1); - data07 = *(a4 + 0); - data08 = *(a4 + 1); + data09 = *(a2 + 0); + data10 = *(a2 + 1); + data17 = *(a3 + 0); + data18 = *(a3 + 1); + data25 = *(a4 + 0); + data26 = *(a4 + 1); + + data33 = *(a5 + 0); + data34 = *(a5 + 1); + data41 = *(a6 + 0); + data42 = *(a6 + 1); *(b + 0) = data01; - *(b + 1) = data03; - *(b + 2) = data05; - *(b + 3) = data07; - *(b + 4) = data02; - *(b + 5) = data04; - *(b + 6) = data06; - *(b + 7) = data08; + *(b + 1) = data09; + *(b + 2) = data17; + *(b + 3) = data25; + *(b + 4) = data33; + *(b + 5) = data41; + + *(b + 6) = data02; + *(b + 7) = data10; + *(b + 8) = data18; + *(b + 9) = data26; + *(b + 10) = data34; + *(b + 11) = data42; } a1 += 2; a2 += 2; a3 += 2; a4 += 2; - b += 8; - + a5 += 2; + a6 += 2; + a7 += 2; + a8 += 2; + b += 12; ii += 2; } - if ((m & 1) != 0) { + if (mmod6 & 1) { + if (ii == jj) { +#ifndef UNIT + data01 = *(a1 + 0); +#endif + + *(b + 0) = INV(data01); + } + + if (ii > jj) { + data01 = *(a1 + 0); + data09 = *(a2 + 0); + data17 = *(a3 + 0); + data25 = *(a4 + 0); + data33 = *(a5 + 0); + 
data41 = *(a6 + 0); + + *(b + 0) = data01; + *(b + 1) = data09; + *(b + 2) = data17; + *(b + 3) = data25; + *(b + 4) = data33; + *(b + 5) = data41; + } + b += 6; + } + + a += 6 * lda; + jj += 6; + j --; + } + + if (nmod6 & 4) { + a1 = a + 0 * lda; + a2 = a + 1 * lda; + a3 = a + 2 * lda; + a4 = a + 3 * lda; + + ii = 0; + i = (m >> 1); + while (i > 0) { + + if (ii == jj) { +#ifndef UNIT + data01 = *(a1 + 0); +#endif + data02 = *(a1 + 1); + data03 = *(a1 + 2); + data04 = *(a1 + 3); + +#ifndef UNIT + data10 = *(a2 + 1); +#endif + data11 = *(a2 + 2); + data12 = *(a2 + 3); + +#ifndef UNIT + data19 = *(a3 + 2); +#endif + data20 = *(a3 + 3); + +#ifndef UNIT + data28 = *(a4 + 3); +#endif + + *(b + 0) = INV(data01); + + *(b + 4) = data02; + *(b + 5) = INV(data10); + + *(b + 8) = data03; + *(b + 9) = data11; + *(b + 10) = INV(data19); + + *(b + 12) = data04; + *(b + 13) = data12; + *(b + 14) = data20; + *(b + 15) = INV(data28); + + a1 += 4; + a2 += 4; + a3 += 4; + a4 += 4; + b += 16; + + i -= 2; + ii += 4; + } + + else if (ii > jj) { + data01 = *(a1 + 0); + data02 = *(a1 + 1); + data09 = *(a2 + 0); + data10 = *(a2 + 1); + data17 = *(a3 + 0); + data18 = *(a3 + 1); + data25 = *(a4 + 0); + data26 = *(a4 + 1); + + *(b + 0) = data01; + *(b + 1) = data09; + *(b + 2) = data17; + *(b + 3) = data25; + *(b + 4) = data02; + *(b + 5) = data10; + *(b + 6) = data18; + *(b + 7) = data26; + + a1 += 2; + a2 += 2; + a3 += 2; + a4 += 2; + b += 8; + i -- ; + ii += 2; + } + + else { + a1 += 2; + a2 += 2; + a3 += 2; + a4 += 2; + b += 8; + i -- ; + ii += 2; + } + } - if (ii== jj) { + if (m & 1) { + if (ii == jj) { #ifndef UNIT data01 = *(a1 + 0); #endif *(b + 0) = INV(data01); } - if (ii > jj) { + if (ii > jj) { data01 = *(a1 + 0); - data02 = *(a2 + 0); - data03 = *(a3 + 0); - data04 = *(a4 + 0); + data09 = *(a2 + 0); + data17 = *(a3 + 0); + data25 = *(a4 + 0); *(b + 0) = data01; - *(b + 1) = data02; - *(b + 2) = data03; - *(b + 3) = data04; + *(b + 1) = data09; + *(b + 2) = data17; + *(b + 3) = 
data25; } - b += 4; + b += 4; } a += 4 * lda; jj += 4; - j --; } - if (n & 2) { + if (nmod6 & 2) { a1 = a + 0 * lda; a2 = a + 1 * lda; - i = (m >> 1); ii = 0; + i = (m >> 1); while (i > 0) { if (ii == jj) { - #ifndef UNIT data01 = *(a1 + 0); #endif data02 = *(a1 + 1); #ifndef UNIT - data04 = *(a2 + 1); + data10 = *(a2 + 1); #endif *(b + 0) = INV(data01); *(b + 2) = data02; - *(b + 3) = INV(data04); + *(b + 3) = INV(data10); } if (ii > jj) { data01 = *(a1 + 0); data02 = *(a1 + 1); - data03 = *(a2 + 0); - data04 = *(a2 + 1); + data09 = *(a2 + 0); + data10 = *(a2 + 1); *(b + 0) = data01; - *(b + 1) = data03; + *(b + 1) = data09; *(b + 2) = data02; - *(b + 3) = data04; + *(b + 3) = data10; } a1 += 2; a2 += 2; - b += 4; + b += 4; i --; ii += 2; } - if ((m & 1) != 0) { - - if (ii== jj) { + if (m & 1) { + if (ii == jj) { #ifndef UNIT data01 = *(a1 + 0); #endif *(b + 0) = INV(data01); } - if (ii > jj) { + if (ii > jj) { data01 = *(a1 + 0); - data02 = *(a2 + 0); + data09 = *(a2 + 0); + *(b + 0) = data01; - *(b + 1) = data02; + *(b + 1) = data09; } - b += 2; + b += 2; } + a += 2 * lda; jj += 2; } - if (n & 1) { + if (nmod6 & 1) { a1 = a + 0 * lda; - i = m; ii = 0; + i = m; while (i > 0) { if (ii == jj) { @@ -315,8 +657,9 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG offset, FLOAT *(b + 0) = data01; } - a1+= 1; - b += 1; + a1 += 1; + b += 1; + i --; ii += 1; } diff --git a/kernel/generic/trsm_ltcopy_6.c b/kernel/generic/trsm_ltcopy_6.c index 12043eb335..9cda3d72ff 100644 --- a/kernel/generic/trsm_ltcopy_6.c +++ b/kernel/generic/trsm_ltcopy_6.c @@ -49,22 +49,35 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG offset, FLOAT BLASLONG i, ii, j, jj; - FLOAT data01, data02, data03, data04, data05, data06, data07, data08; - FLOAT data09, data10, data11, data12, data13, data14, data15, data16; - FLOAT *a1, *a2, *a3, *a4; + FLOAT data01, data02, data03, data04, data05, data06; + FLOAT data09, data10, data11, data12, data13, data14; + FLOAT 
data17, data18, data19, data20, data21, data22; + FLOAT data25, data26, data27, data28, data29, data30; + FLOAT data33, data34, data35, data36, data37, data38; + FLOAT data41, data42, data43, data44, data45, data46; + + FLOAT *a1, *a2, *a3, *a4, *a5, *a6, *a7, *a8; jj = offset; - j = (n >> 2); + BLASLONG mmod6, nmod6, k; + mmod6 = m - (m/6)*6 ; + nmod6 = n - (n/6)*6 ; + + // j = (n >> 3); + j = (n / 6); while (j > 0){ a1 = a + 0 * lda; a2 = a + 1 * lda; a3 = a + 2 * lda; a4 = a + 3 * lda; + a5 = a + 4 * lda; + a6 = a + 5 * lda; - i = (m >> 2); ii = 0; + // i = (m >> 3); + i = (m / 6); while (i > 0) { if (ii == jj) { @@ -75,35 +88,65 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG offset, FLOAT data02 = *(a1 + 1); data03 = *(a1 + 2); data04 = *(a1 + 3); + data05 = *(a1 + 4); + data06 = *(a1 + 5); #ifndef UNIT - data06 = *(a2 + 1); + data10 = *(a2 + 1); #endif - data07 = *(a2 + 2); - data08 = *(a2 + 3); + data11 = *(a2 + 2); + data12 = *(a2 + 3); + data13 = *(a2 + 4); + data14 = *(a2 + 5); #ifndef UNIT - data11 = *(a3 + 2); + data19 = *(a3 + 2); #endif - data12 = *(a3 + 3); + data20 = *(a3 + 3); + data21 = *(a3 + 4); + data22 = *(a3 + 5); #ifndef UNIT - data16 = *(a4 + 3); + data28 = *(a4 + 3); +#endif + data29 = *(a4 + 4); + data30 = *(a4 + 5); + +#ifndef UNIT + data37 = *(a5 + 4); +#endif + data38 = *(a5 + 5); + +#ifndef UNIT + data46 = *(a6 + 5); #endif *(b + 0) = INV(data01); *(b + 1) = data02; *(b + 2) = data03; *(b + 3) = data04; + *(b + 4) = data05; + *(b + 5) = data06; - *(b + 5) = INV(data06); - *(b + 6) = data07; - *(b + 7) = data08; + *(b + 7) = INV(data10); + *(b + 8) = data11; + *(b + 9) = data12; + *(b + 10) = data13; + *(b + 11) = data14; - *(b + 10) = INV(data11); - *(b + 11) = data12; + *(b + 14) = INV(data19); + *(b + 15) = data20; + *(b + 16) = data21; + *(b + 17) = data22; - *(b + 15) = INV(data16); + *(b + 21) = INV(data28); + *(b + 22) = data29; + *(b + 23) = data30; + + *(b + 28) = INV(data37); + *(b + 29) = data38; + + 
*(b + 35) = INV(data46); } if (ii < jj) { @@ -111,21 +154,182 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG offset, FLOAT data02 = *(a1 + 1); data03 = *(a1 + 2); data04 = *(a1 + 3); + data05 = *(a1 + 4); + data06 = *(a1 + 5); + + data09 = *(a2 + 0); + data10 = *(a2 + 1); + data11 = *(a2 + 2); + data12 = *(a2 + 3); + data13 = *(a2 + 4); + data14 = *(a2 + 5); + + data17 = *(a3 + 0); + data18 = *(a3 + 1); + data19 = *(a3 + 2); + data20 = *(a3 + 3); + data21 = *(a3 + 4); + data22 = *(a3 + 5); + + data25 = *(a4 + 0); + data26 = *(a4 + 1); + data27 = *(a4 + 2); + data28 = *(a4 + 3); + data29 = *(a4 + 4); + data30 = *(a4 + 5); + + data33 = *(a5 + 0); + data34 = *(a5 + 1); + data35 = *(a5 + 2); + data36 = *(a5 + 3); + data37 = *(a5 + 4); + data38 = *(a5 + 5); + + data41 = *(a6 + 0); + data42 = *(a6 + 1); + data43 = *(a6 + 2); + data44 = *(a6 + 3); + data45 = *(a6 + 4); + data46 = *(a6 + 5); + + *(b + 0) = data01; + *(b + 1) = data02; + *(b + 2) = data03; + *(b + 3) = data04; + *(b + 4) = data05; + *(b + 5) = data06; + *(b + 6) = data09; + *(b + 7) = data10; + *(b + 8) = data11; + *(b + 9) = data12; + *(b + 10) = data13; + *(b + 11) = data14; + + *(b + 12) = data17; + *(b + 13) = data18; + *(b + 14) = data19; + *(b + 15) = data20; + *(b + 16) = data21; + *(b + 17) = data22; + *(b + 18) = data25; + *(b + 19) = data26; + *(b + 20) = data27; + *(b + 21) = data28; + *(b + 22) = data29; + *(b + 23) = data30; + + *(b + 24) = data33; + *(b + 25) = data34; + *(b + 26) = data35; + *(b + 27) = data36; + *(b + 28) = data37; + *(b + 29) = data38; + *(b + 30) = data41; + *(b + 31) = data42; + *(b + 32) = data43; + *(b + 33) = data44; + *(b + 34) = data45; + *(b + 35) = data46; + } - data05 = *(a2 + 0); - data06 = *(a2 + 1); - data07 = *(a2 + 2); - data08 = *(a2 + 3); + a1 += 6 * lda; + a2 += 6 * lda; + a3 += 6 * lda; + a4 += 6 * lda; + a5 += 6 * lda; + a6 += 6 * lda; + a7 += 6 * lda; + a8 += 6 * lda; + b += 36; - data09 = *(a3 + 0); - data10 = *(a3 + 1); - data11 
= *(a3 + 2); - data12 = *(a3 + 3); + i --; + ii += 6; + } - data13 = *(a4 + 0); - data14 = *(a4 + 1); - data15 = *(a4 + 2); - data16 = *(a4 + 3); + if (mmod6 & 4) { + if (ii == jj) { + +#ifndef UNIT + data01 = *(a1 + 0); +#endif + data02 = *(a1 + 1); + data03 = *(a1 + 2); + data04 = *(a1 + 3); + data05 = *(a1 + 4); + data06 = *(a1 + 5); + +#ifndef UNIT + data10 = *(a2 + 1); +#endif + data11 = *(a2 + 2); + data12 = *(a2 + 3); + data13 = *(a2 + 4); + data14 = *(a2 + 5); + +#ifndef UNIT + data19 = *(a3 + 2); +#endif + data20 = *(a3 + 3); + data21 = *(a3 + 4); + data22 = *(a3 + 5); + +#ifndef UNIT + data28 = *(a4 + 3); +#endif + data29 = *(a4 + 4); + data30 = *(a4 + 5); + + *(b + 0) = INV(data01); + *(b + 1) = data02; + *(b + 2) = data03; + *(b + 3) = data04; + *(b + 4) = data05; + *(b + 5) = data06; + + *(b + 7) = INV(data10); + *(b + 8) = data11; + *(b + 9) = data12; + *(b + 10) = data13; + *(b + 11) = data14; + + *(b + 14) = INV(data19); + *(b + 15) = data20; + *(b + 16) = data21; + *(b + 17) = data22; + + *(b + 21) = INV(data28); + *(b + 22) = data29; + *(b + 23) = data30; + } + + if (ii < jj) { + data01 = *(a1 + 0); + data02 = *(a1 + 1); + data03 = *(a1 + 2); + data04 = *(a1 + 3); + data05 = *(a1 + 4); + data06 = *(a1 + 5); + + data09 = *(a2 + 0); + data10 = *(a2 + 1); + data11 = *(a2 + 2); + data12 = *(a2 + 3); + data13 = *(a2 + 4); + data14 = *(a2 + 5); + + data17 = *(a3 + 0); + data18 = *(a3 + 1); + data19 = *(a3 + 2); + data20 = *(a3 + 3); + data21 = *(a3 + 4); + data22 = *(a3 + 5); + + data25 = *(a4 + 0); + data26 = *(a4 + 1); + data27 = *(a4 + 2); + data28 = *(a4 + 3); + data29 = *(a4 + 4); + data30 = *(a4 + 5); *(b + 0) = data01; *(b + 1) = data02; @@ -133,32 +337,38 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG offset, FLOAT *(b + 3) = data04; *(b + 4) = data05; *(b + 5) = data06; - *(b + 6) = data07; - *(b + 7) = data08; - - *(b + 8) = data09; - *(b + 9) = data10; - *(b + 10) = data11; - *(b + 11) = data12; - *(b + 12) = data13; - 
*(b + 13) = data14; - *(b + 14) = data15; - *(b + 15) = data16; + *(b + 6) = data09; + *(b + 7) = data10; + *(b + 8) = data11; + *(b + 9) = data12; + *(b + 10) = data13; + *(b + 11) = data14; + + *(b + 12) = data17; + *(b + 13) = data18; + *(b + 14) = data19; + *(b + 15) = data20; + *(b + 16) = data21; + *(b + 17) = data22; + *(b + 18) = data25; + *(b + 19) = data26; + *(b + 20) = data27; + *(b + 21) = data28; + *(b + 22) = data29; + *(b + 23) = data30; } a1 += 4 * lda; a2 += 4 * lda; - a3 += 4 * lda; - a4 += 4 * lda; - b += 16; + /* a3 += 4 * lda; + a4 += 4 * lda; */ + b += 24; - i --; ii += 4; } - if ((m & 2) != 0) { - - if (ii== jj) { + if (mmod6 & 2) { + if (ii == jj) { #ifndef UNIT data01 = *(a1 + 0); @@ -166,22 +376,29 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG offset, FLOAT data02 = *(a1 + 1); data03 = *(a1 + 2); data04 = *(a1 + 3); + data05 = *(a1 + 4); + data06 = *(a1 + 5); #ifndef UNIT - data06 = *(a2 + 1); + data10 = *(a2 + 1); #endif - data07 = *(a2 + 2); - data08 = *(a2 + 3); + data11 = *(a2 + 2); + data12 = *(a2 + 3); + data13 = *(a2 + 4); + data14 = *(a2 + 5); *(b + 0) = INV(data01); *(b + 1) = data02; *(b + 2) = data03; *(b + 3) = data04; + *(b + 4) = data05; + *(b + 5) = data06; - *(b + 5) = INV(data06); - *(b + 6) = data07; - *(b + 7) = data08; - + *(b + 7) = INV(data10); + *(b + 8) = data11; + *(b + 9) = data12; + *(b + 10) = data13; + *(b + 11) = data14; } if (ii < jj) { @@ -189,11 +406,15 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG offset, FLOAT data02 = *(a1 + 1); data03 = *(a1 + 2); data04 = *(a1 + 3); + data05 = *(a1 + 4); + data06 = *(a1 + 5); - data05 = *(a2 + 0); - data06 = *(a2 + 1); - data07 = *(a2 + 2); - data08 = *(a2 + 3); + data09 = *(a2 + 0); + data10 = *(a2 + 1); + data11 = *(a2 + 2); + data12 = *(a2 + 3); + data13 = *(a2 + 4); + data14 = *(a2 + 5); *(b + 0) = data01; *(b + 1) = data02; @@ -201,20 +422,23 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG 
offset, FLOAT *(b + 3) = data04; *(b + 4) = data05; *(b + 5) = data06; - *(b + 6) = data07; - *(b + 7) = data08; + *(b + 6) = data09; + *(b + 7) = data10; + *(b + 8) = data11; + *(b + 9) = data12; + *(b + 10) = data13; + *(b + 11) = data14; } a1 += 2 * lda; - a2 += 2 * lda; - b += 8; + // a2 += 2 * lda; + b += 12; ii += 2; } - if ((m & 1) != 0) { - - if (ii== jj) { + if (mmod6 & 1) { + if (ii == jj) { #ifndef UNIT data01 = *(a1 + 0); @@ -222,38 +446,78 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG offset, FLOAT data02 = *(a1 + 1); data03 = *(a1 + 2); data04 = *(a1 + 3); + data05 = *(a1 + 4); + data06 = *(a1 + 5); *(b + 0) = INV(data01); *(b + 1) = data02; *(b + 2) = data03; *(b + 3) = data04; + *(b + 4) = data05; + *(b + 5) = data06; } - if (ii < jj) { + if (ii < jj) { data01 = *(a1 + 0); data02 = *(a1 + 1); data03 = *(a1 + 2); data04 = *(a1 + 3); + data05 = *(a1 + 4); + data06 = *(a1 + 5); *(b + 0) = data01; *(b + 1) = data02; *(b + 2) = data03; *(b + 3) = data04; + *(b + 4) = data05; + *(b + 5) = data06; } - b += 4; + b += 6; } + a += 6; + jj += 6; + j --; + } + if (nmod6 & 4) { + + a1 = a; a += 4; + ii = 0; + + for (i = 0; i < m; i++) { + + if ((ii >= jj ) && (ii - jj < 4)) { + *(b + ii - jj) = INV(*(a1 + ii - jj)); + + for (k = ii - jj + 1; k < 4; k ++) { + *(b + k) = *(a1 + k); + } + + } + + if (ii - jj < 0) { + *(b + 0) = *(a1 + 0); + *(b + 1) = *(a1 + 1); + *(b + 2) = *(a1 + 2); + *(b + 3) = *(a1 + 3); + } + + b += 4; + a1 += lda; + ii ++; + } + jj += 4; - j --; } - if (n & 2) { + if (nmod6 & 2) { + a1 = a + 0 * lda; a2 = a + 1 * lda; - i = (m >> 1); ii = 0; + i = (m >> 1); while (i > 0) { if (ii == jj) { @@ -264,25 +528,24 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG offset, FLOAT data02 = *(a1 + 1); #ifndef UNIT - data04 = *(a2 + 1); + data10 = *(a2 + 1); #endif *(b + 0) = INV(data01); *(b + 1) = data02; - - *(b + 3) = INV(data04); + *(b + 3) = INV(data10); } if (ii < jj) { data01 = *(a1 + 0); data02 = *(a1 
+ 1); - data03 = *(a2 + 0); - data04 = *(a2 + 1); + data09 = *(a2 + 0); + data10 = *(a2 + 1); *(b + 0) = data01; *(b + 1) = data02; - *(b + 2) = data03; - *(b + 3) = data04; + *(b + 2) = data09; + *(b + 3) = data10; } a1 += 2 * lda; @@ -293,19 +556,22 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG offset, FLOAT ii += 2; } - if ((m & 1) != 0) { - - if (ii== jj) { + if (m & 1) { + if (ii == jj) { #ifndef UNIT data01 = *(a1 + 0); #endif + // data02 = *(a1 + 1); + *(b + 0) = INV(data01); + // *(b + 1) = data02; } - if (ii < jj) { + if (ii < jj) { data01 = *(a1 + 0); data02 = *(a1 + 1); + *(b + 0) = data01; *(b + 1) = data02; } @@ -315,11 +581,12 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG offset, FLOAT jj += 2; } - if (n & 1) { + if (nmod6 & 1) { + a1 = a + 0 * lda; - i = m; ii = 0; + i = m; while (i > 0) { if (ii == jj) { @@ -334,12 +601,13 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG offset, FLOAT *(b + 0) = data01; } - a1 += 1 * lda; + a1 += lda; b += 1; i --; ii += 1; } + } return 0; diff --git a/kernel/generic/trsm_uncopy_6.c b/kernel/generic/trsm_uncopy_6.c index a1bb1e2034..e20773da47 100644 --- a/kernel/generic/trsm_uncopy_6.c +++ b/kernel/generic/trsm_uncopy_6.c @@ -36,7 +36,6 @@ /* or implied, of The University of Texas at Austin. 
*/ /*********************************************************************/ -#include #include "common.h" #ifndef UNIT @@ -49,22 +48,38 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG offset, FLOAT BLASLONG i, ii, j, jj; - FLOAT data01, data02, data03, data04, data05, data06, data07, data08; - FLOAT data09, data10, data11, data12, data13, data14, data15, data16; - FLOAT *a1, *a2, *a3, *a4; + FLOAT data01, data02, data03, data04, data05, data06; + FLOAT data09, data10, data11, data12, data13, data14; + FLOAT data17, data18, data19, data20, data21, data22; + FLOAT data25, data26, data27, data28, data29, data30; + FLOAT data33, data34, data35, data36, data37, data38; + FLOAT data41, data42, data43, data44, data45, data46; + + FLOAT *a1, *a2, *a3, *a4, *a5, *a6, *a7, *a8; jj = offset; - j = (n >> 2); + BLASLONG mmod6, nmod6; + mmod6 = m - (m/6)*6 ; + nmod6 = n - (n/6)*6 ; + + // j = (n >> 3); + j = (n / 6); while (j > 0){ a1 = a + 0 * lda; a2 = a + 1 * lda; a3 = a + 2 * lda; a4 = a + 3 * lda; + a5 = a + 4 * lda; + a6 = a + 5 * lda; + // a7 = a + 6 * lda; + // a8 = a + 7 * lda; - i = (m >> 2); ii = 0; + + // i = (m >> 3); + i = (m / 6); while (i > 0) { if (ii == jj) { @@ -73,188 +88,729 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG offset, FLOAT data01 = *(a1 + 0); #endif - data05 = *(a2 + 0); + data09 = *(a2 + 0); #ifndef UNIT - data06 = *(a2 + 1); + data10 = *(a2 + 1); #endif - data09 = *(a3 + 0); - data10 = *(a3 + 1); + data17 = *(a3 + 0); + data18 = *(a3 + 1); #ifndef UNIT - data11 = *(a3 + 2); + data19 = *(a3 + 2); #endif - data13 = *(a4 + 0); - data14 = *(a4 + 1); - data15 = *(a4 + 2); + data25 = *(a4 + 0); + data26 = *(a4 + 1); + data27 = *(a4 + 2); #ifndef UNIT - data16 = *(a4 + 3); + data28 = *(a4 + 3); #endif - *(b + 0) = INV(data01); - *(b + 1) = data05; - *(b + 2) = data09; - *(b + 3) = data13; + data33 = *(a5 + 0); + data34 = *(a5 + 1); + data35 = *(a5 + 2); + data36 = *(a5 + 3); +#ifndef UNIT + data37 = *(a5 + 
4); +#endif - *(b + 5) = INV(data06); - *(b + 6) = data10; - *(b + 7) = data14; + data41 = *(a6 + 0); + data42 = *(a6 + 1); + data43 = *(a6 + 2); + data44 = *(a6 + 3); + data45 = *(a6 + 4); +#ifndef UNIT + data46 = *(a6 + 5); +#endif - *(b + 10) = INV(data11); - *(b + 11) = data15; +// data49 = *(a7 + 0); +// data50 = *(a7 + 1); +// data51 = *(a7 + 2); +// data52 = *(a7 + 3); +// data53 = *(a7 + 4); +// data54 = *(a7 + 5); +// #ifndef UNIT +// data55 = *(a7 + 6); +// #endif +// +// data57 = *(a8 + 0); +// data58 = *(a8 + 1); +// data59 = *(a8 + 2); +// data60 = *(a8 + 3); +// data61 = *(a8 + 4); +// data62 = *(a8 + 5); +// data63 = *(a8 + 6); +// #ifndef UNIT +// data64 = *(a8 + 7); +// #endif - *(b + 15) = INV(data16); + *(b + 0) = INV(data01); + *(b + 1) = data09; + *(b + 2) = data17; + *(b + 3) = data25; + *(b + 4) = data33; + *(b + 5) = data41; + // *(b + 6) = data49; + // *(b + 7) = data57; + + *(b + 7) = INV(data10); + *(b + 8) = data18; + *(b + 9) = data26; + *(b + 10) = data34; + *(b + 11) = data42; + // *(b + 14) = data50; + // *(b + 15) = data58; + + *(b + 14) = INV(data19); + *(b + 15) = data27; + *(b + 16) = data35; + *(b + 17) = data43; + // *(b + 22) = data51; + // *(b + 23) = data59; + + *(b + 21) = INV(data28); + *(b + 22) = data36; + *(b + 23) = data44; + // *(b + 30) = data52; + // *(b + 31) = data60; + + *(b + 28) = INV(data37); + *(b + 29) = data45; + // *(b + 38) = data53; + // *(b + 39) = data61; + + *(b + 35) = INV(data46); + // *(b + 46) = data54; + // *(b + 47) = data62; + + // *(b + 54) = INV(data55); + // *(b + 55) = data63; + + // *(b + 63) = INV(data64); } if (ii < jj) { - data01 = *(a1 + 0); data02 = *(a1 + 1); data03 = *(a1 + 2); data04 = *(a1 + 3); + data05 = *(a1 + 4); + data06 = *(a1 + 5); + // data07 = *(a1 + 6); + // data08 = *(a1 + 7); + + data09 = *(a2 + 0); + data10 = *(a2 + 1); + data11 = *(a2 + 2); + data12 = *(a2 + 3); + data13 = *(a2 + 4); + data14 = *(a2 + 5); + // data15 = *(a2 + 6); + // data16 = *(a2 + 7); + + data17 = 
*(a3 + 0); + data18 = *(a3 + 1); + data19 = *(a3 + 2); + data20 = *(a3 + 3); + data21 = *(a3 + 4); + data22 = *(a3 + 5); + // data23 = *(a3 + 6); + // data24 = *(a3 + 7); + + data25 = *(a4 + 0); + data26 = *(a4 + 1); + data27 = *(a4 + 2); + data28 = *(a4 + 3); + data29 = *(a4 + 4); + data30 = *(a4 + 5); + // data31 = *(a4 + 6); + // data32 = *(a4 + 7); + + data33 = *(a5 + 0); + data34 = *(a5 + 1); + data35 = *(a5 + 2); + data36 = *(a5 + 3); + data37 = *(a5 + 4); + data38 = *(a5 + 5); + // data39 = *(a5 + 6); + // data40 = *(a5 + 7); + + data41 = *(a6 + 0); + data42 = *(a6 + 1); + data43 = *(a6 + 2); + data44 = *(a6 + 3); + data45 = *(a6 + 4); + data46 = *(a6 + 5); + // data47 = *(a6 + 6); + // data48 = *(a6 + 7); + + // data49 = *(a7 + 0); + // data50 = *(a7 + 1); + // data51 = *(a7 + 2); + // data52 = *(a7 + 3); + // data53 = *(a7 + 4); + // data54 = *(a7 + 5); + // data55 = *(a7 + 6); + // data56 = *(a7 + 7); + + // data57 = *(a8 + 0); + // data58 = *(a8 + 1); + // data59 = *(a8 + 2); + // data60 = *(a8 + 3); + // data61 = *(a8 + 4); + // data62 = *(a8 + 5); + // data63 = *(a8 + 6); + // data64 = *(a8 + 7); - data05 = *(a2 + 0); - data06 = *(a2 + 1); - data07 = *(a2 + 2); - data08 = *(a2 + 3); + *(b + 0) = data01; + *(b + 1) = data09; + *(b + 2) = data17; + *(b + 3) = data25; + *(b + 4) = data33; + *(b + 5) = data41; + // *(b + 6) = data49; + // *(b + 7) = data57; + + *(b + 6) = data02; + *(b + 7) = data10; + *(b + 8) = data18; + *(b + 9) = data26; + *(b + 10) = data34; + *(b + 11) = data42; + // *(b + 14) = data50; + // *(b + 15) = data58; + + *(b + 12) = data03; + *(b + 13) = data11; + *(b + 14) = data19; + *(b + 15) = data27; + *(b + 16) = data35; + *(b + 17) = data43; + // *(b + 22) = data51; + // *(b + 23) = data59; + + *(b + 18) = data04; + *(b + 19) = data12; + *(b + 20) = data20; + *(b + 21) = data28; + *(b + 22) = data36; + *(b + 23) = data44; + // *(b + 30) = data52; + // *(b + 31) = data60; + + *(b + 24) = data05; + *(b + 25) = data13; + *(b + 26) = 
data21; + *(b + 27) = data29; + *(b + 28) = data37; + *(b + 29) = data45; + // *(b + 38) = data53; + // *(b + 39) = data61; + + *(b + 30) = data06; + *(b + 31) = data14; + *(b + 32) = data22; + *(b + 33) = data30; + *(b + 34) = data38; + *(b + 35) = data46; + // *(b + 46) = data54; + // *(b + 47) = data62; + + // *(b + 48) = data07; + // *(b + 49) = data15; + // *(b + 50) = data23; + // *(b + 51) = data31; + // *(b + 52) = data39; + // *(b + 53) = data47; + // *(b + 54) = data55; + // *(b + 55) = data63; + + // *(b + 56) = data08; + // *(b + 57) = data16; + // *(b + 58) = data24; + // *(b + 59) = data32; + // *(b + 60) = data40; + // *(b + 61) = data48; + // *(b + 62) = data56; + // *(b + 63) = data64; + } + + a1 += 6; + a2 += 6; + a3 += 6; + a4 += 6; + a5 += 6; + a6 += 6; + // a7 += 6; + // a8 += 6; + b += 36; - data09 = *(a3 + 0); - data10 = *(a3 + 1); - data11 = *(a3 + 2); - data12 = *(a3 + 3); + i --; + ii += 6; + } - data13 = *(a4 + 0); - data14 = *(a4 + 1); - data15 = *(a4 + 2); - data16 = *(a4 + 3); + if (mmod6 & 4) { + if (ii == jj) { + +#ifndef UNIT + data01 = *(a1 + 0); +#endif + + data09 = *(a2 + 0); +#ifndef UNIT + data10 = *(a2 + 1); +#endif + + data17 = *(a3 + 0); + data18 = *(a3 + 1); +#ifndef UNIT + data19 = *(a3 + 2); +#endif + + data25 = *(a4 + 0); + data26 = *(a4 + 1); + data27 = *(a4 + 2); +#ifndef UNIT + data28 = *(a4 + 3); +#endif + + data33 = *(a5 + 0); + data34 = *(a5 + 1); + data35 = *(a5 + 2); + data36 = *(a5 + 3); + + data41 = *(a6 + 0); + data42 = *(a6 + 1); + data43 = *(a6 + 2); + data44 = *(a6 + 3); + + // data49 = *(a7 + 0); + // data50 = *(a7 + 1); + // data51 = *(a7 + 2); + // data52 = *(a7 + 3); + + // data57 = *(a8 + 0); + // data58 = *(a8 + 1); + // data59 = *(a8 + 2); + // data60 = *(a8 + 3); + + *(b + 0) = INV(data01); + *(b + 1) = data09; + *(b + 2) = data17; + *(b + 3) = data25; + *(b + 4) = data33; + *(b + 5) = data41; + // *(b + 6) = data49; + // *(b + 7) = data57; + + *(b + 7) = INV(data10); + *(b + 8) = data18; + *(b + 9) 
= data26; + *(b + 10) = data34; + *(b + 11) = data42; + // *(b + 14) = data50; + // *(b + 15) = data58; + + *(b + 14) = INV(data19); + *(b + 15) = data27; + *(b + 16) = data35; + *(b + 17) = data43; + // *(b + 22) = data51; + // *(b + 23) = data59; + + *(b + 21) = INV(data28); + *(b + 22) = data36; + *(b + 23) = data44; + // *(b + 30) = data52; + // *(b + 31) = data60; + + } + + if (ii < jj) { + data01 = *(a1 + 0); + data02 = *(a1 + 1); + data03 = *(a1 + 2); + data04 = *(a1 + 3); + data09 = *(a2 + 0); + data10 = *(a2 + 1); + data11 = *(a2 + 2); + data12 = *(a2 + 3); + + data17 = *(a3 + 0); + data18 = *(a3 + 1); + data19 = *(a3 + 2); + data20 = *(a3 + 3); + data25 = *(a4 + 0); + data26 = *(a4 + 1); + data27 = *(a4 + 2); + data28 = *(a4 + 3); + + data33 = *(a5 + 0); + data34 = *(a5 + 1); + data35 = *(a5 + 2); + data36 = *(a5 + 3); + data41 = *(a6 + 0); + data42 = *(a6 + 1); + data43 = *(a6 + 2); + data44 = *(a6 + 3); + + // data49 = *(a7 + 0); + // data50 = *(a7 + 1); + // data51 = *(a7 + 2); + // data52 = *(a7 + 3); + // data57 = *(a8 + 0); + // data58 = *(a8 + 1); + // data59 = *(a8 + 2); + // data60 = *(a8 + 3); *(b + 0) = data01; - *(b + 1) = data05; - *(b + 2) = data09; - *(b + 3) = data13; - *(b + 4) = data02; - *(b + 5) = data06; - *(b + 6) = data10; - *(b + 7) = data14; - - *(b + 8) = data03; - *(b + 9) = data07; - *(b + 10) = data11; - *(b + 11) = data15; - *(b + 12) = data04; - *(b + 13) = data08; - *(b + 14) = data12; - *(b + 15) = data16; + *(b + 1) = data09; + *(b + 2) = data17; + *(b + 3) = data25; + *(b + 4) = data33; + *(b + 5) = data41; + // *(b + 6) = data49; + // *(b + 7) = data57; + + *(b + 6) = data02; + *(b + 7) = data10; + *(b + 8) = data18; + *(b + 9) = data26; + *(b + 10) = data34; + *(b + 11) = data42; + // *(b + 14) = data50; + // *(b + 15) = data58; + + *(b + 12) = data03; + *(b + 13) = data11; + *(b + 14) = data19; + *(b + 15) = data27; + *(b + 16) = data35; + *(b + 17) = data43; + // *(b + 22) = data51; + // *(b + 23) = data59; + + *(b + 
18) = data04; + *(b + 19) = data12; + *(b + 20) = data20; + *(b + 21) = data28; + *(b + 22) = data36; + *(b + 23) = data44; + // *(b + 30) = data52; + // *(b + 31) = data60; } a1 += 4; a2 += 4; a3 += 4; a4 += 4; - b += 16; - - i --; + a5 += 4; + a6 += 4; + // a7 += 4; + // a8 += 4; + b += 24; ii += 4; } - if ((m & 2) != 0) { + if (mmod6 & 2) { + if (ii == jj) { + +#ifndef UNIT + data01 = *(a1 + 0); +#endif + data09 = *(a2 + 0); +#ifndef UNIT + data10 = *(a2 + 1); +#endif + + data17 = *(a3 + 0); + data18 = *(a3 + 1); + data25 = *(a4 + 0); + data26 = *(a4 + 1); - if (ii== jj) { + data33 = *(a5 + 0); + data34 = *(a5 + 1); + data41 = *(a6 + 0); + data42 = *(a6 + 1); + + // data49 = *(a7 + 0); + // data50 = *(a7 + 1); + // data57 = *(a8 + 0); + // data58 = *(a8 + 1); + + *(b + 0) = INV(data01); + *(b + 1) = data09; + *(b + 2) = data17; + *(b + 3) = data25; + *(b + 4) = data33; + *(b + 5) = data41; + // *(b + 6) = data49; + // *(b + 7) = data57; + + *(b + 7) = INV(data10); + *(b + 8) = data18; + *(b + 9) = data26; + *(b + 10) = data34; + *(b + 11) = data42; + // *(b + 14) = data50; + // *(b + 15) = data58; + } + + if (ii < jj) { + data01 = *(a1 + 0); + data02 = *(a1 + 1); + data09 = *(a2 + 0); + data10 = *(a2 + 1); + data17 = *(a3 + 0); + data18 = *(a3 + 1); + data25 = *(a4 + 0); + data26 = *(a4 + 1); + + data33 = *(a5 + 0); + data34 = *(a5 + 1); + data41 = *(a6 + 0); + data42 = *(a6 + 1); + // data49 = *(a7 + 0); + // data50 = *(a7 + 1); + // data57 = *(a8 + 0); + // data58 = *(a8 + 1); + + *(b + 0) = data01; + *(b + 1) = data09; + *(b + 2) = data17; + *(b + 3) = data25; + *(b + 4) = data33; + *(b + 5) = data41; + // *(b + 6) = data49; + // *(b + 7) = data57; + + *(b + 6) = data02; + *(b + 7) = data10; + *(b + 8) = data18; + *(b + 9) = data26; + *(b + 10) = data34; + *(b + 11) = data42; + // *(b + 14) = data50; + // *(b + 15) = data58; + } + + a1 += 2; + a2 += 2; + a3 += 2; + a4 += 2; + a5 += 2; + a6 += 2; + a7 += 2; + a8 += 2; + b += 12; + ii += 2; + } + + if (mmod6 & 
1) { + if (ii == jj) { #ifndef UNIT data01 = *(a1 + 0); #endif + data09 = *(a2 + 0); + data17 = *(a3 + 0); + data25 = *(a4 + 0); + data33 = *(a5 + 0); + data41 = *(a6 + 0); + // data49 = *(a7 + 0); + // data57 = *(a8 + 0); - data05 = *(a2 + 0); + *(b + 0) = INV(data01); + *(b + 1) = data09; + *(b + 2) = data17; + *(b + 3) = data25; + *(b + 4) = data33; + *(b + 5) = data41; + // *(b + 6) = data49; + // *(b + 7) = data57; + } + + if (ii < jj) { + data01 = *(a1 + 0); + // data02 = *(a1 + 1); + data09 = *(a2 + 0); + // data10 = *(a2 + 1); + data17 = *(a3 + 0); + // data18 = *(a3 + 1); + data25 = *(a4 + 0); + // data26 = *(a4 + 1); + + // // data33 = *(a5 + 0); + // data34 = *(a5 + 1); + // // data41 = *(a6 + 0); + // data42 = *(a6 + 1); + // data49 = *(a7 + 0); + // data50 = *(a7 + 1); + // data57 = *(a8 + 0); + // data58 = *(a8 + 1); + + *(b + 0) = data01; + *(b + 1) = data09; + *(b + 2) = data17; + *(b + 3) = data25; + *(b + 4) = data33; + *(b + 5) = data41; + // *(b + 6) = data49; + // *(b + 7) = data57; + } + b += 6; + // ii += 1; + } + + a += 6 * lda; + jj += 6; + j --; + } + + + if (nmod6 & 4) { + a1 = a + 0 * lda; + a2 = a + 1 * lda; + a3 = a + 2 * lda; + a4 = a + 3 * lda; + + ii = 0; + + i = (m >> 1); + while (i > 0) { + + if (ii == jj) { + +#ifndef UNIT + data01 = *(a1 + 0); +#endif + + data09 = *(a2 + 0); #ifndef UNIT - data06 = *(a2 + 1); + data10 = *(a2 + 1); #endif - data09 = *(a3 + 0); - data10 = *(a3 + 1); + data17 = *(a3 + 0); + data18 = *(a3 + 1); +#ifndef UNIT + data19 = *(a3 + 2); +#endif - data13 = *(a4 + 0); - data14 = *(a4 + 1); + data25 = *(a4 + 0); + data26 = *(a4 + 1); + data27 = *(a4 + 2); +#ifndef UNIT + data28 = *(a4 + 3); +#endif *(b + 0) = INV(data01); - *(b + 1) = data05; - *(b + 2) = data09; - *(b + 3) = data13; + *(b + 1) = data09; + *(b + 2) = data17; + *(b + 3) = data25; + + *(b + 5) = INV(data10); + *(b + 6) = data18; + *(b + 7) = data26; + + *(b + 10) = INV(data19); + *(b + 11) = data27; - *(b + 5) = INV(data06); - *(b + 6) = 
data10; - *(b + 7) = data14; + *(b + 15) = INV(data28); + + a1 += 4; + a2 += 4; + a3 += 4; + a4 += 4; + b += 16; + + i -= 2; + ii += 4; } - if (ii < jj) { + else if (ii < jj) { data01 = *(a1 + 0); data02 = *(a1 + 1); - data03 = *(a2 + 0); - data04 = *(a2 + 1); - data05 = *(a3 + 0); - data06 = *(a3 + 1); - data07 = *(a4 + 0); - data08 = *(a4 + 1); + data09 = *(a2 + 0); + data10 = *(a2 + 1); + data17 = *(a3 + 0); + data18 = *(a3 + 1); + data25 = *(a4 + 0); + data26 = *(a4 + 1); *(b + 0) = data01; - *(b + 1) = data02; - *(b + 2) = data03; - *(b + 3) = data04; - *(b + 4) = data05; - *(b + 5) = data06; - *(b + 6) = data07; - *(b + 7) = data08; + *(b + 1) = data09; + *(b + 2) = data17; + *(b + 3) = data25; + *(b + 4) = data02; + *(b + 5) = data10; + *(b + 6) = data18; + *(b + 7) = data26; + + a1 += 2; + a2 += 2; + a3 += 2; + a4 += 2; + b += 8; + + i -- ; + ii += 2; } + else{ + a1 += 2; a2 += 2; - b += 8; + a3 += 2; + a4 += 2; + b += 8; + i -- ; ii += 2; } + } - if ((m & 1) != 0) { + if (m & 1) { + if (ii == jj) { - if (ii== jj) { #ifndef UNIT data01 = *(a1 + 0); #endif - - data05 = *(a2 + 0); - data09 = *(a3 + 0); - data13 = *(a4 + 0); + data09 = *(a2 + 0); + data17 = *(a3 + 0); + data25 = *(a4 + 0); *(b + 0) = INV(data01); - *(b + 1) = data05; - *(b + 2) = data09; - *(b + 3) = data13; + *(b + 1) = data09; + *(b + 2) = data17; + *(b + 3) = data25; } - if (ii < jj) { + if (ii < jj) { data01 = *(a1 + 0); - data02 = *(a2 + 0); - data03 = *(a3 + 0); - data04 = *(a4 + 0); + data09 = *(a2 + 0); + data17 = *(a3 + 0); + data25 = *(a4 + 0); *(b + 0) = data01; - *(b + 1) = data02; - *(b + 2) = data03; - *(b + 3) = data04; + *(b + 1) = data09; + *(b + 2) = data17; + *(b + 3) = data25; } b += 4; + // ii += 1; } - a += 4 * lda; + a += 4 * lda; jj += 4; - j --; } - if (n & 2) { + if (nmod6 & 2) { a1 = a + 0 * lda; a2 = a + 1 * lda; - i = (m >> 1); ii = 0; + + i = (m >> 1); while (i > 0) { if (ii == jj) { @@ -263,68 +819,70 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, 
BLASLONG offset, FLOAT data01 = *(a1 + 0); #endif - data03 = *(a2 + 0); + data09 = *(a2 + 0); #ifndef UNIT - data04 = *(a2 + 1); + data10 = *(a2 + 1); #endif *(b + 0) = INV(data01); - *(b + 1) = data03; - *(b + 3) = INV(data04); + *(b + 1) = data09; + + *(b + 3) = INV(data10); } if (ii < jj) { data01 = *(a1 + 0); data02 = *(a1 + 1); - data03 = *(a2 + 0); - data04 = *(a2 + 1); + data09 = *(a2 + 0); + data10 = *(a2 + 1); *(b + 0) = data01; - *(b + 1) = data03; + *(b + 1) = data09; *(b + 2) = data02; - *(b + 3) = data04; + *(b + 3) = data10; } a1 += 2; a2 += 2; - b += 4; + b += 4; i --; ii += 2; } - if ((m & 1) != 0) { - - if (ii== jj) { - + if (m & 1) { + if (ii == jj) { #ifndef UNIT data01 = *(a1 + 0); #endif - - data03 = *(a2 + 0); + data09 = *(a2 + 0); *(b + 0) = INV(data01); - *(b + 1) = data03; + *(b + 1) = data09; } - if (ii < jj) { + if (ii < jj) { data01 = *(a1 + 0); - data02 = *(a2 + 0); + data09 = *(a2 + 0); + *(b + 0) = data01; - *(b + 1) = data02; + *(b + 1) = data09; } b += 2; + // ii += 1; } - a += 2 * lda; + + a += 2 * lda; jj += 2; } - if (n & 1) { + if (nmod6 & 1) { a1 = a + 0 * lda; - i = m; ii = 0; + + i = m; while (i > 0) { if (ii == jj) { @@ -339,10 +897,10 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG offset, FLOAT *(b + 0) = data01; } - a1+= 1; - b += 1; + a1 += 1; + b += 1; i --; - ii += 1; + ii ++; } } diff --git a/kernel/generic/trsm_utcopy_6.c b/kernel/generic/trsm_utcopy_6.c index f83617224f..6afc005470 100644 --- a/kernel/generic/trsm_utcopy_6.c +++ b/kernel/generic/trsm_utcopy_6.c @@ -49,21 +49,34 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG offset, FLOAT BLASLONG i, ii, j, jj; - FLOAT data01, data02, data03, data04, data05, data06, data07, data08; - FLOAT data09, data10, data11, data12, data13, data14, data15, data16; - FLOAT *a1, *a2, *a3, *a4; + FLOAT data01, data02, data03, data04, data05, data06; + FLOAT data09, data10, data11, data12, data13, data14; + FLOAT data17, data18, data19, 
data20, data21, data22; + FLOAT data25, data26, data27, data28, data29, data30; + FLOAT data33, data34, data35, data36, data37, data38; + FLOAT data41, data42, data43, data44, data45, data46; + + FLOAT *a1, *a2, *a3, *a4, *a5, *a6, *a7, *a8; jj = offset; - j = (n >> 2); + BLASLONG mmod6, nmod6, k; + mmod6 = m - (m/6)*6 ; + nmod6 = n - (n/6)*6 ; + + // j = (n >> 3); + j = (n / 6); while (j > 0){ a1 = a + 0 * lda; a2 = a + 1 * lda; a3 = a + 2 * lda; a4 = a + 3 * lda; + a5 = a + 4 * lda; + a6 = a + 5 * lda; - i = (m >> 2); + // i = (m >> 3); + i = (m / 6); ii = 0; while (i > 0) { @@ -72,37 +85,67 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG offset, FLOAT data01 = *(a1 + 0); #endif - data05 = *(a2 + 0); + data09 = *(a2 + 0); #ifndef UNIT - data06 = *(a2 + 1); + data10 = *(a2 + 1); #endif - data09 = *(a3 + 0); - data10 = *(a3 + 1); + data17 = *(a3 + 0); + data18 = *(a3 + 1); #ifndef UNIT - data11 = *(a3 + 2); + data19 = *(a3 + 2); #endif - data13 = *(a4 + 0); - data14 = *(a4 + 1); - data15 = *(a4 + 2); + data25 = *(a4 + 0); + data26 = *(a4 + 1); + data27 = *(a4 + 2); #ifndef UNIT - data16 = *(a4 + 3); + data28 = *(a4 + 3); #endif - *(b + 0) = INV(data01); + data33 = *(a5 + 0); + data34 = *(a5 + 1); + data35 = *(a5 + 2); + data36 = *(a5 + 3); +#ifndef UNIT + data37 = *(a5 + 4); +#endif - *(b + 4) = data05; - *(b + 5) = INV(data06); + data41 = *(a6 + 0); + data42 = *(a6 + 1); + data43 = *(a6 + 2); + data44 = *(a6 + 3); + data45 = *(a6 + 4); +#ifndef UNIT + data46 = *(a6 + 5); +#endif - *(b + 8) = data09; - *(b + 9) = data10; - *(b + 10) = INV(data11); + *(b + 0) = INV(data01); - *(b + 12) = data13; - *(b + 13) = data14; - *(b + 14) = data15; - *(b + 15) = INV(data16); + *(b + 6) = data09; + *(b + 7) = INV(data10); + + *(b + 12) = data17; + *(b + 13) = data18; + *(b + 14) = INV(data19); + + *(b + 18) = data25; + *(b + 19) = data26; + *(b + 20) = data27; + *(b + 21) = INV(data28); + + *(b + 24) = data33; + *(b + 25) = data34; + *(b + 26) = data35; + 
*(b + 27) = data36; + *(b + 28) = INV(data37); + + *(b + 30) = data41; + *(b + 31) = data42; + *(b + 32) = data43; + *(b + 33) = data44; + *(b + 34) = data45; + *(b + 35) = INV(data46); } if (ii > jj) { @@ -110,21 +153,166 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG offset, FLOAT data02 = *(a1 + 1); data03 = *(a1 + 2); data04 = *(a1 + 3); + data05 = *(a1 + 4); + data06 = *(a1 + 5); + + data09 = *(a2 + 0); + data10 = *(a2 + 1); + data11 = *(a2 + 2); + data12 = *(a2 + 3); + data13 = *(a2 + 4); + data14 = *(a2 + 5); + + data17 = *(a3 + 0); + data18 = *(a3 + 1); + data19 = *(a3 + 2); + data20 = *(a3 + 3); + data21 = *(a3 + 4); + data22 = *(a3 + 5); + + data25 = *(a4 + 0); + data26 = *(a4 + 1); + data27 = *(a4 + 2); + data28 = *(a4 + 3); + data29 = *(a4 + 4); + data30 = *(a4 + 5); + + data33 = *(a5 + 0); + data34 = *(a5 + 1); + data35 = *(a5 + 2); + data36 = *(a5 + 3); + data37 = *(a5 + 4); + data38 = *(a5 + 5); + + data41 = *(a6 + 0); + data42 = *(a6 + 1); + data43 = *(a6 + 2); + data44 = *(a6 + 3); + data45 = *(a6 + 4); + data46 = *(a6 + 5); + + *(b + 0) = data01; + *(b + 1) = data02; + *(b + 2) = data03; + *(b + 3) = data04; + *(b + 4) = data05; + *(b + 5) = data06; + *(b + 6) = data09; + *(b + 7) = data10; + *(b + 8) = data11; + *(b + 9) = data12; + *(b + 10) = data13; + *(b + 11) = data14; + + *(b + 12) = data17; + *(b + 13) = data18; + *(b + 14) = data19; + *(b + 15) = data20; + *(b + 16) = data21; + *(b + 17) = data22; + *(b + 18) = data25; + *(b + 19) = data26; + *(b + 20) = data27; + *(b + 21) = data28; + *(b + 22) = data29; + *(b + 23) = data30; + + *(b + 24) = data33; + *(b + 25) = data34; + *(b + 26) = data35; + *(b + 27) = data36; + *(b + 28) = data37; + *(b + 29) = data38; + *(b + 30) = data41; + *(b + 31) = data42; + *(b + 32) = data43; + *(b + 33) = data44; + *(b + 34) = data45; + *(b + 35) = data46; - data05 = *(a2 + 0); - data06 = *(a2 + 1); - data07 = *(a2 + 2); - data08 = *(a2 + 3); + } - data09 = *(a3 + 0); - data10 = *(a3 
+ 1); - data11 = *(a3 + 2); - data12 = *(a3 + 3); + a1 += 6 * lda; + a2 += 6 * lda; + a3 += 6 * lda; + a4 += 6 * lda; + a5 += 6 * lda; + a6 += 6 * lda; + a7 += 6 * lda; + a8 += 6 * lda; + b += 36; - data13 = *(a4 + 0); - data14 = *(a4 + 1); - data15 = *(a4 + 2); - data16 = *(a4 + 3); + i --; + ii += 6; + } + + if (mmod6 & 4) { + if (ii == jj) { +#ifndef UNIT + data01 = *(a1 + 0); +#endif + + data09 = *(a2 + 0); +#ifndef UNIT + data10 = *(a2 + 1); +#endif + + data17 = *(a3 + 0); + data18 = *(a3 + 1); +#ifndef UNIT + data19 = *(a3 + 2); +#endif + + data25 = *(a4 + 0); + data26 = *(a4 + 1); + data27 = *(a4 + 2); +#ifndef UNIT + data28 = *(a4 + 3); +#endif + + *(b + 0) = INV(data01); + + *(b + 6) = data09; + *(b + 7) = INV(data10); + + *(b + 12) = data17; + *(b + 13) = data18; + *(b + 14) = INV(data19); + + *(b + 18) = data25; + *(b + 19) = data26; + *(b + 20) = data27; + *(b + 21) = INV(data28); + } + + if (ii > jj) { + data01 = *(a1 + 0); + data02 = *(a1 + 1); + data03 = *(a1 + 2); + data04 = *(a1 + 3); + data05 = *(a1 + 4); + data06 = *(a1 + 5); + + data09 = *(a2 + 0); + data10 = *(a2 + 1); + data11 = *(a2 + 2); + data12 = *(a2 + 3); + data13 = *(a2 + 4); + data14 = *(a2 + 5); + + data17 = *(a3 + 0); + data18 = *(a3 + 1); + data19 = *(a3 + 2); + data20 = *(a3 + 3); + data21 = *(a3 + 4); + data22 = *(a3 + 5); + + data25 = *(a4 + 0); + data26 = *(a4 + 1); + data27 = *(a4 + 2); + data28 = *(a4 + 3); + data29 = *(a4 + 4); + data30 = *(a4 + 5); *(b + 0) = data01; *(b + 1) = data02; @@ -132,44 +320,49 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG offset, FLOAT *(b + 3) = data04; *(b + 4) = data05; *(b + 5) = data06; - *(b + 6) = data07; - *(b + 7) = data08; - - *(b + 8) = data09; - *(b + 9) = data10; - *(b + 10) = data11; - *(b + 11) = data12; - *(b + 12) = data13; - *(b + 13) = data14; - *(b + 14) = data15; - *(b + 15) = data16; + *(b + 6) = data09; + *(b + 7) = data10; + *(b + 8) = data11; + *(b + 9) = data12; + *(b + 10) = data13; + *(b + 11) = 
data14; + + *(b + 12) = data17; + *(b + 13) = data18; + *(b + 14) = data19; + *(b + 15) = data20; + *(b + 16) = data21; + *(b + 17) = data22; + *(b + 18) = data25; + *(b + 19) = data26; + *(b + 20) = data27; + *(b + 21) = data28; + *(b + 22) = data29; + *(b + 23) = data30; } a1 += 4 * lda; a2 += 4 * lda; - a3 += 4 * lda; - a4 += 4 * lda; - b += 16; - - i --; + /* a3 += 4 * lda; + a4 += 4 * lda; */ + b += 24; ii += 4; } - if ((m & 2) != 0) { - - if (ii== jj) { + if (mmod6 & 2) { + if (ii == jj) { #ifndef UNIT data01 = *(a1 + 0); #endif - data05 = *(a2 + 0); + + data09 = *(a2 + 0); #ifndef UNIT - data06 = *(a2 + 1); + data10 = *(a2 + 1); #endif *(b + 0) = INV(data01); - - *(b + 4) = data05; - *(b + 5) = INV(data06); + *(b + 6) = data09; + *(b + 7) = INV(data10); } if (ii > jj) { @@ -177,11 +370,15 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG offset, FLOAT data02 = *(a1 + 1); data03 = *(a1 + 2); data04 = *(a1 + 3); + data05 = *(a1 + 4); + data06 = *(a1 + 5); - data05 = *(a2 + 0); - data06 = *(a2 + 1); - data07 = *(a2 + 2); - data08 = *(a2 + 3); + data09 = *(a2 + 0); + data10 = *(a2 + 1); + data11 = *(a2 + 2); + data12 = *(a2 + 3); + data13 = *(a2 + 4); + data14 = *(a2 + 5); *(b + 0) = data01; *(b + 1) = data02; @@ -189,46 +386,84 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG offset, FLOAT *(b + 3) = data04; *(b + 4) = data05; *(b + 5) = data06; - *(b + 6) = data07; - *(b + 7) = data08; + *(b + 6) = data09; + *(b + 7) = data10; + *(b + 8) = data11; + *(b + 9) = data12; + *(b + 10) = data13; + *(b + 11) = data14; } a1 += 2 * lda; - a2 += 2 * lda; - b += 8; - + // a2 += 2 * lda; + b += 12; ii += 2; } - if ((m & 1) != 0) { - - if (ii== jj) { + if (mmod6 & 1) { + if (ii == jj) { #ifndef UNIT data01 = *(a1 + 0); #endif *(b + 0) = INV(data01); } - if (ii > jj) { + if (ii > jj) { data01 = *(a1 + 0); data02 = *(a1 + 1); data03 = *(a1 + 2); data04 = *(a1 + 3); + data05 = *(a1 + 4); + data06 = *(a1 + 5); *(b + 0) = data01; *(b + 
1) = data02; *(b + 2) = data03; *(b + 3) = data04; + *(b + 4) = data05; + *(b + 5) = data06; } - b += 4; + b += 6; } + a += 6; + jj += 6; + j --; + } + + if (nmod6 & 4) { + + a1 = a; a += 4; + ii = 0; + + for (i = 0; i < m; i++) { + + if ((ii >= jj ) && (ii - jj < 4)) { + for (k = 0; k < ii - jj; k ++) { + *(b + k) = *(a1 + k); + } + + *(b + ii - jj) = INV(*(a1 + ii - jj)); + } + + if (ii - jj >= 4) { + *(b + 0) = *(a1 + 0); + *(b + 1) = *(a1 + 1); + *(b + 2) = *(a1 + 2); + *(b + 3) = *(a1 + 3); + } + + b += 4; + a1 += lda; + ii ++; + } + jj += 4; - j --; } - if (n & 2) { + + if (nmod6 & 2) { a1 = a + 0 * lda; a2 = a + 1 * lda; @@ -240,58 +475,58 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG offset, FLOAT #ifndef UNIT data01 = *(a1 + 0); #endif - data03 = *(a2 + 0); + + data09 = *(a2 + 0); #ifndef UNIT - data04 = *(a2 + 1); + data10 = *(a2 + 1); #endif *(b + 0) = INV(data01); - *(b + 2) = data03; - *(b + 3) = INV(data04); + *(b + 2) = data09; + *(b + 3) = INV(data10); } if (ii > jj) { data01 = *(a1 + 0); data02 = *(a1 + 1); - data03 = *(a2 + 0); - data04 = *(a2 + 1); + data09 = *(a2 + 0); + data10 = *(a2 + 1); *(b + 0) = data01; *(b + 1) = data02; - *(b + 2) = data03; - *(b + 3) = data04; + *(b + 2) = data09; + *(b + 3) = data10; } a1 += 2 * lda; a2 += 2 * lda; b += 4; - i --; ii += 2; } - if ((m & 1) != 0) { - - if (ii== jj) { + if (m & 1) { + if (ii == jj) { #ifndef UNIT data01 = *(a1 + 0); #endif *(b + 0) = INV(data01); } - if (ii > jj) { + if (ii > jj) { data01 = *(a1 + 0); data02 = *(a1 + 1); + *(b + 0) = data01; *(b + 1) = data02; } - b += 2; + b += 2; } a += 2; jj += 2; } - if (n & 1) { + if (nmod6 & 1) { a1 = a + 0 * lda; i = m; @@ -310,9 +545,8 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG offset, FLOAT *(b + 0) = data01; } - a1 += 1 * lda; + a1 += lda; b += 1; - i --; ii += 1; } diff --git a/kernel/loongarch64/KERNEL.LOONGSON3R5 b/kernel/loongarch64/KERNEL.LOONGSON3R5 index 20d0769f48..2c1ab87e59 100644 --- 
a/kernel/loongarch64/KERNEL.LOONGSON3R5 +++ b/kernel/loongarch64/KERNEL.LOONGSON3R5 @@ -85,11 +85,11 @@ ZSWAPKERNEL = cswap_lasx.S CSUMKERNEL = csum_lasx.S ZSUMKERNEL = csum_lasx.S -DGEMMKERNEL = dgemm_kernel_16x4.S +DGEMMKERNEL = dgemm_kernel_16x6.S DGEMMINCOPY = dgemm_ncopy_16.S DGEMMITCOPY = dgemm_tcopy_16.S -DGEMMONCOPY = dgemm_ncopy_4.S -DGEMMOTCOPY = dgemm_tcopy_4.S +DGEMMONCOPY = gemm_ncopy_6.prefx.c +DGEMMOTCOPY = dgemm_tcopy_6.S DGEMMINCOPYOBJ = dgemm_incopy$(TSUFFIX).$(SUFFIX) DGEMMITCOPYOBJ = dgemm_itcopy$(TSUFFIX).$(SUFFIX) DGEMMONCOPYOBJ = dgemm_oncopy$(TSUFFIX).$(SUFFIX) @@ -153,10 +153,10 @@ ZTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c ZTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c ZTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c -DTRSMKERNEL_LN = dtrsm_kernel_LN_16x4_lasx.S -DTRSMKERNEL_LT = dtrsm_kernel_LT_16x4_lasx.S -DTRSMKERNEL_RN = dtrsm_kernel_RN_16x4_lasx.S -DTRSMKERNEL_RT = dtrsm_kernel_RT_16x4_lasx.S +DTRSMKERNEL_LN = trsm_kernel_LN_UNROLLN6.c +DTRSMKERNEL_LT = trsm_kernel_LT_UNROLLN6.c +DTRSMKERNEL_RN = trsm_kernel_RN_UNROLLN6.c +DTRSMKERNEL_RT = trsm_kernel_RT_UNROLLN6.c STRSMKERNEL_LN = ../generic/trsm_kernel_LN.c STRSMKERNEL_LT = ../generic/trsm_kernel_LT.c diff --git a/kernel/loongarch64/dgemm_kernel_16x6.S b/kernel/loongarch64/dgemm_kernel_16x6.S new file mode 100644 index 0000000000..90da107377 --- /dev/null +++ b/kernel/loongarch64/dgemm_kernel_16x6.S @@ -0,0 +1,6256 @@ +/******************************************************************************* +Copyright (c) 2024, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. 
Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*******************************************************************************/ +#define ASSEMBLER + +#include "common.h" + +/* Function parameters */ +#define M $r4 // param 1: bm +#define N $r5 // param 2: bn +#define K $r6 // param 3: bk +#define ALPHA $f0 // param 4: alpha +#define A $r7 // param 5: ba +#define B $r8 // param 6: bb +#define C $r9 // param 7: bc +#define LDC $r10 // param 8: ldc + +#ifdef TRMMKERNEL +#define OFFSET $r11 // param 9: offset +#endif +#define OFF $r12 + +/* Cycle control parameters */ +#define I $r13 +#define J $r14 +#define L $r15 +#define TL $r16 +/* Matrix address */ +#define A0 $r17 +#define B0 $r18 +#define C0 $r19 +#define C1 $r20 +#define C2 $r23 +#define C3 $r24 +#define C4 $r25 +#define C5 $r26 +#define T0 $r27 /* !! DO NOT USE $r21 and $r22 !! 
*/ +#define T1 $r28 +#define T2 $r29 +#define I48 $r30 +#define ZERO $r0 + +/* LASX vectors */ +#define U0 $xr0 +#define U1 $xr1 +#define U2 $xr2 +#define U3 $xr3 +#define U4 $xr4 +#define U5 $xr5 +#define U6 $xr6 +#define D0 $xr7 +#define D1 $xr8 +#define D2 $xr9 +#define D3 $xr10 +#define D4 $xr11 +#define D5 $xr12 +#define D6 $xr13 +#define D7 $xr14 +#define D8 $xr15 +#define D9 $xr16 +#define D10 $xr17 +#define D11 $xr18 +#define D12 $xr19 +#define D13 $xr20 +#define D14 $xr21 +#define D15 $xr22 +#define D16 $xr23 +#define D17 $xr24 +#define D18 $xr25 +#define D19 $xr26 +#define D20 $xr27 +#define D21 $xr28 +#define D22 $xr29 +#define D23 $xr30 +#define VALPHA $xr31 + +/* Prefetch interval */ +#define A_PRE 0x200 /* 0x200 / 0x80 = 4 */ +#define B_PRE 0x100 /* 0x100 / 0x30 = 4 */ + +.macro KERNEL_16x6 + /* Load 16 * 64 from A0 */ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + xvld U2, A0, 0x40 + xvld U3, A0, 0x60 + + /* Cumulative D0~D23 */ + xvldrepl.d U4, B0, 0x00 + xvldrepl.d U5, B0, 0x08 + xvldrepl.d U6, B0, 0x10 + + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + xvfmadd.d D2, U2, U4, D2 + xvfmadd.d D3, U3, U4, D3 + preld 0, B0, B_PRE + + xvfmadd.d D4, U0, U5, D4 + xvfmadd.d D5, U1, U5, D5 + xvfmadd.d D6, U2, U5, D6 + xvfmadd.d D7, U3, U5, D7 + + xvfmadd.d D8, U0, U6, D8 + xvfmadd.d D9, U1, U6, D9 + xvfmadd.d D10, U2, U6, D10 + xvfmadd.d D11, U3, U6, D11 + preld 0, A0, A_PRE + + xvldrepl.d U4, B0, 0x18 + xvldrepl.d U5, B0, 0x20 + xvldrepl.d U6, B0, 0x28 + + xvfmadd.d D12, U0, U4, D12 + xvfmadd.d D13, U1, U4, D13 + xvfmadd.d D14, U2, U4, D14 + xvfmadd.d D15, U3, U4, D15 + + xvfmadd.d D16, U0, U5, D16 + xvfmadd.d D17, U1, U5, D17 + xvfmadd.d D18, U2, U5, D18 + xvfmadd.d D19, U3, U5, D19 + preld 0, A0, A_PRE + 0x40 + + xvfmadd.d D20, U0, U6, D20 + xvfmadd.d D21, U1, U6, D21 + xvfmadd.d D22, U2, U6, D22 + xvfmadd.d D23, U3, U6, D23 + + addi.d A0, A0, 0x80 + addi.d B0, B0, 0x30 +.endm + + PROLOGUE + + addi.d $sp, $sp, -160 + /* Store $r23~$31 */ + SDARG $r23, 
$sp, 0 + SDARG $r24, $sp, 8 + SDARG $r25, $sp, 16 + SDARG $r26, $sp, 24 + SDARG $r27, $sp, 32 + SDARG $r28, $sp, 40 + SDARG $r29, $sp, 48 + SDARG $r30, $sp, 56 + SDARG $r31, $sp, 64 + fst.d $f23, $sp, 72 + fst.d $f24, $sp, 80 + fst.d $f25, $sp, 96 + fst.d $f26, $sp, 104 + fst.d $f27, $sp, 112 + fst.d $f28, $sp, 120 + fst.d $f29, $sp, 128 + fst.d $f30, $sp, 136 + fst.d $f31, $sp, 144 + fst.d ALPHA, $sp, 152 + +#if defined (TRMMKERNEL) && !defined(LEFT) + sub.d OFF, ZERO, OFFSET +#else + xor OFF, OFF, OFF +#endif + + addi.d I48, ZERO, 48 + /* VALPHA = {ALPHA, ALPHA, ALPHA, ALPHA} */ + xvld VALPHA, $sp, 152 + xvreplve0.d VALPHA, VALPHA + xor T0, T0, T0 + addi.d T0, T0, 6 + /* if (!(N / 6)) goto L_N5 */ + div.d J, N, T0 /* J = bn / 6 */ + mul.d T0, J, T0 + sub.d N, N, T0 + beq ZERO, J, .L_N5 + +.L_J1: /* J-- && This loop include Condition 1 */ + +/************************* Condition 1 if((N / 6) && (M >> 4)) START !!! ************************* +* dgemm_core_16x6 */ + move C0, C + move A0, A + slli.d T0, LDC, 3 + add.d C1, C0, T0 + addi.d J, J, -1 /* J-- */ + add.d C2, C1, T0 + add.d C3, C2, T0 + add.d C4, C3, T0 + add.d C5, C4, T0 + +#if defined(TRMMKERNEL) && defined(LEFT) + move OFF, OFFSET +#endif + + /* if (!(M >> 3)) goto L_M8 */ + srai.d I, M, 4 /* I = bm >> 4 */ + beq ZERO, I, .L_M8 + +.L_I1: /* I-- */ +#if defined(TRMMKERNEL) +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + move B0, B +#else + slli.d T0, OFF, 0x07 + add.d A0, A0, T0 + mul.d T0, OFF, I48 + add.d B0, B, T0 +#endif + +#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + sub.d L, K, OFF +#elif defined(LEFT) + /* number of values in A */ + addi.d L, OFF, 16 +#else + /* number of values in B */ + addi.d L, OFF, 6 +#endif +#else // #if !defined(TRMMKERNEL) + move B0, B + move L, K /* L = bk */ +#endif + /* Calculate the first set of D0~D23, + * avoidig set 0 operation + * Load 16 * 64 from A0 + * U0 = {a3, a2, a1, a0} + * U1 = {a7, a6, a5, 
a4} + * U2 = {a11, a10, a9, a8} + * U3 = {a15, a14, a13, a12} + */ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + xvld U2, A0, 0x40 + xvld U3, A0, 0x60 + + xvldrepl.d U4, B0, 0x00 + xvldrepl.d U5, B0, 0x08 + xvldrepl.d U6, B0, 0x10 + + preld 0, C0, 0x00 + /* line 1 */ + xvfmul.d D0, U0, U4 + xvfmul.d D1, U1, U4 + preld 0, C0, 0x40 + xvfmul.d D2, U2, U4 + xvfmul.d D3, U3, U4 + + preld 0, C1, 0x00 + /* line 2 */ + xvfmul.d D4, U0, U5 + xvfmul.d D5, U1, U5 + preld 0, C1, 0x40 + xvfmul.d D6, U2, U5 + xvfmul.d D7, U3, U5 + + preld 0, C2, 0x00 + /* line 3 */ + xvfmul.d D8, U0, U6 + xvfmul.d D9, U1, U6 + preld 0, C2, 0x40 + xvfmul.d D10, U2, U6 + xvfmul.d D11, U3, U6 + + xvldrepl.d U4, B0, 0x18 + xvldrepl.d U5, B0, 0x20 + xvldrepl.d U6, B0, 0x28 + + preld 0, C3, 0x00 + /* line 4 */ + xvfmul.d D12, U0, U4 + xvfmul.d D13, U1, U4 + preld 0, C3, 0x40 + xvfmul.d D14, U2, U4 + xvfmul.d D15, U3, U4 + + preld 0, C4, 0x00 + /* line 5 */ + xvfmul.d D16, U0, U5 + xvfmul.d D17, U1, U5 + preld 0, C4, 0x40 + xvfmul.d D18, U2, U5 + xvfmul.d D19, U3, U5 + + preld 0, C5, 0x00 + /* line 6 */ + xvfmul.d D20, U0, U6 + xvfmul.d D21, U1, U6 + preld 0, C5, 0x40 + xvfmul.d D22, U2, U6 + xvfmul.d D23, U3, U6 + + /* Add stride for A0 and B0 */ + addi.d A0, A0, 0x80 + addi.d B0, B0, 0x30 + /* Reduce L */ + addi.d L, L, -1 + srai.d TL, L, 3 /* TL = (L-1) >> 3 */ + /* if (TL < 1) goto L_L7 */ + beq ZERO,TL, .L_L7 + + /* Calculate 8 sets of D0~D23 */ +.L_TL1: /* TL-- */ + KERNEL_16x6 + KERNEL_16x6 + KERNEL_16x6 + KERNEL_16x6 + KERNEL_16x6 + KERNEL_16x6 + KERNEL_16x6 + KERNEL_16x6 + addi.d TL, TL, -1 /* TL-- */ + blt ZERO,TL, .L_TL1 + + /* Maybe we need calculate the last + * 7 sets of D0~D23? 
+ */ +.L_L7: + /* if (!(L & 7)) goto L_L0 */ + andi TL, L, 7 + beq TL, ZERO,.L_L0 + +.L_L71: + /* Load 16 * 64 from A0 */ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + xvld U2, A0, 0x40 + xvld U3, A0, 0x60 + + /* Cumulative D0~D23 */ + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + xvfmadd.d D2, U2, U4, D2 + xvfmadd.d D3, U3, U4, D3 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + xvfmadd.d D5, U1, U4, D5 + xvfmadd.d D6, U2, U4, D6 + xvfmadd.d D7, U3, U4, D7 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + xvfmadd.d D9, U1, U4, D9 + xvfmadd.d D10, U2, U4, D10 + xvfmadd.d D11, U3, U4, D11 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + xvfmadd.d D13, U1, U4, D13 + xvfmadd.d D14, U2, U4, D14 + xvfmadd.d D15, U3, U4, D15 + + xvldrepl.d U4, B0, 0x20 + xvfmadd.d D16, U0, U4, D16 + xvfmadd.d D17, U1, U4, D17 + xvfmadd.d D18, U2, U4, D18 + xvfmadd.d D19, U3, U4, D19 + + xvldrepl.d U4, B0, 0x28 + xvfmadd.d D20, U0, U4, D20 + xvfmadd.d D21, U1, U4, D21 + xvfmadd.d D22, U2, U4, D22 + xvfmadd.d D23, U3, U4, D23 + + /* Add stride for A0, B0 */ + addi.d A0, A0, 0x80 + addi.d B0, B0, 0x30 + + addi.d TL, TL, -1 + blt ZERO,TL, .L_L71 + +.L_L0: +#if defined(TRMMKERNEL) + xvfmul.d D0, D0, VALPHA + xvfmul.d D1, D1, VALPHA + xvfmul.d D2, D2, VALPHA + xvfmul.d D3, D3, VALPHA + xvfmul.d D4, D4, VALPHA + xvfmul.d D5, D5, VALPHA + xvfmul.d D6, D6, VALPHA + xvfmul.d D7, D7, VALPHA + xvfmul.d D8, D8, VALPHA + xvfmul.d D9, D9, VALPHA + xvfmul.d D10, D10, VALPHA + xvfmul.d D11, D11, VALPHA + xvfmul.d D12, D12, VALPHA + xvfmul.d D13, D13, VALPHA + xvfmul.d D14, D14, VALPHA + xvfmul.d D15, D15, VALPHA + xvfmul.d D16, D16, VALPHA + xvfmul.d D17, D17, VALPHA + xvfmul.d D18, D18, VALPHA + xvfmul.d D19, D19, VALPHA + xvfmul.d D20, D20, VALPHA + xvfmul.d D21, D21, VALPHA + xvfmul.d D22, D22, VALPHA + xvfmul.d D23, D23, VALPHA +#else + /* Load C0 */ + xvld U0, C0, 0x00 + xvld U1, C0, 0x20 + xvld U2, C0, 0x40 + xvld U3, C0, 0x60 + xvfmadd.d D0, 
D0, VALPHA, U0 /* D0 = U0 + (D0 * VALPHA) */ + xvfmadd.d D1, D1, VALPHA, U1 + xvfmadd.d D2, D2, VALPHA, U2 + xvfmadd.d D3, D3, VALPHA, U3 + + /* Load C1 */ + xvld U0, C1, 0x00 + xvld U1, C1, 0x20 + xvld U2, C1, 0x40 + xvld U3, C1, 0x60 + xvfmadd.d D4, D4, VALPHA, U0 + xvfmadd.d D5, D5, VALPHA, U1 + xvfmadd.d D6, D6, VALPHA, U2 + xvfmadd.d D7, D7, VALPHA, U3 + + /* Load C2 */ + xvld U0, C2, 0x00 + xvld U1, C2, 0x20 + xvld U2, C2, 0x40 + xvld U3, C2, 0x60 + xvfmadd.d D8, D8, VALPHA, U0 + xvfmadd.d D9, D9, VALPHA, U1 + xvfmadd.d D10, D10, VALPHA, U2 + xvfmadd.d D11, D11, VALPHA, U3 + + /* Load C3 */ + xvld U0, C3, 0x00 + xvld U1, C3, 0x20 + xvld U2, C3, 0x40 + xvld U3, C3, 0x60 + xvfmadd.d D12, D12, VALPHA, U0 + xvfmadd.d D13, D13, VALPHA, U1 + xvfmadd.d D14, D14, VALPHA, U2 + xvfmadd.d D15, D15, VALPHA, U3 + + /* Load C4 */ + xvld U0, C4, 0x00 + xvld U1, C4, 0x20 + xvld U2, C4, 0x40 + xvld U3, C4, 0x60 + xvfmadd.d D16, D16, VALPHA, U0 + xvfmadd.d D17, D17, VALPHA, U1 + xvfmadd.d D18, D18, VALPHA, U2 + xvfmadd.d D19, D19, VALPHA, U3 + + /* Load C5 */ + xvld U0, C5, 0x00 + xvld U1, C5, 0x20 + xvld U2, C5, 0x40 + xvld U3, C5, 0x60 + xvfmadd.d D20, D20, VALPHA, U0 + xvfmadd.d D21, D21, VALPHA, U1 + xvfmadd.d D22, D22, VALPHA, U2 + xvfmadd.d D23, D23, VALPHA, U3 + #endif + + /* Store C0 */ + xvst D0, C0, 0x00 + xvst D1, C0, 0x20 + xvst D2, C0, 0x40 + xvst D3, C0, 0x60 + /* Store C1 */ + xvst D4, C1, 0x00 + xvst D5, C1, 0x20 + xvst D6, C1, 0x40 + xvst D7, C1, 0x60 + /* Store C2 */ + xvst D8, C2, 0x00 + xvst D9, C2, 0x20 + xvst D10, C2, 0x40 + xvst D11, C2, 0x60 + /* Store C3 */ + xvst D12, C3, 0x00 + xvst D13, C3, 0x20 + xvst D14, C3, 0x40 + xvst D15, C3, 0x60 + /* Store C4 */ + xvst D16, C4, 0x00 + xvst D17, C4, 0x20 + xvst D18, C4, 0x40 + xvst D19, C4, 0x60 + /* Store C5 */ + xvst D20, C5, 0x00 + xvst D21, C5, 0x20 + xvst D22, C5, 0x40 + xvst D23, C5, 0x60 + + /* Add stride for C */ + addi.d C0, C0, 0x80 + addi.d C1, C1, 0x80 + addi.d C2, C2, 0x80 + addi.d C3, C3, 0x80 + 
addi.d C4, C4, 0x80 + addi.d C5, C5, 0x80 + +#if defined(TRMMKERNEL) +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + sub.d L, K, OFF +#ifdef LEFT + /* number of values in A */ + addi.d L, L, -16 +#else + /* number of values in B */ + addi.d L, L, -6 +#endif + slli.d T0, L, 0x07 + add.d A0, A0, T0 + mul.d T0, L, I48 + add.d B0, B0, T0 +#endif + +#ifdef LEFT + addi.d OFF, OFF, 0x10 +#endif +#endif // #if defined(TRMMKERNEL) + + addi.d I, I, -1 /* I-- */ + blt ZERO,I, .L_I1 + +.L_M8: + /* We have done M & 16, considering M=8/4/2/1 */ + andi I, M, 15 + beq ZERO,I, .L_M0 + + andi I, M, 8 + beq ZERO,I, .L_M4 + +#if defined(TRMMKERNEL) +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + move B0, B +#else + slli.d T0, OFF, 0x06 + add.d A0, A0, T0 + mul.d T0, OFF, I48 + add.d B0, B, T0 +#endif + +#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + sub.d L, K, OFF +#elif defined(LEFT) + /* number of values in A */ + addi.d L, OFF, 8 +#else + /* number of values in B */ + addi.d L, OFF, 6 +#endif +#else // #if !defined(TRMMKERNEL) + move B0, B + move L, K /* L = bk */ +#endif // #if defined(TRMMKERNEL) + + /* Load 8 * 64 from A0 */ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + + xvldrepl.d U4, B0, 0x00 + /* line 1 */ + xvfmul.d D0, U0, U4 + xvfmul.d D1, U1, U4 + + xvldrepl.d U4, B0, 0x08 + /* line 2 */ + xvfmul.d D4, U0, U4 + xvfmul.d D5, U1, U4 + + xvldrepl.d U4, B0, 0x10 + /* line 3 */ + xvfmul.d D8, U0, U4 + xvfmul.d D9, U1, U4 + + xvldrepl.d U4, B0, 0x18 + /* line 4 */ + xvfmul.d D12, U0, U4 + xvfmul.d D13, U1, U4 + + xvldrepl.d U4, B0, 0x20 + /* line 5 */ + xvfmul.d D16, U0, U4 + xvfmul.d D17, U1, U4 + + xvldrepl.d U4, B0, 0x28 + /* line 6 */ + xvfmul.d D20, U0, U4 + xvfmul.d D21, U1, U4 + + /* Add stride for A0 and B0 */ + addi.d A0, A0, 0x40 + addi.d B0, B0, 0x30 + /* Reduce L */ + addi.d L, L, -1 + srai.d TL, L, 3 /* TL = (L-1) >> 3 */ + /* if (TL < 1) goto L_M8_L7 */ + beq 
ZERO,TL, .L_M8_L7 + +.L_M8_TL1: /* TL-- */ + /***8-1***/ + /* Load 16 * 64 from A0 */ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + xvfmadd.d D5, U1, U4, D5 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + xvfmadd.d D9, U1, U4, D9 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + xvfmadd.d D13, U1, U4, D13 + + xvldrepl.d U4, B0, 0x20 + xvfmadd.d D16, U0, U4, D16 + xvfmadd.d D17, U1, U4, D17 + + xvldrepl.d U4, B0, 0x28 + xvfmadd.d D20, U0, U4, D20 + xvfmadd.d D21, U1, U4, D21 + + addi.d A0, A0, 0x40 + addi.d B0, B0, 0x30 + + /***8-2***/ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + xvfmadd.d D5, U1, U4, D5 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + xvfmadd.d D9, U1, U4, D9 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + xvfmadd.d D13, U1, U4, D13 + + xvldrepl.d U4, B0, 0x20 + xvfmadd.d D16, U0, U4, D16 + xvfmadd.d D17, U1, U4, D17 + + xvldrepl.d U4, B0, 0x28 + xvfmadd.d D20, U0, U4, D20 + xvfmadd.d D21, U1, U4, D21 + + addi.d A0, A0, 0x40 + addi.d B0, B0, 0x30 + + /***8-3***/ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + xvfmadd.d D5, U1, U4, D5 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + xvfmadd.d D9, U1, U4, D9 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + xvfmadd.d D13, U1, U4, D13 + + xvldrepl.d U4, B0, 0x20 + xvfmadd.d D16, U0, U4, D16 + xvfmadd.d D17, U1, U4, D17 + + xvldrepl.d U4, B0, 0x28 + xvfmadd.d D20, U0, U4, D20 + xvfmadd.d D21, U1, U4, D21 + + addi.d A0, A0, 0x40 + addi.d B0, B0, 0x30 + + /***8-4***/ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 
+ xvfmadd.d D1, U1, U4, D1 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + xvfmadd.d D5, U1, U4, D5 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + xvfmadd.d D9, U1, U4, D9 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + xvfmadd.d D13, U1, U4, D13 + + xvldrepl.d U4, B0, 0x20 + xvfmadd.d D16, U0, U4, D16 + xvfmadd.d D17, U1, U4, D17 + + xvldrepl.d U4, B0, 0x28 + xvfmadd.d D20, U0, U4, D20 + xvfmadd.d D21, U1, U4, D21 + + addi.d A0, A0, 0x40 + addi.d B0, B0, 0x30 + + /***8-5***/ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + xvfmadd.d D5, U1, U4, D5 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + xvfmadd.d D9, U1, U4, D9 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + xvfmadd.d D13, U1, U4, D13 + + xvldrepl.d U4, B0, 0x20 + xvfmadd.d D16, U0, U4, D16 + xvfmadd.d D17, U1, U4, D17 + + xvldrepl.d U4, B0, 0x28 + xvfmadd.d D20, U0, U4, D20 + xvfmadd.d D21, U1, U4, D21 + + addi.d A0, A0, 0x40 + addi.d B0, B0, 0x30 + + /***8-6***/ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + xvfmadd.d D5, U1, U4, D5 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + xvfmadd.d D9, U1, U4, D9 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + xvfmadd.d D13, U1, U4, D13 + + xvldrepl.d U4, B0, 0x20 + xvfmadd.d D16, U0, U4, D16 + xvfmadd.d D17, U1, U4, D17 + + xvldrepl.d U4, B0, 0x28 + xvfmadd.d D20, U0, U4, D20 + xvfmadd.d D21, U1, U4, D21 + + addi.d A0, A0, 0x40 + addi.d B0, B0, 0x30 + + /***8-7***/ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + xvfmadd.d D5, U1, U4, D5 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + xvfmadd.d D9, 
U1, U4, D9 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + xvfmadd.d D13, U1, U4, D13 + + xvldrepl.d U4, B0, 0x20 + xvfmadd.d D16, U0, U4, D16 + xvfmadd.d D17, U1, U4, D17 + + xvldrepl.d U4, B0, 0x28 + xvfmadd.d D20, U0, U4, D20 + xvfmadd.d D21, U1, U4, D21 + + addi.d A0, A0, 0x40 + addi.d B0, B0, 0x30 + + /***8-8***/ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + xvfmadd.d D5, U1, U4, D5 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + xvfmadd.d D9, U1, U4, D9 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + xvfmadd.d D13, U1, U4, D13 + + xvldrepl.d U4, B0, 0x20 + xvfmadd.d D16, U0, U4, D16 + xvfmadd.d D17, U1, U4, D17 + + xvldrepl.d U4, B0, 0x28 + xvfmadd.d D20, U0, U4, D20 + xvfmadd.d D21, U1, U4, D21 + + addi.d A0, A0, 0x40 + addi.d B0, B0, 0x30 + + addi.d TL, TL, -1 /* TL-- */ + blt ZERO,TL, .L_M8_TL1 + +.L_M8_L7: + /* if (!(L & 7)) goto L_M8_L0 */ + andi TL, L, 7 + beq TL, ZERO,.L_M8_L0 + +.L_M8_L71: + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + xvfmadd.d D5, U1, U4, D5 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + xvfmadd.d D9, U1, U4, D9 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + xvfmadd.d D13, U1, U4, D13 + + xvldrepl.d U4, B0, 0x20 + xvfmadd.d D16, U0, U4, D16 + xvfmadd.d D17, U1, U4, D17 + + xvldrepl.d U4, B0, 0x28 + xvfmadd.d D20, U0, U4, D20 + xvfmadd.d D21, U1, U4, D21 + + /* Add stride for A0, B0 */ + addi.d A0, A0, 0x40 + addi.d B0, B0, 0x30 + + addi.d TL, TL, -1 + blt ZERO,TL, .L_M8_L71 + +.L_M8_L0: +#if defined(TRMMKERNEL) + xvfmul.d D0, D0, VALPHA + xvfmul.d D1, D1, VALPHA + xvfmul.d D4, D4, VALPHA + xvfmul.d D5, D5, VALPHA + xvfmul.d D8, D8, VALPHA + xvfmul.d D9, D9, VALPHA + xvfmul.d D12, D12, VALPHA + xvfmul.d D13, D13, VALPHA + 
xvfmul.d D16, D16, VALPHA + xvfmul.d D17, D17, VALPHA + xvfmul.d D20, D20, VALPHA + xvfmul.d D21, D21, VALPHA +#else + /* Load C0 */ + xvld U0, C0, 0x00 + xvld U1, C0, 0x20 + xvfmadd.d D0, D0, VALPHA, U0 /* D0 = U0 + (D0 * VALPHA) */ + xvfmadd.d D1, D1, VALPHA, U1 + + /* Load C1 */ + xvld U0, C1, 0x00 + xvld U1, C1, 0x20 + xvfmadd.d D4, D4, VALPHA, U0 + xvfmadd.d D5, D5, VALPHA, U1 + + /* Load C2 */ + xvld U0, C2, 0x00 + xvld U1, C2, 0x20 + xvfmadd.d D8, D8, VALPHA, U0 + xvfmadd.d D9, D9, VALPHA, U1 + + /* Load C3 */ + xvld U0, C3, 0x00 + xvld U1, C3, 0x20 + xvfmadd.d D12, D12, VALPHA, U0 + xvfmadd.d D13, D13, VALPHA, U1 + + /* Load C4 */ + xvld U0, C4, 0x00 + xvld U1, C4, 0x20 + xvfmadd.d D16, D16, VALPHA, U0 + xvfmadd.d D17, D17, VALPHA, U1 + + /* Load C5 */ + xvld U0, C5, 0x00 + xvld U1, C5, 0x20 + xvfmadd.d D20, D20, VALPHA, U0 + xvfmadd.d D21, D21, VALPHA, U1 +#endif + + /* Store C0 */ + xvst D0, C0, 0x00 + xvst D1, C0, 0x20 + /* Store C1 */ + xvst D4, C1, 0x00 + xvst D5, C1, 0x20 + /* Store C2 */ + xvst D8, C2, 0x00 + xvst D9, C2, 0x20 + /* Store C3 */ + xvst D12, C3, 0x00 + xvst D13, C3, 0x20 + /* Store C4 */ + xvst D16, C4, 0x00 + xvst D17, C4, 0x20 + /* Store C5 */ + xvst D20, C5, 0x00 + xvst D21, C5, 0x20 + + /* Add stride for C */ + addi.d C0, C0, 0x40 + addi.d C1, C1, 0x40 + addi.d C2, C2, 0x40 + addi.d C3, C3, 0x40 + addi.d C4, C4, 0x40 + addi.d C5, C5, 0x40 + +#if defined(TRMMKERNEL) +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + sub.d L, K, OFF +#ifdef LEFT + /* number of values in A */ + addi.d L, L, -8 +#else + /* number of values in B */ + addi.d L, L, -6 +#endif + slli.d T0, L, 0x06 + add.d A0, A0, T0 + mul.d T0, L, I48 + add.d B0, B0, T0 +#endif + +#ifdef LEFT + /* number of values in A */ + addi.d OFF, OFF, 0x08 +#endif +#endif // #if defined(TRMMKERNEL) + +/********LOOP (if(N / 6 ) && (M & 8)) End************/ + +.L_M4: + andi I, M, 4 + beq ZERO,I, .L_M2 + +#if defined(TRMMKERNEL) +#if (defined(LEFT) && 
defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + move B0, B +#else + slli.d T0, OFF, 0x05 + add.d A0, A0, T0 + mul.d T0, OFF, I48 + add.d B0, B, T0 +#endif + +#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + sub.d L, K, OFF +#elif defined(LEFT) + /* number of values in A */ + addi.d L, OFF, 4 +#else + /* number of values in B */ + addi.d L, OFF, 6 +#endif +#else // #if !defined(TRMMKERNEL) + move B0, B + move L, K /* L = bk */ +#endif + + /* Load 4 * 64 from A0 */ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + /* line 1 */ + xvfmul.d D0, U0, U4 + + xvldrepl.d U4, B0, 0x08 + /* line 2 */ + xvfmul.d D4, U0, U4 + + xvldrepl.d U4, B0, 0x10 + /* line 3 */ + xvfmul.d D8, U0, U4 + + xvldrepl.d U4, B0, 0x18 + /* line 4 */ + xvfmul.d D12, U0, U4 + + xvldrepl.d U4, B0, 0x20 + /* line 5 */ + xvfmul.d D16, U0, U4 + + xvldrepl.d U4, B0, 0x28 + /* line 6 */ + xvfmul.d D20, U0, U4 + + /* Add stride for A0 and B0 */ + addi.d A0, A0, 0x20 + addi.d B0, B0, 0x30 + /* Reduce L */ + addi.d L, L, -1 + srai.d TL, L, 3 /* TL = (L-1) >> 3 */ + /* if (TL < 1) goto L_M4_L7 */ + beq ZERO,TL, .L_M4_L7 + +.L_M4_TL1: /* TL-- */ + /***8-1***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + xvldrepl.d U4, B0, 0x20 + xvfmadd.d D16, U0, U4, D16 + + xvldrepl.d U4, B0, 0x28 + xvfmadd.d D20, U0, U4, D20 + + addi.d A0, A0, 0x20 + addi.d B0, B0, 0x30 + + /***8-2***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + xvldrepl.d U4, B0, 0x20 + xvfmadd.d D16, U0, U4, D16 + + xvldrepl.d U4, B0, 0x28 + xvfmadd.d D20, U0, U4, D20 + + addi.d A0, A0, 0x20 + addi.d B0, B0, 0x30 + + 
/***8-3***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + xvldrepl.d U4, B0, 0x20 + xvfmadd.d D16, U0, U4, D16 + + xvldrepl.d U4, B0, 0x28 + xvfmadd.d D20, U0, U4, D20 + + addi.d A0, A0, 0x20 + addi.d B0, B0, 0x30 + + /***8-4***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + xvldrepl.d U4, B0, 0x20 + xvfmadd.d D16, U0, U4, D16 + + xvldrepl.d U4, B0, 0x28 + xvfmadd.d D20, U0, U4, D20 + + addi.d A0, A0, 0x20 + addi.d B0, B0, 0x30 + + /***8-5***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + xvldrepl.d U4, B0, 0x20 + xvfmadd.d D16, U0, U4, D16 + + xvldrepl.d U4, B0, 0x28 + xvfmadd.d D20, U0, U4, D20 + + addi.d A0, A0, 0x20 + addi.d B0, B0, 0x30 + + /***8-6***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + xvldrepl.d U4, B0, 0x20 + xvfmadd.d D16, U0, U4, D16 + + xvldrepl.d U4, B0, 0x28 + xvfmadd.d D20, U0, U4, D20 + + addi.d A0, A0, 0x20 + addi.d B0, B0, 0x30 + + /***8-7***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + xvldrepl.d U4, B0, 0x20 + xvfmadd.d D16, U0, U4, D16 + + xvldrepl.d 
U4, B0, 0x28 + xvfmadd.d D20, U0, U4, D20 + + addi.d A0, A0, 0x20 + addi.d B0, B0, 0x30 + + /***8-8***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + xvldrepl.d U4, B0, 0x20 + xvfmadd.d D16, U0, U4, D16 + + xvldrepl.d U4, B0, 0x28 + xvfmadd.d D20, U0, U4, D20 + + addi.d A0, A0, 0x20 + addi.d B0, B0, 0x30 + + addi.d TL, TL, -1 /* TL-- */ + blt ZERO,TL, .L_M4_TL1 + +.L_M4_L7: + /* if (!(L & 7)) goto L_M4_L0 */ + andi TL, L, 7 + beq TL, ZERO,.L_M4_L0 + +.L_M4_L71: + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + xvldrepl.d U4, B0, 0x20 + xvfmadd.d D16, U0, U4, D16 + + xvldrepl.d U4, B0, 0x28 + xvfmadd.d D20, U0, U4, D20 + + /* Add stride for A0, B0 */ + addi.d A0, A0, 0x20 + addi.d B0, B0, 0x30 + + addi.d TL, TL, -1 + blt ZERO,TL, .L_M4_L71 + +.L_M4_L0: +#if defined(TRMMKERNEL) + xvfmul.d D0, D0, VALPHA + xvfmul.d D4, D4, VALPHA + xvfmul.d D8, D8, VALPHA + xvfmul.d D12, D12, VALPHA + xvfmul.d D16, D16, VALPHA + xvfmul.d D20, D20, VALPHA +#else + /* Load C0 */ + xvld U0, C0, 0x00 + xvfmadd.d D0, D0, VALPHA, U0 /* D0 = U0 + (D0 * VALPHA) */ + + /* Load C1 */ + xvld U0, C1, 0x00 + xvfmadd.d D4, D4, VALPHA, U0 + + /* Load C2 */ + xvld U0, C2, 0x00 + xvfmadd.d D8, D8, VALPHA, U0 + + /* Load C3 */ + xvld U0, C3, 0x00 + xvfmadd.d D12, D12, VALPHA, U0 + + /* Load C4 */ + xvld U0, C4, 0x00 + xvfmadd.d D16, D16, VALPHA, U0 + + /* Load C5 */ + xvld U0, C5, 0x00 + xvfmadd.d D20, D20, VALPHA, U0 +#endif + + /* Store C0 */ + xvst D0, C0, 0x00 + /* Store C1 */ + xvst D4, C1, 0x00 + /* Store C2 */ + xvst D8, C2, 0x00 + /* Store C3 */ + xvst D12, C3, 0x00 + /* Store C4 */ + xvst D16, C4, 0x00 + 
/* Store C5 */ + xvst D20, C5, 0x00 + + /* Add stride for C */ + addi.d C0, C0, 0x20 + addi.d C1, C1, 0x20 + addi.d C2, C2, 0x20 + addi.d C3, C3, 0x20 + addi.d C4, C4, 0x20 + addi.d C5, C5, 0x20 + +#if defined(TRMMKERNEL) +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + sub.d L, K, OFF +#ifdef LEFT + /* number of values in A */ + addi.d L, L, -4 +#else + /* number of values in B */ + addi.d L, L, -6 +#endif + slli.d T0, L, 0x05 + add.d A0, A0, T0 + mul.d T0, L, I48 + add.d B0, B0, T0 +#endif + +#ifdef LEFT + /* number of values in A */ + addi.d OFF, OFF, 0x04 +#endif +#endif // #if defined(TRMMKERNEL) + +/********LOOP (if(N / 6 ) && (M & 4) ) End************/ + +.L_M2: + andi I, M, 2 + beq ZERO,I, .L_M1 + +#if defined(TRMMKERNEL) +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + move B0, B +#else + slli.d T0, OFF, 0x04 + add.d A0, A0, T0 + mul.d T0, OFF, I48 + add.d B0, B, T0 +#endif + +#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + sub.d L, K, OFF +#elif defined(LEFT) + /* number of values in A */ + addi.d L, OFF, 2 +#else + /* number of values in B */ + addi.d L, OFF, 6 +#endif +#else // #if !defined(TRMMKERNEL) + move B0, B + move L, K /* L = bk */ +#endif + + /* Load 2 * 64 from A0 */ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + /* line 1 */ + xvfmul.d D0, U0, U4 + + xvldrepl.d U4, B0, 0x08 + /* line 2 */ + xvfmul.d D4, U0, U4 + + xvldrepl.d U4, B0, 0x10 + /* line 3 */ + xvfmul.d D8, U0, U4 + + xvldrepl.d U4, B0, 0x18 + /* line 4 */ + xvfmul.d D12, U0, U4 + + xvldrepl.d U4, B0, 0x20 + /* line 5 */ + xvfmul.d D16, U0, U4 + + xvldrepl.d U4, B0, 0x28 + /* line 6 */ + xvfmul.d D20, U0, U4 + + /* Add stride for A0 and B0 */ + addi.d A0, A0, 0x10 + addi.d B0, B0, 0x30 + /* Reduce L */ + addi.d L, L, -1 + srai.d TL, L, 3 /* TL = (L-1) >> 3 */ + /* if (TL < 1) goto L_M2_L7 */ + beq ZERO,TL, .L_M2_L7 + +.L_M2_TL1: /* TL-- */ + /***8-1***/ + /* Load 2 * 64 from A0 */ + 
xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + xvldrepl.d U4, B0, 0x20 + xvfmadd.d D16, U0, U4, D16 + + xvldrepl.d U4, B0, 0x28 + xvfmadd.d D20, U0, U4, D20 + + addi.d A0, A0, 0x10 + addi.d B0, B0, 0x30 + + /***8-2***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + xvldrepl.d U4, B0, 0x20 + xvfmadd.d D16, U0, U4, D16 + + xvldrepl.d U4, B0, 0x28 + xvfmadd.d D20, U0, U4, D20 + + addi.d A0, A0, 0x10 + addi.d B0, B0, 0x30 + + /***8-3***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + xvldrepl.d U4, B0, 0x20 + xvfmadd.d D16, U0, U4, D16 + + xvldrepl.d U4, B0, 0x28 + xvfmadd.d D20, U0, U4, D20 + + addi.d A0, A0, 0x10 + addi.d B0, B0, 0x30 + + /***8-4***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + xvldrepl.d U4, B0, 0x20 + xvfmadd.d D16, U0, U4, D16 + + xvldrepl.d U4, B0, 0x28 + xvfmadd.d D20, U0, U4, D20 + + addi.d A0, A0, 0x10 + addi.d B0, B0, 0x30 + + /***8-5***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + xvldrepl.d U4, B0, 0x20 + xvfmadd.d D16, U0, U4, D16 + + xvldrepl.d U4, B0, 0x28 + 
xvfmadd.d D20, U0, U4, D20 + + addi.d A0, A0, 0x10 + addi.d B0, B0, 0x30 + + /***8-6***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + xvldrepl.d U4, B0, 0x20 + xvfmadd.d D16, U0, U4, D16 + + xvldrepl.d U4, B0, 0x28 + xvfmadd.d D20, U0, U4, D20 + + addi.d A0, A0, 0x10 + addi.d B0, B0, 0x30 + + /***8-7***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + xvldrepl.d U4, B0, 0x20 + xvfmadd.d D16, U0, U4, D16 + + xvldrepl.d U4, B0, 0x28 + xvfmadd.d D20, U0, U4, D20 + + addi.d A0, A0, 0x10 + addi.d B0, B0, 0x30 + + /***8-8***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + xvldrepl.d U4, B0, 0x20 + xvfmadd.d D16, U0, U4, D16 + + xvldrepl.d U4, B0, 0x28 + xvfmadd.d D20, U0, U4, D20 + + addi.d A0, A0, 0x10 + addi.d B0, B0, 0x30 + + addi.d TL, TL, -1 /* TL-- */ + blt ZERO,TL, .L_M2_TL1 + +.L_M2_L7: + /* if (!(L & 7)) goto L_M2_L0 */ + andi TL, L, 7 + beq TL, ZERO,.L_M2_L0 + +.L_M2_L71: + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + xvldrepl.d U4, B0, 0x20 + xvfmadd.d D16, U0, U4, D16 + + xvldrepl.d U4, B0, 0x28 + xvfmadd.d D20, U0, U4, D20 + + /* Add stride for A0, B0 */ + addi.d A0, A0, 0x10 + addi.d B0, B0, 0x30 + + addi.d TL, TL, -1 + blt ZERO,TL, .L_M2_L71 + +.L_M2_L0: +#if 
defined(TRMMKERNEL) + xvfmul.d D0, D0, VALPHA + xvfmul.d D4, D4, VALPHA + xvfmul.d D8, D8, VALPHA + xvfmul.d D12, D12, VALPHA + xvfmul.d D16, D16, VALPHA + xvfmul.d D20, D20, VALPHA +#else + /* Load C0 */ + xvld U0, C0, 0x00 + xvfmadd.d D0, D0, VALPHA, U0 /* D0 = U0 + (D0 * VALPHA) */ + + /* Load C1 */ + xvld U0, C1, 0x00 + xvfmadd.d D4, D4, VALPHA, U0 + + /* Load C2 */ + xvld U0, C2, 0x00 + xvfmadd.d D8, D8, VALPHA, U0 + + /* Load C3 */ + xvld U0, C3, 0x00 + xvfmadd.d D12, D12, VALPHA, U0 + + /* Load C4 */ + xvld U0, C4, 0x00 + xvfmadd.d D16, D16, VALPHA, U0 + + /* Load C5 */ + xvld U0, C5, 0x00 + xvfmadd.d D20, D20, VALPHA, U0 +#endif + + xvstelm.d D0, C0, 0x00, 0x00 + xvstelm.d D4, C1, 0x00, 0x00 + xvstelm.d D8, C2, 0x00, 0x00 + xvstelm.d D12, C3, 0x00, 0x00 + xvstelm.d D16, C4, 0x00, 0x00 + xvstelm.d D20, C5, 0x00, 0x00 + xvstelm.d D0, C0, 0x08, 0x01 + xvstelm.d D4, C1, 0x08, 0x01 + xvstelm.d D8, C2, 0x08, 0x01 + xvstelm.d D12, C3, 0x08, 0x01 + xvstelm.d D16, C4, 0x08, 0x01 + xvstelm.d D20, C5, 0x08, 0x01 + + /* Add stride for C */ + addi.d C0, C0, 0x10 + addi.d C1, C1, 0x10 + addi.d C2, C2, 0x10 + addi.d C3, C3, 0x10 + addi.d C4, C4, 0x10 + addi.d C5, C5, 0x10 + +#if defined(TRMMKERNEL) +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + sub.d L, K, OFF +#ifdef LEFT + /* number of values in A */ + addi.d L, L, -2 +#else + /* number of values in B */ + addi.d L, L, -6 +#endif + slli.d T0, L, 0x04 + add.d A0, A0, T0 + mul.d T0, L, I48 + add.d B0, B0, T0 +#endif + +#ifdef LEFT + /* number of values in A */ + addi.d OFF, OFF, 0x02 +#endif +#endif // #if defined(TRMMKERNEL) + +/********LOOP (if(N / 6 ) && (M & 2) ) End************/ + +.L_M1: + andi I, M, 1 + beq ZERO,I, .L_M0 + +#if defined(TRMMKERNEL) +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + move B0, B +#else + slli.d T0, OFF, 0x03 + add.d A0, A0, T0 + mul.d T0, OFF, I48 + add.d B0, B, T0 +#endif + +#if (defined(LEFT) && !defined(TRANSA)) || 
(!defined(LEFT) && defined(TRANSA)) + sub.d L, K, OFF +#elif defined(LEFT) + /* number of values in A */ + addi.d L, OFF, 1 +#else + /* number of values in B */ + addi.d L, OFF, 6 +#endif +#else // #if !defined(TRMMKERNEL) + move B0, B + move L, K /* L = bk */ +#endif + + /* Load 1 * 64 from A0 */ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + /* line 1 */ + xvfmul.d D0, U0, U4 + + xvldrepl.d U4, B0, 0x08 + /* line 2 */ + xvfmul.d D4, U0, U4 + + xvldrepl.d U4, B0, 0x10 + /* line 3 */ + xvfmul.d D8, U0, U4 + + xvldrepl.d U4, B0, 0x18 + /* line 4 */ + xvfmul.d D12, U0, U4 + + xvldrepl.d U4, B0, 0x20 + /* line 5 */ + xvfmul.d D16, U0, U4 + + xvldrepl.d U4, B0, 0x28 + /* line 6 */ + xvfmul.d D20, U0, U4 + + /* Add stride for A0 and B0 */ + addi.d A0, A0, 0x08 + addi.d B0, B0, 0x30 + /* Reduce L */ + addi.d L, L, -1 + srai.d TL, L, 3 /* TL = (L-1) >> 3 */ + /* if (TL < 1) goto L_M1_L7 */ + beq ZERO,TL, .L_M1_L7 + +.L_M1_TL1: /* TL-- */ + /***8-1***/ + /* Load 1 * 64 from A0 */ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + xvldrepl.d U4, B0, 0x20 + xvfmadd.d D16, U0, U4, D16 + + xvldrepl.d U4, B0, 0x28 + xvfmadd.d D20, U0, U4, D20 + + addi.d A0, A0, 0x08 + addi.d B0, B0, 0x30 + + /***8-2***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + xvldrepl.d U4, B0, 0x20 + xvfmadd.d D16, U0, U4, D16 + + xvldrepl.d U4, B0, 0x28 + xvfmadd.d D20, U0, U4, D20 + + addi.d A0, A0, 0x08 + addi.d B0, B0, 0x30 + + /***8-3***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, 
U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + xvldrepl.d U4, B0, 0x20 + xvfmadd.d D16, U0, U4, D16 + + xvldrepl.d U4, B0, 0x28 + xvfmadd.d D20, U0, U4, D20 + + addi.d A0, A0, 0x08 + addi.d B0, B0, 0x30 + + /***8-4***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + xvldrepl.d U4, B0, 0x20 + xvfmadd.d D16, U0, U4, D16 + + xvldrepl.d U4, B0, 0x28 + xvfmadd.d D20, U0, U4, D20 + + addi.d A0, A0, 0x08 + addi.d B0, B0, 0x30 + + /***8-5***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + xvldrepl.d U4, B0, 0x20 + xvfmadd.d D16, U0, U4, D16 + + xvldrepl.d U4, B0, 0x28 + xvfmadd.d D20, U0, U4, D20 + + addi.d A0, A0, 0x08 + addi.d B0, B0, 0x30 + + /***8-6***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + xvldrepl.d U4, B0, 0x20 + xvfmadd.d D16, U0, U4, D16 + + xvldrepl.d U4, B0, 0x28 + xvfmadd.d D20, U0, U4, D20 + + addi.d A0, A0, 0x08 + addi.d B0, B0, 0x30 + + /***8-7***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + xvldrepl.d U4, B0, 0x20 + xvfmadd.d D16, U0, U4, D16 + + xvldrepl.d U4, B0, 0x28 + xvfmadd.d D20, U0, U4, D20 + + addi.d A0, A0, 0x08 + addi.d B0, B0, 0x30 + + /***8-8***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + 
xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + xvldrepl.d U4, B0, 0x20 + xvfmadd.d D16, U0, U4, D16 + + xvldrepl.d U4, B0, 0x28 + xvfmadd.d D20, U0, U4, D20 + + addi.d A0, A0, 0x08 + addi.d B0, B0, 0x30 + + addi.d TL, TL, -1 /* TL-- */ + blt ZERO,TL, .L_M1_TL1 + +.L_M1_L7: + /* if (!(L & 7)) goto L_M1_L0 */ + andi TL, L, 7 + beq TL, ZERO,.L_M1_L0 + +.L_M1_L71: + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + xvldrepl.d U4, B0, 0x20 + xvfmadd.d D16, U0, U4, D16 + + xvldrepl.d U4, B0, 0x28 + xvfmadd.d D20, U0, U4, D20 + + /* Add stride for A0, B0 */ + addi.d A0, A0, 0x08 + addi.d B0, B0, 0x30 + + addi.d TL, TL, -1 + blt ZERO,TL, .L_M1_L71 + +.L_M1_L0: +#ifdef TRMMKERNEL + xvfmul.d D0, D0, VALPHA + xvfmul.d D4, D4, VALPHA + xvfmul.d D8, D8, VALPHA + xvfmul.d D12, D12, VALPHA + xvfmul.d D16, D16, VALPHA + xvfmul.d D20, D20, VALPHA +#else + /* Load C0 */ + xvld U0, C0, 0x00 + xvfmadd.d D0, D0, VALPHA, U0 /* D0 = U0 + (D0 * VALPHA) */ + + /* Load C1 */ + xvld U0, C1, 0x00 + xvfmadd.d D4, D4, VALPHA, U0 + + /* Load C2 */ + xvld U0, C2, 0x00 + xvfmadd.d D8, D8, VALPHA, U0 + + /* Load C3 */ + xvld U0, C3, 0x00 + xvfmadd.d D12, D12, VALPHA, U0 + + /* Load C4 */ + xvld U0, C4, 0x00 + xvfmadd.d D16, D16, VALPHA, U0 + + /* Load C5 */ + xvld U0, C5, 0x00 + xvfmadd.d D20, D20, VALPHA, U0 +#endif + + xvstelm.d D0, C0, 0x00, 0x00 + xvstelm.d D4, C1, 0x00, 0x00 + xvstelm.d D8, C2, 0x00, 0x00 + xvstelm.d D12, C3, 0x00, 0x00 + xvstelm.d D16, C4, 0x00, 0x00 + xvstelm.d D20, C5, 0x00, 0x00 + + /* Add stride for C */ + addi.d C0, C0, 0x08 + addi.d C1, C1, 0x08 + addi.d C2, C2, 0x08 + addi.d C3, C3, 0x08 + addi.d C4, C4, 0x08 + addi.d C5, C5, 0x08 + +#if defined(TRMMKERNEL) 
+#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + sub.d L, K, OFF +#ifdef LEFT + /* number of values in A */ + addi.d L, L, -1 +#else + /* number of values in B */ + addi.d L, L, -6 +#endif + slli.d T0, L, 0x03 + add.d A0, A0, T0 + mul.d T0, L, I48 + add.d B0, B0, T0 +#endif + +#ifdef LEFT + /* number of values in A */ + addi.d OFF, OFF, 0x01 +#endif +#endif // #if defined(TRMMKERNEL) + +/********LOOP (if(N / 6 ) && (M & 1) ) End************/ + +.L_M0: + /* Add stride for B and C + * B += (K * 6) + * C += (LDC * 6) + */ + /* since the array type is double, + * so we must mul 48 + */ + addi.d T2, ZERO,48 + mul.d T0, K, T2 + mul.d T1, LDC, T2 + add.d B, B, T0 + add.d C, C, T1 + +#if defined(TRMMKERNEL) && !defined(LEFT) + addi.d OFF, OFF, 0x06 +#endif + + blt ZERO, J, .L_J1 + +//////////////// go back to L_J1 ///////////////// +///////////////////////////////////////////////// +/************************ Condition 1 if((N >> 2) && (M >> 3)) END !!! ************************/ + +.L_N5: + andi J, N, 4 + beq ZERO, J, .L_N3 + +/************************* Condition 2 if((N & 4) && (M >> 4)) START !!! 
************************* +* dgemm_core_16x4 */ + + move C0, C + move A0, A + slli.d T0, LDC, 3 + add.d C1, C0, T0 + add.d C2, C1, T0 + add.d C3, C2, T0 + +#if defined(TRMMKERNEL) && defined(LEFT) + move OFF, OFFSET +#endif + + /* if (!(M >> 3)) goto L_N5_M8 */ + srai.d I, M, 4 /* I = bm >> 4 */ + beq ZERO, I, .L_N5_M8 + +.L_N5_I1: +#if defined(TRMMKERNEL) +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + move B0, B +#else + slli.d T0, OFF, 0x07 + add.d A0, A0, T0 + slli.d T0, OFF, 0x05 + add.d B0, B, T0 +#endif + +#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + sub.d L, K, OFF +#elif defined(LEFT) + /* number of values in A */ + addi.d L, OFF, 16 +#else + /* number of values in B */ + addi.d L, OFF, 4 +#endif +#else // #if !defined(TRMMKERNEL) + move B0, B + move L, K /* L = bk */ +#endif + /* Load 16 * 64 from A0 + * U0 = {a3, a2, a1, a0} + * U1 = {a7, a6, a5, a4} + * U2 = {a11, a10, a9, a8} + * U3 = {a15, a14, a13, a12} + */ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + xvld U2, A0, 0x40 + xvld U3, A0, 0x60 + + xvldrepl.d U4, B0, 0x00 + /* line 1 */ + xvfmul.d D0, U0, U4 + xvfmul.d D1, U1, U4 + xvfmul.d D2, U2, U4 + xvfmul.d D3, U3, U4 + + xvldrepl.d U4, B0, 0x08 + /* line 2 */ + xvfmul.d D4, U0, U4 + xvfmul.d D5, U1, U4 + xvfmul.d D6, U2, U4 + xvfmul.d D7, U3, U4 + + xvldrepl.d U4, B0, 0x10 + /* line 3 */ + xvfmul.d D8, U0, U4 + xvfmul.d D9, U1, U4 + xvfmul.d D10, U2, U4 + xvfmul.d D11, U3, U4 + + xvldrepl.d U4, B0, 0x18 + /* line 4 */ + xvfmul.d D12, U0, U4 + xvfmul.d D13, U1, U4 + xvfmul.d D14, U2, U4 + xvfmul.d D15, U3, U4 + + /* Add stride for A0 and B0 */ + addi.d A0, A0, 0x80 + addi.d B0, B0, 0x20 + /* Reduce L */ + addi.d L, L, -1 + srai.d TL, L, 3 /* TL = (L-1) >> 3 */ + /* if (TL < 1) goto L_N5_L7 */ + beq ZERO,TL, .L_N5_L7 + +.L_N5_TL1: /* TL-- */ + /***8-1***/ + /* Load 16 * 64 from A0 */ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + xvld U2, A0, 0x40 + xvld U3, A0, 0x60 + + xvldrepl.d U4, B0, 
0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + xvfmadd.d D2, U2, U4, D2 + xvfmadd.d D3, U3, U4, D3 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + xvfmadd.d D5, U1, U4, D5 + xvfmadd.d D6, U2, U4, D6 + xvfmadd.d D7, U3, U4, D7 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + xvfmadd.d D9, U1, U4, D9 + xvfmadd.d D10, U2, U4, D10 + xvfmadd.d D11, U3, U4, D11 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + xvfmadd.d D13, U1, U4, D13 + xvfmadd.d D14, U2, U4, D14 + xvfmadd.d D15, U3, U4, D15 + + addi.d A0, A0, 0x80 + addi.d B0, B0, 0x20 + + /***8-2***/ + /* Load 16 * 64 from A0 */ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + xvld U2, A0, 0x40 + xvld U3, A0, 0x60 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + xvfmadd.d D2, U2, U4, D2 + xvfmadd.d D3, U3, U4, D3 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + xvfmadd.d D5, U1, U4, D5 + xvfmadd.d D6, U2, U4, D6 + xvfmadd.d D7, U3, U4, D7 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + xvfmadd.d D9, U1, U4, D9 + xvfmadd.d D10, U2, U4, D10 + xvfmadd.d D11, U3, U4, D11 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + xvfmadd.d D13, U1, U4, D13 + xvfmadd.d D14, U2, U4, D14 + xvfmadd.d D15, U3, U4, D15 + + addi.d A0, A0, 0x80 + addi.d B0, B0, 0x20 + + /***8-3***/ + /* Load 16 * 64 from A0 */ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + xvld U2, A0, 0x40 + xvld U3, A0, 0x60 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + xvfmadd.d D2, U2, U4, D2 + xvfmadd.d D3, U3, U4, D3 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + xvfmadd.d D5, U1, U4, D5 + xvfmadd.d D6, U2, U4, D6 + xvfmadd.d D7, U3, U4, D7 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + xvfmadd.d D9, U1, U4, D9 + xvfmadd.d D10, U2, U4, D10 + xvfmadd.d D11, U3, U4, D11 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + xvfmadd.d D13, U1, U4, D13 + xvfmadd.d D14, U2, U4, D14 + xvfmadd.d D15, U3, U4, D15 + + addi.d 
A0, A0, 0x80 + addi.d B0, B0, 0x20 + + /***8-4***/ + /* Load 16 * 64 from A0 */ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + xvld U2, A0, 0x40 + xvld U3, A0, 0x60 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + xvfmadd.d D2, U2, U4, D2 + xvfmadd.d D3, U3, U4, D3 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + xvfmadd.d D5, U1, U4, D5 + xvfmadd.d D6, U2, U4, D6 + xvfmadd.d D7, U3, U4, D7 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + xvfmadd.d D9, U1, U4, D9 + xvfmadd.d D10, U2, U4, D10 + xvfmadd.d D11, U3, U4, D11 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + xvfmadd.d D13, U1, U4, D13 + xvfmadd.d D14, U2, U4, D14 + xvfmadd.d D15, U3, U4, D15 + + addi.d A0, A0, 0x80 + addi.d B0, B0, 0x20 + + /***8-5***/ + /* Load 16 * 64 from A0 */ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + xvld U2, A0, 0x40 + xvld U3, A0, 0x60 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + xvfmadd.d D2, U2, U4, D2 + xvfmadd.d D3, U3, U4, D3 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + xvfmadd.d D5, U1, U4, D5 + xvfmadd.d D6, U2, U4, D6 + xvfmadd.d D7, U3, U4, D7 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + xvfmadd.d D9, U1, U4, D9 + xvfmadd.d D10, U2, U4, D10 + xvfmadd.d D11, U3, U4, D11 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + xvfmadd.d D13, U1, U4, D13 + xvfmadd.d D14, U2, U4, D14 + xvfmadd.d D15, U3, U4, D15 + + addi.d A0, A0, 0x80 + addi.d B0, B0, 0x20 + + /***8-6***/ + /* Load 16 * 64 from A0 */ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + xvld U2, A0, 0x40 + xvld U3, A0, 0x60 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + xvfmadd.d D2, U2, U4, D2 + xvfmadd.d D3, U3, U4, D3 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + xvfmadd.d D5, U1, U4, D5 + xvfmadd.d D6, U2, U4, D6 + xvfmadd.d D7, U3, U4, D7 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + xvfmadd.d D9, U1, U4, D9 + xvfmadd.d D10, U2, U4, D10 + 
xvfmadd.d D11, U3, U4, D11 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + xvfmadd.d D13, U1, U4, D13 + xvfmadd.d D14, U2, U4, D14 + xvfmadd.d D15, U3, U4, D15 + + addi.d A0, A0, 0x80 + addi.d B0, B0, 0x20 + + /***8-7***/ + /* Load 16 * 64 from A0 */ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + xvld U2, A0, 0x40 + xvld U3, A0, 0x60 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + xvfmadd.d D2, U2, U4, D2 + xvfmadd.d D3, U3, U4, D3 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + xvfmadd.d D5, U1, U4, D5 + xvfmadd.d D6, U2, U4, D6 + xvfmadd.d D7, U3, U4, D7 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + xvfmadd.d D9, U1, U4, D9 + xvfmadd.d D10, U2, U4, D10 + xvfmadd.d D11, U3, U4, D11 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + xvfmadd.d D13, U1, U4, D13 + xvfmadd.d D14, U2, U4, D14 + xvfmadd.d D15, U3, U4, D15 + + addi.d A0, A0, 0x80 + addi.d B0, B0, 0x20 + + /***8-8***/ + /* Load 16 * 64 from A0 */ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + xvld U2, A0, 0x40 + xvld U3, A0, 0x60 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + xvfmadd.d D2, U2, U4, D2 + xvfmadd.d D3, U3, U4, D3 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + xvfmadd.d D5, U1, U4, D5 + xvfmadd.d D6, U2, U4, D6 + xvfmadd.d D7, U3, U4, D7 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + xvfmadd.d D9, U1, U4, D9 + xvfmadd.d D10, U2, U4, D10 + xvfmadd.d D11, U3, U4, D11 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + xvfmadd.d D13, U1, U4, D13 + xvfmadd.d D14, U2, U4, D14 + xvfmadd.d D15, U3, U4, D15 + + addi.d A0, A0, 0x80 + addi.d B0, B0, 0x20 + + addi.d TL, TL, -1 /* TL-- */ + blt ZERO,TL, .L_N5_TL1 + +.L_N5_L7: + /* if (!(L & 7)) goto L_N5_L0 */ + andi TL, L, 7 + beq TL, ZERO,.L_N5_L0 + +.L_N5_L71: + /* Load 16 * 64 from A0 */ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + xvld U2, A0, 0x40 + xvld U3, A0, 0x60 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + 
xvfmadd.d D1, U1, U4, D1 + xvfmadd.d D2, U2, U4, D2 + xvfmadd.d D3, U3, U4, D3 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + xvfmadd.d D5, U1, U4, D5 + xvfmadd.d D6, U2, U4, D6 + xvfmadd.d D7, U3, U4, D7 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + xvfmadd.d D9, U1, U4, D9 + xvfmadd.d D10, U2, U4, D10 + xvfmadd.d D11, U3, U4, D11 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + xvfmadd.d D13, U1, U4, D13 + xvfmadd.d D14, U2, U4, D14 + xvfmadd.d D15, U3, U4, D15 + + /* Add stride for A0, B0 */ + addi.d A0, A0, 0x80 + addi.d B0, B0, 0x20 + + addi.d TL, TL, -1 + blt ZERO,TL, .L_N5_L71 + +.L_N5_L0: +#if defined(TRMMKERNEL) + xvfmul.d D0, D0, VALPHA + xvfmul.d D1, D1, VALPHA + xvfmul.d D2, D2, VALPHA + xvfmul.d D3, D3, VALPHA + xvfmul.d D4, D4, VALPHA + xvfmul.d D5, D5, VALPHA + xvfmul.d D6, D6, VALPHA + xvfmul.d D7, D7, VALPHA + xvfmul.d D8, D8, VALPHA + xvfmul.d D9, D9, VALPHA + xvfmul.d D10, D10, VALPHA + xvfmul.d D11, D11, VALPHA + xvfmul.d D12, D12, VALPHA + xvfmul.d D13, D13, VALPHA + xvfmul.d D14, D14, VALPHA + xvfmul.d D15, D15, VALPHA +#else + /* Load C0 */ + xvld U0, C0, 0x00 + xvld U1, C0, 0x20 + xvld U2, C0, 0x40 + xvld U3, C0, 0x60 + xvfmadd.d D0, D0, VALPHA, U0 /* D0 = U0 + (D0 * VALPHA) */ + xvfmadd.d D1, D1, VALPHA, U1 + xvfmadd.d D2, D2, VALPHA, U2 + xvfmadd.d D3, D3, VALPHA, U3 + + /* Load C1 */ + xvld U0, C1, 0x00 + xvld U1, C1, 0x20 + xvld U2, C1, 0x40 + xvld U3, C1, 0x60 + xvfmadd.d D4, D4, VALPHA, U0 + xvfmadd.d D5, D5, VALPHA, U1 + xvfmadd.d D6, D6, VALPHA, U2 + xvfmadd.d D7, D7, VALPHA, U3 + + /* Load C2 */ + xvld U0, C2, 0x00 + xvld U1, C2, 0x20 + xvld U2, C2, 0x40 + xvld U3, C2, 0x60 + xvfmadd.d D8, D8, VALPHA, U0 + xvfmadd.d D9, D9, VALPHA, U1 + xvfmadd.d D10, D10, VALPHA, U2 + xvfmadd.d D11, D11, VALPHA, U3 + + /* Load C3 */ + xvld U0, C3, 0x00 + xvld U1, C3, 0x20 + xvld U2, C3, 0x40 + xvld U3, C3, 0x60 + xvfmadd.d D12, D12, VALPHA, U0 + xvfmadd.d D13, D13, VALPHA, U1 + xvfmadd.d D14, D14, VALPHA, U2 + 
xvfmadd.d D15, D15, VALPHA, U3 + #endif + + /* Store C0 */ + xvst D0, C0, 0x00 + xvst D1, C0, 0x20 + xvst D2, C0, 0x40 + xvst D3, C0, 0x60 + /* Store C1 */ + xvst D4, C1, 0x00 + xvst D5, C1, 0x20 + xvst D6, C1, 0x40 + xvst D7, C1, 0x60 + /* Store C2 */ + xvst D8, C2, 0x00 + xvst D9, C2, 0x20 + xvst D10, C2, 0x40 + xvst D11, C2, 0x60 + /* Store C3 */ + xvst D12, C3, 0x00 + xvst D13, C3, 0x20 + xvst D14, C3, 0x40 + xvst D15, C3, 0x60 + + /* Add stride for C */ + addi.d C0, C0, 0x80 + addi.d C1, C1, 0x80 + addi.d C2, C2, 0x80 + addi.d C3, C3, 0x80 + +#if defined(TRMMKERNEL) +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + sub.d L, K, OFF +#ifdef LEFT + /* number of values in A */ + addi.d L, L, -16 +#else + /* number of values in B */ + addi.d L, L, -4 +#endif + slli.d T0, L, 0x07 + add.d A0, A0, T0 + slli.d T0, L, 0x05 + add.d B0, B0, T0 +#endif + +#ifdef LEFT + addi.d OFF, OFF, 0x10 +#endif +#endif // #if defined(TRMMKERNEL) + + addi.d I, I, -1 /* I-- */ + blt ZERO,I, .L_N5_I1 + +.L_N5_M8: + /* We have done M & 16, considering M=8/4/2/1 */ + andi I, M, 15 + beq ZERO,I, .L_N5_M0 + + andi I, M, 8 + beq ZERO,I, .L_N5_M4 + +#if defined(TRMMKERNEL) +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + move B0, B +#else + slli.d T0, OFF, 0x06 + add.d A0, A0, T0 + slli.d T0, OFF, 0x05 + add.d B0, B, T0 +#endif + +#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + sub.d L, K, OFF +#elif defined(LEFT) + /* number of values in A */ + addi.d L, OFF, 8 +#else + /* number of values in B */ + addi.d L, OFF, 4 +#endif +#else // #if !defined(TRMMKERNEL) + move B0, B + move L, K /* L = bk */ +#endif // #if defined(TRMMKERNEL) + + /* Load 8 * 64 from A0 */ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + + xvldrepl.d U4, B0, 0x00 + /* line 1 */ + xvfmul.d D0, U0, U4 + xvfmul.d D1, U1, U4 + + xvldrepl.d U4, B0, 0x08 + /* line 2 */ + xvfmul.d D4, U0, U4 + xvfmul.d D5, U1, U4 + + xvldrepl.d U4, B0, 0x10 
+ /* line 3 */ + xvfmul.d D8, U0, U4 + xvfmul.d D9, U1, U4 + + xvldrepl.d U4, B0, 0x18 + /* line 4 */ + xvfmul.d D12, U0, U4 + xvfmul.d D13, U1, U4 + + /* Add stride for A0 and B0 */ + addi.d A0, A0, 0x40 + addi.d B0, B0, 0x20 + /* Reduce L */ + addi.d L, L, -1 + srai.d TL, L, 3 /* TL = (L-1) >> 3 */ + /* if (TL < 1) goto L_N5_M8_L7 */ + beq ZERO,TL, .L_N5_M8_L7 + +.L_N5_M8_TL1: /* TL-- */ + /***8-1***/ + /* Load 16 * 64 from A0 */ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + xvfmadd.d D5, U1, U4, D5 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + xvfmadd.d D9, U1, U4, D9 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + xvfmadd.d D13, U1, U4, D13 + + addi.d A0, A0, 0x40 + addi.d B0, B0, 0x20 + + /***8-2***/ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + xvfmadd.d D5, U1, U4, D5 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + xvfmadd.d D9, U1, U4, D9 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + xvfmadd.d D13, U1, U4, D13 + + addi.d A0, A0, 0x40 + addi.d B0, B0, 0x20 + + /***8-3***/ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + xvfmadd.d D5, U1, U4, D5 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + xvfmadd.d D9, U1, U4, D9 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + xvfmadd.d D13, U1, U4, D13 + + addi.d A0, A0, 0x40 + addi.d B0, B0, 0x20 + + /***8-4***/ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + xvfmadd.d D5, U1, U4, D5 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + 
xvfmadd.d D9, U1, U4, D9 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + xvfmadd.d D13, U1, U4, D13 + + addi.d A0, A0, 0x40 + addi.d B0, B0, 0x20 + + /***8-5***/ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + + /* Cumulative D0~D23 */ + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + xvfmadd.d D5, U1, U4, D5 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + xvfmadd.d D9, U1, U4, D9 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + xvfmadd.d D13, U1, U4, D13 + + addi.d A0, A0, 0x40 + addi.d B0, B0, 0x20 + + /***8-6***/ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + xvfmadd.d D5, U1, U4, D5 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + xvfmadd.d D9, U1, U4, D9 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + xvfmadd.d D13, U1, U4, D13 + + addi.d A0, A0, 0x40 + addi.d B0, B0, 0x20 + + /***8-7***/ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + xvfmadd.d D5, U1, U4, D5 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + xvfmadd.d D9, U1, U4, D9 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + xvfmadd.d D13, U1, U4, D13 + + addi.d A0, A0, 0x40 + addi.d B0, B0, 0x20 + + /***8-8***/ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + xvfmadd.d D5, U1, U4, D5 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + xvfmadd.d D9, U1, U4, D9 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + xvfmadd.d D13, U1, U4, D13 + + addi.d A0, A0, 0x40 + addi.d B0, B0, 0x20 + + addi.d TL, TL, -1 /* TL-- */ + blt ZERO,TL, .L_N5_M8_TL1 + +.L_N5_M8_L7: + 
/* if (!(L & 7)) goto L_N5_M8_L0 */ + andi TL, L, 7 + beq TL, ZERO,.L_N5_M8_L0 + +.L_N5_M8_L71: + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + xvfmadd.d D5, U1, U4, D5 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + xvfmadd.d D9, U1, U4, D9 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + xvfmadd.d D13, U1, U4, D13 + + /* Add stride for A0, B0 */ + addi.d A0, A0, 0x40 + addi.d B0, B0, 0x20 + + addi.d TL, TL, -1 + blt ZERO,TL, .L_N5_M8_L71 + +.L_N5_M8_L0: +#if defined(TRMMKERNEL) + xvfmul.d D0, D0, VALPHA + xvfmul.d D1, D1, VALPHA + xvfmul.d D4, D4, VALPHA + xvfmul.d D5, D5, VALPHA + xvfmul.d D8, D8, VALPHA + xvfmul.d D9, D9, VALPHA + xvfmul.d D12, D12, VALPHA + xvfmul.d D13, D13, VALPHA +#else + /* Load C0 */ + xvld U0, C0, 0x00 + xvld U1, C0, 0x20 + xvfmadd.d D0, D0, VALPHA, U0 /* D0 = U0 + (D0 * VALPHA) */ + xvfmadd.d D1, D1, VALPHA, U1 + + /* Load C1 */ + xvld U0, C1, 0x00 + xvld U1, C1, 0x20 + xvfmadd.d D4, D4, VALPHA, U0 + xvfmadd.d D5, D5, VALPHA, U1 + + /* Load C2 */ + xvld U0, C2, 0x00 + xvld U1, C2, 0x20 + xvfmadd.d D8, D8, VALPHA, U0 + xvfmadd.d D9, D9, VALPHA, U1 + + /* Load C3 */ + xvld U0, C3, 0x00 + xvld U1, C3, 0x20 + xvfmadd.d D12, D12, VALPHA, U0 + xvfmadd.d D13, D13, VALPHA, U1 +#endif + + /* Store C0 */ + xvst D0, C0, 0x00 + xvst D1, C0, 0x20 + /* Store C1 */ + xvst D4, C1, 0x00 + xvst D5, C1, 0x20 + /* Store C2 */ + xvst D8, C2, 0x00 + xvst D9, C2, 0x20 + /* Store C3 */ + xvst D12, C3, 0x00 + xvst D13, C3, 0x20 + + /* Add stride for C */ + addi.d C0, C0, 0x40 + addi.d C1, C1, 0x40 + addi.d C2, C2, 0x40 + addi.d C3, C3, 0x40 + +#if defined(TRMMKERNEL) +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + sub.d L, K, OFF +#ifdef LEFT + /* number of values in A */ + addi.d L, L, -8 +#else + /* number of values in B */ + addi.d L, L, -4 +#endif + slli.d T0, L, 0x06 + add.d 
A0, A0, T0 + slli.d T0, L, 0x05 + add.d B0, B0, T0 +#endif + +#ifdef LEFT + /* number of values in A */ + addi.d OFF, OFF, 0x08 +#endif +#endif // #if defined(TRMMKERNEL) + +/********LOOP (if(N & 4 ) && (M & 8) ) End************/ + +.L_N5_M4: + andi I, M, 4 + beq ZERO,I, .L_N5_M2 + +#if defined(TRMMKERNEL) +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + move B0, B +#else + slli.d T0, OFF, 0x05 + add.d A0, A0, T0 + add.d B0, B, T0 +#endif + +#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + sub.d L, K, OFF +#elif defined(LEFT) + /* number of values in A */ + addi.d L, OFF, 4 +#else + /* number of values in B */ + addi.d L, OFF, 4 +#endif +#else // #if !defined(TRMMKERNEL) + move B0, B + move L, K /* L = bk */ +#endif + + /* Load 4 * 64 from A0 */ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + /* line 1 */ + xvfmul.d D0, U0, U4 + + xvldrepl.d U4, B0, 0x08 + /* line 2 */ + xvfmul.d D4, U0, U4 + + xvldrepl.d U4, B0, 0x10 + /* line 3 */ + xvfmul.d D8, U0, U4 + + xvldrepl.d U4, B0, 0x18 + /* line 4 */ + xvfmul.d D12, U0, U4 + + /* Add stride for A0 and B0 */ + addi.d A0, A0, 0x20 + addi.d B0, B0, 0x20 + /* Reduce L */ + addi.d L, L, -1 + srai.d TL, L, 3 /* TL = (L-1) >> 3 */ + /* if (TL < 1) goto L_N5_M4_L7 */ + beq ZERO,TL, .L_N5_M4_L7 + +.L_N5_M4_TL1: /* TL-- */ + /***8-1***/ + /* Load 8 * 64 from A0 */ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + addi.d A0, A0, 0x20 + addi.d B0, B0, 0x20 + + /***8-2***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + addi.d A0, A0, 0x20 + addi.d B0, B0, 0x20 + + /***8-3***/ + 
xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + addi.d A0, A0, 0x20 + addi.d B0, B0, 0x20 + + /***8-4***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + addi.d A0, A0, 0x20 + addi.d B0, B0, 0x20 + + /***8-5***/ + xvld U0, A0, 0x00 + + /* Cumulative D0~D23 */ + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + addi.d A0, A0, 0x20 + addi.d B0, B0, 0x20 + + /***8-6***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + addi.d A0, A0, 0x20 + addi.d B0, B0, 0x20 + + /***8-7***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + addi.d A0, A0, 0x20 + addi.d B0, B0, 0x20 + + /***8-8***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + addi.d A0, A0, 0x20 + addi.d B0, B0, 0x20 + + addi.d TL, TL, -1 /* TL-- */ + blt ZERO,TL, .L_N5_M4_TL1 + +.L_N5_M4_L7: + /* if (!(L & 7)) goto L_N5_M4_L0 */ + andi TL, L, 7 + beq TL, ZERO,.L_N5_M4_L0 + 
+.L_N5_M4_L71: + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + /* Add stride for A0, B0 */ + addi.d A0, A0, 0x20 + addi.d B0, B0, 0x20 + + addi.d TL, TL, -1 + blt ZERO,TL, .L_N5_M4_L71 + +.L_N5_M4_L0: +#if defined(TRMMKERNEL) + xvfmul.d D0, D0, VALPHA + xvfmul.d D4, D4, VALPHA + xvfmul.d D8, D8, VALPHA + xvfmul.d D12, D12, VALPHA +#else + /* Load C0 */ + xvld U0, C0, 0x00 + xvfmadd.d D0, D0, VALPHA, U0 /* D0 = U0 + (D0 * VALPHA) */ + + /* Load C1 */ + xvld U0, C1, 0x00 + xvfmadd.d D4, D4, VALPHA, U0 + + /* Load C2 */ + xvld U0, C2, 0x00 + xvfmadd.d D8, D8, VALPHA, U0 + + /* Load C3 */ + xvld U0, C3, 0x00 + xvfmadd.d D12, D12, VALPHA, U0 +#endif + + /* Store C0 */ + xvst D0, C0, 0x00 + /* Store C1 */ + xvst D4, C1, 0x00 + /* Store C2 */ + xvst D8, C2, 0x00 + /* Store C3 */ + xvst D12, C3, 0x00 + + /* Add stride for C */ + addi.d C0, C0, 0x20 + addi.d C1, C1, 0x20 + addi.d C2, C2, 0x20 + addi.d C3, C3, 0x20 + +#if defined(TRMMKERNEL) +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + sub.d L, K, OFF +#ifdef LEFT + /* number of values in A */ + addi.d L, L, -4 +#else + /* number of values in B */ + addi.d L, L, -4 +#endif + slli.d T0, L, 0x05 + add.d A0, A0, T0 + add.d B0, B0, T0 +#endif + +#ifdef LEFT + /* number of values in A */ + addi.d OFF, OFF, 0x04 +#endif +#endif // #if defined(TRMMKERNEL) + +/********LOOP (if(N & 4 ) && (M & 4) ) End************/ + +.L_N5_M2: + andi I, M, 2 + beq ZERO,I, .L_N5_M1 + +#if defined(TRMMKERNEL) +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + move B0, B +#else + slli.d T0, OFF, 0x04 + add.d A0, A0, T0 + slli.d T0, OFF, 0x05 + add.d B0, B, T0 +#endif + +#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + sub.d L, K, OFF +#elif defined(LEFT) + /* 
number of values in A */ + addi.d L, OFF, 2 +#else + /* number of values in B */ + addi.d L, OFF, 4 +#endif +#else // #if !defined(TRMMKERNEL) + move B0, B + move L, K /* L = bk */ +#endif + + /* Load 2 * 64 from A0 */ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + /* line 1 */ + xvfmul.d D0, U0, U4 + + xvldrepl.d U4, B0, 0x08 + /* line 2 */ + xvfmul.d D4, U0, U4 + + xvldrepl.d U4, B0, 0x10 + /* line 3 */ + xvfmul.d D8, U0, U4 + + xvldrepl.d U4, B0, 0x18 + /* line 4 */ + xvfmul.d D12, U0, U4 + + /* Add stride for A0 and B0 */ + addi.d A0, A0, 0x10 + addi.d B0, B0, 0x20 + /* Reduce L */ + addi.d L, L, -1 + srai.d TL, L, 3 /* TL = (L-1) >> 3 */ + /* if (TL < 1) goto L_N5_M2_L7 */ + beq ZERO,TL, .L_N5_M2_L7 + +.L_N5_M2_TL1: /* TL-- */ + /***8-1***/ + /* Load 2 * 64 from A0 */ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + addi.d A0, A0, 0x10 + addi.d B0, B0, 0x20 + + /***8-2***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + addi.d A0, A0, 0x10 + addi.d B0, B0, 0x20 + + /***8-3***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + addi.d A0, A0, 0x10 + addi.d B0, B0, 0x20 + + /***8-4***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + addi.d A0, A0, 0x10 + addi.d B0, B0, 0x20 + + 
/***8-5***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + addi.d A0, A0, 0x10 + addi.d B0, B0, 0x20 + + /***8-6***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + addi.d A0, A0, 0x10 + addi.d B0, B0, 0x20 + + /***8-7***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + addi.d A0, A0, 0x10 + addi.d B0, B0, 0x20 + + /***8-8***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + addi.d A0, A0, 0x10 + addi.d B0, B0, 0x20 + + addi.d TL, TL, -1 /* TL-- */ + blt ZERO,TL, .L_N5_M2_TL1 + +.L_N5_M2_L7: + /* if (!(L & 7)) goto L_N5_M2_L0 */ + andi TL, L, 7 + beq TL, ZERO,.L_N5_M2_L0 + +.L_N5_M2_L71: + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + /* Add stride for A0, B0 */ + addi.d A0, A0, 0x10 + addi.d B0, B0, 0x20 + + addi.d TL, TL, -1 + blt ZERO,TL, .L_N5_M2_L71 + +.L_N5_M2_L0: +#if defined(TRMMKERNEL) + xvfmul.d D0, D0, VALPHA + xvfmul.d D4, D4, VALPHA + xvfmul.d D8, D8, VALPHA + xvfmul.d D12, D12, VALPHA + #else + /* Load C0 */ + xvld U0, C0, 0x00 + xvfmadd.d D0, D0, VALPHA, U0 /* D0 = U0 + (D0 * 
VALPHA) */ + + /* Load C1 */ + xvld U0, C1, 0x00 + xvfmadd.d D4, D4, VALPHA, U0 + + /* Load C2 */ + xvld U0, C2, 0x00 + xvfmadd.d D8, D8, VALPHA, U0 + + /* Load C3 */ + xvld U0, C3, 0x00 + xvfmadd.d D12, D12, VALPHA, U0 + #endif + + xvstelm.d D0, C0, 0x00, 0x00 + xvstelm.d D4, C1, 0x00, 0x00 + xvstelm.d D8, C2, 0x00, 0x00 + xvstelm.d D12, C3, 0x00, 0x00 + xvstelm.d D0, C0, 0x08, 0x01 + xvstelm.d D4, C1, 0x08, 0x01 + xvstelm.d D8, C2, 0x08, 0x01 + xvstelm.d D12, C3, 0x08, 0x01 + + /* Add stride for C */ + addi.d C0, C0, 0x10 + addi.d C1, C1, 0x10 + addi.d C2, C2, 0x10 + addi.d C3, C3, 0x10 + +#if defined(TRMMKERNEL) +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + sub.d L, K, OFF +#ifdef LEFT + /* number of values in A */ + addi.d L, L, -2 +#else + /* number of values in B */ + addi.d L, L, -4 +#endif + slli.d T0, L, 0x04 + add.d A0, A0, T0 + slli.d T0, L, 0x05 + add.d B0, B0, T0 +#endif + +#ifdef LEFT + /* number of values in A */ + addi.d OFF, OFF, 0x02 +#endif +#endif // #if defined(TRMMKERNEL) + +/********LOOP (if(N & 4 ) && (M & 2) ) End************/ + +.L_N5_M1: + andi I, M, 1 + beq ZERO,I, .L_N5_M0 + +#if defined(TRMMKERNEL) +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + move B0, B +#else + slli.d T0, OFF, 0x03 + add.d A0, A0, T0 + slli.d T0, OFF, 0x05 + add.d B0, B, T0 +#endif + +#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + sub.d L, K, OFF +#elif defined(LEFT) + /* number of values in A */ + addi.d L, OFF, 1 +#else + /* number of values in B */ + addi.d L, OFF, 4 +#endif +#else // #if !defined(TRMMKERNEL) + move B0, B + move L, K /* L = bk */ +#endif + + /* Load 1 * 64 from A0 */ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + /* line 1 */ + xvfmul.d D0, U0, U4 + + xvldrepl.d U4, B0, 0x08 + /* line 2 */ + xvfmul.d D4, U0, U4 + + xvldrepl.d U4, B0, 0x10 + /* line 3 */ + xvfmul.d D8, U0, U4 + + xvldrepl.d U4, B0, 0x18 + /* line 4 */ + xvfmul.d D12, U0, U4 
+ + /* Add stride for A0 and B0 */ + addi.d A0, A0, 0x08 + addi.d B0, B0, 0x20 + /* Reduce L */ + addi.d L, L, -1 + srai.d TL, L, 3 /* TL = (L-1) >> 3 */ + /* if (TL < 1) goto L_N5_M1_L7 */ + beq ZERO,TL, .L_N5_M1_L7 + +.L_N5_M1_TL1: /* TL-- */ + /***8-1***/ + /* Load 1 * 64 from A0 */ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + addi.d A0, A0, 0x08 + addi.d B0, B0, 0x20 + + /***8-2***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + addi.d A0, A0, 0x08 + addi.d B0, B0, 0x20 + + /***8-3***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + addi.d A0, A0, 0x08 + addi.d B0, B0, 0x20 + + /***8-4***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + addi.d A0, A0, 0x08 + addi.d B0, B0, 0x20 + + /***8-5***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + addi.d A0, A0, 0x08 + addi.d B0, B0, 0x20 + + /***8-6***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + 
xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + addi.d A0, A0, 0x08 + addi.d B0, B0, 0x20 + + /***8-7***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + addi.d A0, A0, 0x08 + addi.d B0, B0, 0x20 + + /***8-8***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + addi.d A0, A0, 0x08 + addi.d B0, B0, 0x20 + + addi.d TL, TL, -1 /* TL-- */ + blt ZERO,TL, .L_N5_M1_TL1 + +.L_N5_M1_L7: + /* if (!(L & 7)) goto L_N5_M1_L0 */ + andi TL, L, 7 + beq TL, ZERO,.L_N5_M1_L0 + +.L_N5_M1_L71: + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + xvldrepl.d U4, B0, 0x10 + xvfmadd.d D8, U0, U4, D8 + + xvldrepl.d U4, B0, 0x18 + xvfmadd.d D12, U0, U4, D12 + + /* Add stride for A0, B0 */ + addi.d A0, A0, 0x08 + addi.d B0, B0, 0x20 + + addi.d TL, TL, -1 + blt ZERO,TL, .L_N5_M1_L71 + +.L_N5_M1_L0: +#if defined(TRMMKERNEL) + xvfmul.d D0, D0, VALPHA + xvfmul.d D4, D4, VALPHA + xvfmul.d D8, D8, VALPHA + xvfmul.d D12, D12, VALPHA + #else + /* Load C0 */ + xvld U0, C0, 0x00 + xvfmadd.d D0, D0, VALPHA, U0 /* D0 = U0 + (D0 * VALPHA) */ + + /* Load C1 */ + xvld U0, C1, 0x00 + xvfmadd.d D4, D4, VALPHA, U0 + + /* Load C2 */ + xvld U0, C2, 0x00 + xvfmadd.d D8, D8, VALPHA, U0 + + /* Load C3 */ + xvld U0, C3, 0x00 + xvfmadd.d D12, D12, VALPHA, U0 +#endif + + xvstelm.d D0, C0, 0x00, 0x00 + xvstelm.d D4, C1, 0x00, 0x00 + xvstelm.d D8, C2, 0x00, 0x00 + xvstelm.d D12, C3, 0x00, 0x00 + + /* Add stride for C */ + addi.d C0, C0, 0x08 + addi.d C1, C1, 0x08 + addi.d C2, C2, 0x08 + addi.d C3, C3, 0x08 + +#if defined(TRMMKERNEL) +#if 
(defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + sub.d L, K, OFF +#ifdef LEFT + /* number of values in A */ + addi.d L, L, -1 +#else + /* number of values in B */ + addi.d L, L, -4 +#endif + slli.d T0, L, 0x03 + add.d A0, A0, T0 + slli.d T0, L, 0x05 + add.d B0, B0, T0 +#endif + +#ifdef LEFT + /* number of values in A */ + addi.d OFF, OFF, 0x01 +#endif +#endif // #if defined(TRMMKERNEL) + +/********LOOP (if(N & 4 ) && (M & 1) ) End************/ + +.L_N5_M0: + /* Add stride for B and C + * B += (K * 32) + * C += (LDC * 32) + */ + /* since the array type is double, + * so we must mul 32 + */ + addi.d T2, ZERO,32 + mul.d T0, K, T2 + mul.d T1, LDC, T2 + add.d B, B, T0 + add.d C, C, T1 + +#if defined(TRMMKERNEL) && !defined(LEFT) + addi.d OFF, OFF, 0x04 +#endif + + /* We must reinit I */ + srai.d I, M, 4 /* I = bm >> 4 */ + +/************************* Condition 2 if((N & 4) && (M >> 4)) End !!! ************************* +* dgemm_core_16x4 */ + +.L_N3: + andi J, N, 2 + beq ZERO, J, .L_N1 + +/************************* Condition 3 if((N & 2) && (M >> 4)) START !!! 
************************* +* dgemm_core_16x2 */ + + move C0, C + move A0, A + slli.d T0, LDC, 3 + add.d C1, C0, T0 + +#if defined(TRMMKERNEL) && defined(LEFT) + move OFF, OFFSET +#endif + + /* if (!(M >> 4)) goto L_N3_M8 */ + srai.d I, M, 4 /* I = bm >> 4 */ + beq ZERO, I, .L_N3_M8 + +.L_N3_I1: +#if defined(TRMMKERNEL) +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + move B0, B +#else + slli.d T0, OFF, 0x07 + add.d A0, A0, T0 + slli.d T0, OFF, 0x04 + add.d B0, B, T0 +#endif + +#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + sub.d L, K, OFF +#elif defined(LEFT) + /* number of values in A */ + addi.d L, OFF, 16 +#else + /* number of values in B */ + addi.d L, OFF, 2 +#endif +#else // #if !defined(TRMMKERNEL) + move B0, B + move L, K /* L = bk */ +#endif + + /* Load 16 * 64 from A0 + * U0 = {a3, a2, a1, a0} + * U1 = {a7, a6, a5, a4} + * U2 = {a11, a10, a9, a8} + * U3 = {a15, a14, a13, a12} + */ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + xvld U2, A0, 0x40 + xvld U3, A0, 0x60 + + xvldrepl.d U4, B0, 0x00 + /* line 1 */ + xvfmul.d D0, U0, U4 + xvfmul.d D1, U1, U4 + xvfmul.d D2, U2, U4 + xvfmul.d D3, U3, U4 + + xvldrepl.d U4, B0, 0x08 + /* line 2 */ + xvfmul.d D4, U0, U4 + xvfmul.d D5, U1, U4 + xvfmul.d D6, U2, U4 + xvfmul.d D7, U3, U4 + + /* Add stride for A0 and B0 */ + addi.d A0, A0, 0x80 + addi.d B0, B0, 0x10 + /* Reduce L */ + addi.d L, L, -1 + srai.d TL, L, 3 /* TL = (L-1) >> 3 */ + /* if (TL < 1) goto L_N3_L7 */ + beq ZERO,TL, .L_N3_L7 + +.L_N3_TL1: /* TL-- */ + /***8-1***/ + /* Load 16 * 64 from A0 */ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + xvld U2, A0, 0x40 + xvld U3, A0, 0x60 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + xvfmadd.d D2, U2, U4, D2 + xvfmadd.d D3, U3, U4, D3 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + xvfmadd.d D5, U1, U4, D5 + xvfmadd.d D6, U2, U4, D6 + xvfmadd.d D7, U3, U4, D7 + + addi.d A0, A0, 0x80 + addi.d B0, B0, 0x10 + + 
/***8-2***/ + /* Load 16 * 64 from A0 */ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + xvld U2, A0, 0x40 + xvld U3, A0, 0x60 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + xvfmadd.d D2, U2, U4, D2 + xvfmadd.d D3, U3, U4, D3 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + xvfmadd.d D5, U1, U4, D5 + xvfmadd.d D6, U2, U4, D6 + xvfmadd.d D7, U3, U4, D7 + + addi.d A0, A0, 0x80 + addi.d B0, B0, 0x10 + + /***8-3***/ + /* Load 16 * 64 from A0 */ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + xvld U2, A0, 0x40 + xvld U3, A0, 0x60 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + xvfmadd.d D2, U2, U4, D2 + xvfmadd.d D3, U3, U4, D3 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + xvfmadd.d D5, U1, U4, D5 + xvfmadd.d D6, U2, U4, D6 + xvfmadd.d D7, U3, U4, D7 + + addi.d A0, A0, 0x80 + addi.d B0, B0, 0x10 + + /***8-4***/ + /* Load 16 * 64 from A0 */ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + xvld U2, A0, 0x40 + xvld U3, A0, 0x60 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + xvfmadd.d D2, U2, U4, D2 + xvfmadd.d D3, U3, U4, D3 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + xvfmadd.d D5, U1, U4, D5 + xvfmadd.d D6, U2, U4, D6 + xvfmadd.d D7, U3, U4, D7 + + addi.d A0, A0, 0x80 + addi.d B0, B0, 0x10 + + /***8-5***/ + /* Load 16 * 64 from A0 */ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + xvld U2, A0, 0x40 + xvld U3, A0, 0x60 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + xvfmadd.d D2, U2, U4, D2 + xvfmadd.d D3, U3, U4, D3 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + xvfmadd.d D5, U1, U4, D5 + xvfmadd.d D6, U2, U4, D6 + xvfmadd.d D7, U3, U4, D7 + + addi.d A0, A0, 0x80 + addi.d B0, B0, 0x10 + + /***8-6***/ + /* Load 16 * 64 from A0 */ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + xvld U2, A0, 0x40 + xvld U3, A0, 0x60 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + xvfmadd.d D2, U2, U4, 
D2 + xvfmadd.d D3, U3, U4, D3 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + xvfmadd.d D5, U1, U4, D5 + xvfmadd.d D6, U2, U4, D6 + xvfmadd.d D7, U3, U4, D7 + + addi.d A0, A0, 0x80 + addi.d B0, B0, 0x10 + + /***8-7***/ + /* Load 16 * 64 from A0 */ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + xvld U2, A0, 0x40 + xvld U3, A0, 0x60 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + xvfmadd.d D2, U2, U4, D2 + xvfmadd.d D3, U3, U4, D3 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + xvfmadd.d D5, U1, U4, D5 + xvfmadd.d D6, U2, U4, D6 + xvfmadd.d D7, U3, U4, D7 + + addi.d A0, A0, 0x80 + addi.d B0, B0, 0x10 + + /***8-8***/ + /* Load 16 * 64 from A0 */ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + xvld U2, A0, 0x40 + xvld U3, A0, 0x60 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + xvfmadd.d D2, U2, U4, D2 + xvfmadd.d D3, U3, U4, D3 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + xvfmadd.d D5, U1, U4, D5 + xvfmadd.d D6, U2, U4, D6 + xvfmadd.d D7, U3, U4, D7 + + addi.d A0, A0, 0x80 + addi.d B0, B0, 0x10 + + addi.d TL, TL, -1 /* TL-- */ + blt ZERO,TL, .L_N3_TL1 + +.L_N3_L7: + /* if (!(L & 7)) goto L_N3_L0 */ + andi TL, L, 7 + beq TL, ZERO,.L_N3_L0 + +.L_N3_L71: + /* Load 16 * 64 from A0 */ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + xvld U2, A0, 0x40 + xvld U3, A0, 0x60 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + xvfmadd.d D2, U2, U4, D2 + xvfmadd.d D3, U3, U4, D3 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + xvfmadd.d D5, U1, U4, D5 + xvfmadd.d D6, U2, U4, D6 + xvfmadd.d D7, U3, U4, D7 + + /* Add stride for A0, B0 */ + addi.d A0, A0, 0x80 + addi.d B0, B0, 0x10 + + addi.d TL, TL, -1 + blt ZERO,TL, .L_N3_L71 + +.L_N3_L0: +#if defined(TRMMKERNEL) + xvfmul.d D0, D0, VALPHA + xvfmul.d D1, D1, VALPHA + xvfmul.d D2, D2, VALPHA + xvfmul.d D3, D3, VALPHA + xvfmul.d D4, D4, VALPHA + xvfmul.d D5, D5, VALPHA + xvfmul.d D6, D6, VALPHA + xvfmul.d D7, 
D7, VALPHA +#else + /* Load C0 */ + xvld U0, C0, 0x00 + xvld U1, C0, 0x20 + xvld U2, C0, 0x40 + xvld U3, C0, 0x60 + xvfmadd.d D0, D0, VALPHA, U0 /* D0 = U0 + (D0 * VALPHA) */ + xvfmadd.d D1, D1, VALPHA, U1 + xvfmadd.d D2, D2, VALPHA, U2 + xvfmadd.d D3, D3, VALPHA, U3 + + /* Load C1 */ + xvld U0, C1, 0x00 + xvld U1, C1, 0x20 + xvld U2, C1, 0x40 + xvld U3, C1, 0x60 + xvfmadd.d D4, D4, VALPHA, U0 + xvfmadd.d D5, D5, VALPHA, U1 + xvfmadd.d D6, D6, VALPHA, U2 + xvfmadd.d D7, D7, VALPHA, U3 +#endif + + /* Store C0 */ + xvst D0, C0, 0x00 + xvst D1, C0, 0x20 + xvst D2, C0, 0x40 + xvst D3, C0, 0x60 + /* Store C1 */ + xvst D4, C1, 0x00 + xvst D5, C1, 0x20 + xvst D6, C1, 0x40 + xvst D7, C1, 0x60 + + /* Add stride for C */ + addi.d C0, C0, 0x80 + addi.d C1, C1, 0x80 + +#if defined(TRMMKERNEL) +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + sub.d L, K, OFF +#ifdef LEFT + addi.d L, L, -16 +#else + addi.d L, L, -2 +#endif + slli.d T0, L, 0x07 + add.d A0, A0, T0 + slli.d T0, L, 0x04 + add.d B0, B0, T0 +#endif + +#ifdef LEFT + addi.d OFF, OFF, 0x10 +#endif +#endif // #if defined(TRMMKERNEL) + + addi.d I, I, -1 /* I-- */ + blt ZERO,I, .L_N3_I1 + +.L_N3_M8: + /* We have done M & 16, considering M=8/4/2/1 */ + andi I, M, 15 + beq ZERO,I, .L_N3_M0 + + andi I, M, 8 + beq ZERO,I, .L_N3_M4 + +#if defined(TRMMKERNEL) +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + move B0, B +#else + slli.d T0, OFF, 0x06 + add.d A0, A0, T0 + slli.d T0, OFF, 0x04 + add.d B0, B, T0 +#endif + +#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + sub.d L, K, OFF +#elif defined(LEFT) + /* number of values in A */ + addi.d L, OFF, 8 +#else + /* number of values in B */ + addi.d L, OFF, 2 +#endif +#else // #if !defined(TRMMKERNEL) + move B0, B + move L, K /* L = bk */ +#endif + + /* Load 8 * 64 from A0 */ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + + xvldrepl.d U4, B0, 0x00 + /* line 1 */ + xvfmul.d D0, U0, U4 + xvfmul.d 
D1, U1, U4 + + xvldrepl.d U4, B0, 0x08 + /* line 2 */ + xvfmul.d D4, U0, U4 + xvfmul.d D5, U1, U4 + + /* Add stride for A0 and B0 */ + addi.d A0, A0, 0x40 + addi.d B0, B0, 0x10 + /* Reduce L */ + addi.d L, L, -1 + srai.d TL, L, 3 /* TL = (L-1) >> 3 */ + /* if (TL < 1) goto L_N3_M8_L7 */ + beq ZERO,TL, .L_N3_M8_L7 + +.L_N3_M8_TL1: /* TL-- */ + /***8-1***/ + /* Load 16 * 64 from A0 */ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + xvfmadd.d D5, U1, U4, D5 + + addi.d A0, A0, 0x40 + addi.d B0, B0, 0x10 + + /***8-2***/ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + xvfmadd.d D5, U1, U4, D5 + + addi.d A0, A0, 0x40 + addi.d B0, B0, 0x10 + + /***8-3***/ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + xvfmadd.d D5, U1, U4, D5 + + addi.d A0, A0, 0x40 + addi.d B0, B0, 0x10 + + /***8-4***/ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + xvfmadd.d D5, U1, U4, D5 + + addi.d A0, A0, 0x40 + addi.d B0, B0, 0x10 + + /***8-5***/ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + + /* Cumulative D0~D23 */ + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + xvfmadd.d D5, U1, U4, D5 + + addi.d A0, A0, 0x40 + addi.d B0, B0, 0x10 + + /***8-6***/ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + xvfmadd.d D5, U1, U4, D5 + + addi.d A0, A0, 0x40 + addi.d B0, B0, 0x10 + + 
/***8-7***/ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + xvfmadd.d D5, U1, U4, D5 + + addi.d A0, A0, 0x40 + addi.d B0, B0, 0x10 + + /***8-8***/ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + xvfmadd.d D5, U1, U4, D5 + + addi.d A0, A0, 0x40 + addi.d B0, B0, 0x10 + + addi.d TL, TL, -1 /* TL-- */ + blt ZERO,TL, .L_N3_M8_TL1 + +.L_N3_M8_L7: + /* if (!(L & 7)) goto L_N3_M8_L0 */ + andi TL, L, 7 + beq TL, ZERO,.L_N3_M8_L0 + +.L_N3_M8_L71: + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + xvfmadd.d D5, U1, U4, D5 + + /* Add stride for A0, B0 */ + addi.d A0, A0, 0x40 + addi.d B0, B0, 0x10 + + addi.d TL, TL, -1 + blt ZERO,TL, .L_N3_M8_L71 + +.L_N3_M8_L0: +#if defined(TRMMKERNEL) + xvfmul.d D0, D0, VALPHA + xvfmul.d D1, D1, VALPHA + xvfmul.d D4, D4, VALPHA + xvfmul.d D5, D5, VALPHA +#else + /* Load C0 */ + xvld U0, C0, 0x00 + xvld U1, C0, 0x20 + xvfmadd.d D0, D0, VALPHA, U0 /* D0 = U0 + (D0 * VALPHA) */ + xvfmadd.d D1, D1, VALPHA, U1 + + /* Load C1 */ + xvld U0, C1, 0x00 + xvld U1, C1, 0x20 + xvfmadd.d D4, D4, VALPHA, U0 + xvfmadd.d D5, D5, VALPHA, U1 +#endif + + /* Store C0 */ + xvst D0, C0, 0x00 + xvst D1, C0, 0x20 + /* Store C1 */ + xvst D4, C1, 0x00 + xvst D5, C1, 0x20 + + /* Add stride for C */ + addi.d C0, C0, 0x40 + addi.d C1, C1, 0x40 + +#if defined(TRMMKERNEL) +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + sub.d L, K, OFF +#ifdef LEFT + addi.d L, L, -8 +#else + addi.d L, L, -2 +#endif + slli.d T0, L, 0x06 + add.d A0, A0, T0 + slli.d T0, L, 0x04 + add.d B0, B0, T0 +#endif + +#ifdef LEFT + addi.d OFF, OFF, 0x08 +#endif +#endif // #if 
defined(TRMMKERNEL) + +/********LOOP (if(N & 2) && (M & 8) ) End************/ + +.L_N3_M4: + andi I, M, 4 + beq ZERO,I, .L_N3_M2 + +#if defined(TRMMKERNEL) +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + move B0, B +#else + slli.d T0, OFF, 0x05 + add.d A0, A0, T0 + slli.d T0, OFF, 0x04 + add.d B0, B, T0 +#endif + +#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + sub.d L, K, OFF +#elif defined(LEFT) + /* number of values in A */ + addi.d L, OFF, 4 +#else + /* number of values in B */ + addi.d L, OFF, 2 +#endif +#else // #if !defined(TRMMKERNEL) + move B0, B + move L, K /* L = bk */ +#endif + + /* Load 4 * 64 from A0 */ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + /* line 1 */ + xvfmul.d D0, U0, U4 + + xvldrepl.d U4, B0, 0x08 + /* line 2 */ + xvfmul.d D4, U0, U4 + + /* Add stride for A0 and B0 */ + addi.d A0, A0, 0x20 + addi.d B0, B0, 0x10 + /* Reduce L */ + addi.d L, L, -1 + srai.d TL, L, 3 /* TL = (L-1) >> 3 */ + /* if (TL < 1) goto L_N3_M4_L7 */ + beq ZERO,TL, .L_N3_M4_L7 + +.L_N3_M4_TL1: /* TL-- */ + /***8-1***/ + /* Load 8 * 64 from A0 */ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + addi.d A0, A0, 0x20 + addi.d B0, B0, 0x10 + + /***8-2***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + addi.d A0, A0, 0x20 + addi.d B0, B0, 0x10 + + /***8-3***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + addi.d A0, A0, 0x20 + addi.d B0, B0, 0x10 + + /***8-4***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + addi.d A0, A0, 0x20 + addi.d B0, B0, 0x10 + + /***8-5***/ + xvld U0, A0, 0x00 + + /* Cumulative D0~D23 */ + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + 
+ xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + addi.d A0, A0, 0x20 + addi.d B0, B0, 0x10 + + /***8-6***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + addi.d A0, A0, 0x20 + addi.d B0, B0, 0x10 + + /***8-7***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + addi.d A0, A0, 0x20 + addi.d B0, B0, 0x10 + + /***8-8***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + addi.d A0, A0, 0x20 + addi.d B0, B0, 0x10 + + addi.d TL, TL, -1 /* TL-- */ + blt ZERO,TL, .L_N3_M4_TL1 + +.L_N3_M4_L7: + /* if (!(L & 7)) goto L_N3_M4_L0 */ + andi TL, L, 7 + beq TL, ZERO,.L_N3_M4_L0 + +.L_N3_M4_L71: + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + /* Add stride for A0, B0 */ + addi.d A0, A0, 0x20 + addi.d B0, B0, 0x10 + + addi.d TL, TL, -1 + blt ZERO,TL, .L_N3_M4_L71 + +.L_N3_M4_L0: +#if defined(TRMMKERNEL) + xvfmul.d D0, D0, VALPHA + xvfmul.d D4, D4, VALPHA +#else + /* Load C0 */ + xvld U0, C0, 0x00 + xvfmadd.d D0, D0, VALPHA, U0 /* D0 = U0 + (D0 * VALPHA) */ + + /* Load C1 */ + xvld U0, C1, 0x00 + xvfmadd.d D4, D4, VALPHA, U0 +#endif + + /* Store C0 */ + xvst D0, C0, 0x00 + /* Store C1 */ + xvst D4, C1, 0x00 + + /* Add stride for C */ + addi.d C0, C0, 0x20 + addi.d C1, C1, 0x20 + +#if defined(TRMMKERNEL) +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + sub.d L, K, OFF +#ifdef LEFT + addi.d L, L, -4 +#else + addi.d L, L, -2 +#endif + slli.d T0, L, 0x05 + add.d A0, A0, T0 + slli.d T0, L, 0x04 + add.d B0, B0, T0 +#endif + +#ifdef LEFT + addi.d OFF, OFF, 0x04 +#endif +#endif // #if defined(TRMMKERNEL) + +/********LOOP (if(N & 2 ) && (M & 4) ) End************/ + +.L_N3_M2: + andi I, M, 2 + beq ZERO,I, .L_N3_M1 + 
+#if defined(TRMMKERNEL) +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + move B0, B +#else + slli.d T0, OFF, 0x04 + add.d A0, A0, T0 + add.d B0, B, T0 +#endif + +#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + sub.d L, K, OFF +#elif defined(LEFT) + /* number of values in A */ + addi.d L, OFF, 2 +#else + /* number of values in B */ + addi.d L, OFF, 2 +#endif +#else // #if !defined(TRMMKERNEL) + move B0, B + move L, K /* L = bk */ +#endif + + /* Load 2 * 64 from A0 */ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + /* line 1 */ + xvfmul.d D0, U0, U4 + + xvldrepl.d U4, B0, 0x08 + /* line 2 */ + xvfmul.d D4, U0, U4 + + /* Add stride for A0 and B0 */ + addi.d A0, A0, 0x10 + addi.d B0, B0, 0x10 + /* Reduce L */ + addi.d L, L, -1 + srai.d TL, L, 3 /* TL = (L-1) >> 3 */ + /* if (TL < 1) goto L_N3_M2_L7 */ + beq ZERO,TL, .L_N3_M2_L7 + +.L_N3_M2_TL1: /* TL-- */ + /***8-1***/ + /* Load 2 * 64 from A0 */ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + addi.d A0, A0, 0x10 + addi.d B0, B0, 0x10 + + /***8-2***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + addi.d A0, A0, 0x10 + addi.d B0, B0, 0x10 + + /***8-3***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + addi.d A0, A0, 0x10 + addi.d B0, B0, 0x10 + + /***8-4***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + addi.d A0, A0, 0x10 + addi.d B0, B0, 0x10 + + /***8-5***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + addi.d A0, A0, 0x10 + addi.d B0, B0, 0x10 + + /***8-6***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, 
U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + addi.d A0, A0, 0x10 + addi.d B0, B0, 0x10 + + /***8-7***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + addi.d A0, A0, 0x10 + addi.d B0, B0, 0x10 + + /***8-8***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + addi.d A0, A0, 0x10 + addi.d B0, B0, 0x10 + + addi.d TL, TL, -1 /* TL-- */ + blt ZERO,TL, .L_N3_M2_TL1 + +.L_N3_M2_L7: + /* if (!(L & 7)) goto L_N3_M2_L0 */ + andi TL, L, 7 + beq TL, ZERO,.L_N3_M2_L0 + +.L_N3_M2_L71: + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + /* Add stride for A0, B0 */ + addi.d A0, A0, 0x10 + addi.d B0, B0, 0x10 + + addi.d TL, TL, -1 + blt ZERO,TL, .L_N3_M2_L71 + +.L_N3_M2_L0: +#if defined(TRMMKERNEL) + xvfmul.d D0, D0, VALPHA + xvfmul.d D4, D4, VALPHA +#else + /* Load C0 */ + xvld U0, C0, 0x00 + xvfmadd.d D0, D0, VALPHA, U0 /* D0 = U0 + (D0 * VALPHA) */ + + /* Load C1 */ + xvld U0, C1, 0x00 + xvfmadd.d D4, D4, VALPHA, U0 +#endif + + xvstelm.d D0, C0, 0x00, 0x00 + xvstelm.d D4, C1, 0x00, 0x00 + xvstelm.d D0, C0, 0x08, 0x01 + xvstelm.d D4, C1, 0x08, 0x01 + + /* Add stride for C */ + addi.d C0, C0, 0x10 + addi.d C1, C1, 0x10 + +#if defined(TRMMKERNEL) +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + sub.d L, K, OFF +#ifdef LEFT + addi.d L, L, -2 +#else + addi.d L, L, -2 +#endif + slli.d T0, L, 0x04 + add.d A0, A0, T0 + add.d B0, B0, T0 +#endif + +#ifdef LEFT + addi.d OFF, OFF, 0x02 +#endif +#endif // #if defined(TRMMKERNEL) + +/********LOOP (if(N & 2 ) && (M & 2) ) End************/ + +.L_N3_M1: + andi I, M, 1 + beq ZERO,I, .L_N3_M0 + +#if defined(TRMMKERNEL) +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + move B0, B +#else + slli.d T0, OFF, 0x03 + 
add.d A0, A0, T0 + slli.d T0, OFF, 0x04 + add.d B0, B, T0 +#endif + +#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + sub.d L, K, OFF +#elif defined(LEFT) + /* number of values in A */ + addi.d L, OFF, 1 +#else + /* number of values in B */ + addi.d L, OFF, 2 +#endif +#else // #if !defined(TRMMKERNEL) + move B0, B + move L, K /* L = bk */ +#endif + + /* Load 1 * 64 from A0 */ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + /* line 1 */ + xvfmul.d D0, U0, U4 + + xvldrepl.d U4, B0, 0x08 + /* line 2 */ + xvfmul.d D4, U0, U4 + + /* Add stride for A0 and B0 */ + addi.d A0, A0, 0x08 + addi.d B0, B0, 0x10 + /* Reduce L */ + addi.d L, L, -1 + srai.d TL, L, 3 /* TL = (L-1) >> 3 */ + /* if (TL < 1) goto L_N3_M1_L7 */ + beq ZERO,TL, .L_N3_M1_L7 + +.L_N3_M1_TL1: /* TL-- */ + /***8-1***/ + /* Load 1 * 64 from A0 */ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + addi.d A0, A0, 0x08 + addi.d B0, B0, 0x10 + + /***8-2***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + addi.d A0, A0, 0x08 + addi.d B0, B0, 0x10 + + /***8-3***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + addi.d A0, A0, 0x08 + addi.d B0, B0, 0x10 + + /***8-4***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + addi.d A0, A0, 0x08 + addi.d B0, B0, 0x10 + + /***8-5***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + addi.d A0, A0, 0x08 + addi.d B0, B0, 0x10 + + /***8-6***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + addi.d A0, A0, 0x08 + addi.d B0, B0, 0x10 + + /***8-7***/ 
+ xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + addi.d A0, A0, 0x08 + addi.d B0, B0, 0x10 + + /***8-8***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + addi.d A0, A0, 0x08 + addi.d B0, B0, 0x10 + + addi.d TL, TL, -1 /* TL-- */ + blt ZERO,TL, .L_N3_M1_TL1 + +.L_N3_M1_L7: + /* if (!(L & 7)) goto L_N3_M1_L0 */ + andi TL, L, 7 + beq TL, ZERO,.L_N3_M1_L0 + +.L_N3_M1_L71: + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + xvldrepl.d U4, B0, 0x08 + xvfmadd.d D4, U0, U4, D4 + + /* Add stride for A0, B0 */ + addi.d A0, A0, 0x08 + addi.d B0, B0, 0x10 + + addi.d TL, TL, -1 + blt ZERO,TL, .L_N3_M1_L71 + +.L_N3_M1_L0: +#if defined(TRMMKERNEL) + xvfmul.d D0, D0, VALPHA + xvfmul.d D4, D4, VALPHA +#else + /* Load C0 */ + xvld U0, C0, 0x00 + xvfmadd.d D0, D0, VALPHA, U0 /* D0 = U0 + (D0 * VALPHA) */ + + /* Load C1 */ + xvld U0, C1, 0x00 + xvfmadd.d D4, D4, VALPHA, U0 +#endif + + xvstelm.d D0, C0, 0x00, 0x00 + xvstelm.d D4, C1, 0x00, 0x00 + + /* Add stride for C */ + addi.d C0, C0, 0x08 + addi.d C1, C1, 0x08 + +#if defined(TRMMKERNEL) +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + sub.d L, K, OFF +#ifdef LEFT + addi.d L, L, -1 +#else + addi.d L, L, -2 +#endif + slli.d T0, L, 0x03 + add.d A0, A0, T0 + slli.d T0, L, 0x04 + add.d B0, B0, T0 +#endif + +#ifdef LEFT + addi.d OFF, OFF, 0x01 +#endif +#endif // #if defined(TRMMKERNEL) + +/********LOOP (if(N & 2 ) && (M & 1) ) End************/ + +.L_N3_M0: + /* Add stride for B and C + * B += (K * 16) + * C += (LDC * 16) + */ + /* since the array type is double, + * so we must mul 32 + */ + addi.d T2, ZERO,16 + mul.d T0, K, T2 + mul.d T1, LDC, T2 + add.d B, B, T0 + add.d C, C, T1 + +#if defined(TRMMKERNEL) && !defined(LEFT) + addi.d OFF, OFF, 0x02 +#endif + + /* We must reinit I */ + srai.d I, M, 4 /* I = bm 
>> 4 */ + +/************************* Condition 3 if((N & 2) && (M >> 4)) End !!! ************************* +* dgemm_core_16x2 */ + +.L_N1: + andi J, N, 1 + beq ZERO, J, .L_N0 + +/************************* Condition 4 if((N & 1) && (M >> 4)) START !!! ************************* +* dgemm_core_16x1 */ + + move C0, C + move A0, A + +#if defined(TRMMKERNEL) && defined(LEFT) + move OFF, OFFSET +#endif + + /* if (!(M >> 4)) goto L_N1_M8 */ + srai.d I, M, 4 /* I = bm >> 4 */ + beq ZERO, I, .L_N1_M8 + +.L_N1_I1: +#if defined(TRMMKERNEL) +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + move B0, B +#else + slli.d T0, OFF, 0x07 + add.d A0, A0, T0 + slli.d T0, OFF, 0x03 + add.d B0, B, T0 +#endif + +#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + sub.d L, K, OFF +#elif defined(LEFT) + /* number of values in A */ + addi.d L, OFF, 16 +#else + /* number of values in B */ + addi.d L, OFF, 1 +#endif +#else // #if !defined(TRMMKERNEL) + move B0, B + move L, K /* L = bk */ +#endif + + /* Load 16 * 64 from A0 + * U0 = {a3, a2, a1, a0} + * U1 = {a7, a6, a5, a4} + * U2 = {a11, a10, a9, a8} + * U3 = {a15, a14, a13, a12} + */ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + xvld U2, A0, 0x40 + xvld U3, A0, 0x60 + + xvldrepl.d U4, B0, 0x00 + /* line 1 */ + xvfmul.d D0, U0, U4 + xvfmul.d D1, U1, U4 + xvfmul.d D2, U2, U4 + xvfmul.d D3, U3, U4 + + /* Add stride for A0 and B0 */ + addi.d A0, A0, 0x80 + addi.d B0, B0, 0x08 + /* Reduce L */ + addi.d L, L, -1 + srai.d TL, L, 3 /* TL = (L-1) >> 3 */ + /* if (TL < 1) goto L_N1_L7 */ + beq ZERO,TL, .L_N1_L7 + +.L_N1_TL1: /* TL-- */ + /***8-1***/ + /* Load 16 * 64 from A0 */ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + xvld U2, A0, 0x40 + xvld U3, A0, 0x60 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + xvfmadd.d D2, U2, U4, D2 + xvfmadd.d D3, U3, U4, D3 + + addi.d A0, A0, 0x80 + addi.d B0, B0, 0x08 + + /***8-2***/ + /* Load 16 * 64 from A0 */ + xvld U0, A0, 
0x00 + xvld U1, A0, 0x20 + xvld U2, A0, 0x40 + xvld U3, A0, 0x60 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + xvfmadd.d D2, U2, U4, D2 + xvfmadd.d D3, U3, U4, D3 + + addi.d A0, A0, 0x80 + addi.d B0, B0, 0x08 + + /***8-3***/ + /* Load 16 * 64 from A0 */ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + xvld U2, A0, 0x40 + xvld U3, A0, 0x60 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + xvfmadd.d D2, U2, U4, D2 + xvfmadd.d D3, U3, U4, D3 + + addi.d A0, A0, 0x80 + addi.d B0, B0, 0x08 + + /***8-4***/ + /* Load 16 * 64 from A0 */ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + xvld U2, A0, 0x40 + xvld U3, A0, 0x60 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + xvfmadd.d D2, U2, U4, D2 + xvfmadd.d D3, U3, U4, D3 + + addi.d A0, A0, 0x80 + addi.d B0, B0, 0x08 + + /***8-5***/ + /* Load 16 * 64 from A0 */ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + xvld U2, A0, 0x40 + xvld U3, A0, 0x60 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + xvfmadd.d D2, U2, U4, D2 + xvfmadd.d D3, U3, U4, D3 + + addi.d A0, A0, 0x80 + addi.d B0, B0, 0x08 + + /***8-6***/ + /* Load 16 * 64 from A0 */ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + xvld U2, A0, 0x40 + xvld U3, A0, 0x60 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + xvfmadd.d D2, U2, U4, D2 + xvfmadd.d D3, U3, U4, D3 + + addi.d A0, A0, 0x80 + addi.d B0, B0, 0x08 + + /***8-7***/ + /* Load 16 * 64 from A0 */ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + xvld U2, A0, 0x40 + xvld U3, A0, 0x60 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + xvfmadd.d D2, U2, U4, D2 + xvfmadd.d D3, U3, U4, D3 + + addi.d A0, A0, 0x80 + addi.d B0, B0, 0x08 + + /***8-8***/ + /* Load 16 * 64 from A0 */ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + xvld U2, A0, 0x40 + xvld U3, A0, 0x60 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + 
xvfmadd.d D2, U2, U4, D2 + xvfmadd.d D3, U3, U4, D3 + + addi.d A0, A0, 0x80 + addi.d B0, B0, 0x08 + + addi.d TL, TL, -1 /* TL-- */ + blt ZERO,TL, .L_N1_TL1 + +.L_N1_L7: + /* if (!(L & 7)) goto L_N1_L0 */ + andi TL, L, 7 + beq TL, ZERO,.L_N1_L0 + +.L_N1_L71: + /* Load 16 * 64 from A0 */ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + xvld U2, A0, 0x40 + xvld U3, A0, 0x60 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + xvfmadd.d D2, U2, U4, D2 + xvfmadd.d D3, U3, U4, D3 + + /* Add stride for A0, B0 */ + addi.d A0, A0, 0x80 + addi.d B0, B0, 0x08 + + addi.d TL, TL, -1 + blt ZERO,TL, .L_N1_L71 + +.L_N1_L0: +#if defined(TRMMKERNEL) + xvfmul.d D0, D0, VALPHA + xvfmul.d D1, D1, VALPHA + xvfmul.d D2, D2, VALPHA + xvfmul.d D3, D3, VALPHA +#else + /* Load C0 */ + xvld U0, C0, 0x00 + xvld U1, C0, 0x20 + xvld U2, C0, 0x40 + xvld U3, C0, 0x60 + xvfmadd.d D0, D0, VALPHA, U0 /* D0 = U0 + (D0 * VALPHA) */ + xvfmadd.d D1, D1, VALPHA, U1 + xvfmadd.d D2, D2, VALPHA, U2 + xvfmadd.d D3, D3, VALPHA, U3 +#endif + + /* Store C0 */ + xvst D0, C0, 0x00 + xvst D1, C0, 0x20 + xvst D2, C0, 0x40 + xvst D3, C0, 0x60 + + /* Add stride for C */ + addi.d C0, C0, 0x80 + +#if defined(TRMMKERNEL) +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + sub.d L, K, OFF +#ifdef LEFT + addi.d L, L, -16 +#else + addi.d L, L, -1 +#endif + slli.d T0, L, 0x07 + add.d A0, A0, T0 + slli.d T0, L, 0x03 + add.d B0, B0, T0 +#endif + +#ifdef LEFT + addi.d OFF, OFF, 0x10 +#endif +#endif // #if defined(TRMMKERNEL) + + addi.d I, I, -1 /* I-- */ + blt ZERO,I, .L_N1_I1 + +.L_N1_M8: + /* We have done M & 16, considering M=8/4/2/1 */ + andi I, M, 15 + beq ZERO,I, .L_N1_M0 + + andi I, M, 8 + beq ZERO,I, .L_N1_M4 + +#if defined(TRMMKERNEL) +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + move B0, B +#else + slli.d T0, OFF, 0x06 + add.d A0, A0, T0 + slli.d T0, OFF, 0x03 + add.d B0, B, T0 +#endif + +#if (defined(LEFT) && !defined(TRANSA)) 
|| (!defined(LEFT) && defined(TRANSA)) + sub.d L, K, OFF +#elif defined(LEFT) + /* number of values in A */ + addi.d L, OFF, 8 +#else + /* number of values in B */ + addi.d L, OFF, 1 +#endif +#else // #if !defined(TRMMKERNEL) + move B0, B + move L, K /* L = bk */ +#endif + + /* Load 8 * 64 from A0 */ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + + xvldrepl.d U4, B0, 0x00 + /* line 1 */ + xvfmul.d D0, U0, U4 + xvfmul.d D1, U1, U4 + + /* Add stride for A0 and B0 */ + addi.d A0, A0, 0x40 + addi.d B0, B0, 0x08 + /* Reduce L */ + addi.d L, L, -1 + srai.d TL, L, 3 /* TL = (L-1) >> 3 */ + /* if (TL < 1) goto L_N1_M8_L7 */ + beq ZERO,TL, .L_N1_M8_L7 + +.L_N1_M8_TL1: /* TL-- */ + /***8-1***/ + /* Load 16 * 64 from A0 */ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + + addi.d A0, A0, 0x40 + addi.d B0, B0, 0x08 + + /***8-2***/ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + + addi.d A0, A0, 0x40 + addi.d B0, B0, 0x08 + + /***8-3***/ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + + addi.d A0, A0, 0x40 + addi.d B0, B0, 0x08 + + /***8-4***/ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + + addi.d A0, A0, 0x40 + addi.d B0, B0, 0x08 + + /***8-5***/ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + + addi.d A0, A0, 0x40 + addi.d B0, B0, 0x08 + + /***8-6***/ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + + addi.d A0, A0, 0x40 + addi.d B0, B0, 0x08 + + /***8-7***/ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + + addi.d A0, A0, 0x40 + addi.d B0, B0, 0x08 + + 
/***8-8***/ + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + + addi.d A0, A0, 0x40 + addi.d B0, B0, 0x08 + + addi.d TL, TL, -1 /* TL-- */ + blt ZERO,TL, .L_N1_M8_TL1 + +.L_N1_M8_L7: + /* if (!(L & 7)) goto L_N1_M8_L0 */ + andi TL, L, 7 + beq TL, ZERO,.L_N1_M8_L0 + +.L_N1_M8_L71: + xvld U0, A0, 0x00 + xvld U1, A0, 0x20 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + xvfmadd.d D1, U1, U4, D1 + + /* Add stride for A0, B0 */ + addi.d A0, A0, 0x40 + addi.d B0, B0, 0x08 + + addi.d TL, TL, -1 + blt ZERO,TL, .L_N1_M8_L71 + +.L_N1_M8_L0: +#if defined(TRMMKERNEL) + xvfmul.d D0, D0, VALPHA + xvfmul.d D1, D1, VALPHA +#else + /* Load C0 */ + xvld U0, C0, 0x00 + xvld U1, C0, 0x20 + xvfmadd.d D0, D0, VALPHA, U0 /* D0 = U0 + (D0 * VALPHA) */ + xvfmadd.d D1, D1, VALPHA, U1 +#endif + + /* Store C0 */ + xvst D0, C0, 0x00 + xvst D1, C0, 0x20 + + /* Add stride for C */ + addi.d C0, C0, 0x40 + +#if defined(TRMMKERNEL) +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + sub.d L, K, OFF +#ifdef LEFT + addi.d L, L, -8 +#else + addi.d L, L, -1 +#endif + slli.d T0, L, 0x06 + add.d A0, A0, T0 + slli.d T0, L, 0x03 + add.d B0, B0, T0 +#endif + +#ifdef LEFT + addi.d OFF, OFF, 0x08 +#endif +#endif // #if defined(TRMMKERNEL) + +/********LOOP (if(N & 2) && (M & 8) ) End************/ + +.L_N1_M4: + andi I, M, 4 + beq ZERO,I, .L_N1_M2 + +#if defined(TRMMKERNEL) +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + move B0, B +#else + slli.d T0, OFF, 0x05 + add.d A0, A0, T0 + slli.d T0, OFF, 0x03 + add.d B0, B, T0 +#endif + +#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + sub.d L, K, OFF +#elif defined(LEFT) + /* number of values in A */ + addi.d L, OFF, 4 +#else + /* number of values in B */ + addi.d L, OFF, 1 +#endif +#else // #if !defined(TRMMKERNEL) + move B0, B + move L, K /* L = bk */ +#endif + + /* Load 4 * 64 from A0 */ + 
xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + /* line 1 */ + xvfmul.d D0, U0, U4 + + /* Add stride for A0 and B0 */ + addi.d A0, A0, 0x20 + addi.d B0, B0, 0x08 + /* Reduce L */ + addi.d L, L, -1 + srai.d TL, L, 3 /* TL = (L-1) >> 3 */ + /* if (TL < 1) goto L_N1_M4_L7 */ + beq ZERO,TL, .L_N1_M4_L7 + +.L_N1_M4_TL1: /* TL-- */ + /***8-1***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + addi.d A0, A0, 0x20 + addi.d B0, B0, 0x08 + + /***8-2***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + addi.d A0, A0, 0x20 + addi.d B0, B0, 0x08 + + /***8-3***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + addi.d A0, A0, 0x20 + addi.d B0, B0, 0x08 + + /***8-4***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + addi.d A0, A0, 0x20 + addi.d B0, B0, 0x08 + + /***8-5***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + addi.d A0, A0, 0x20 + addi.d B0, B0, 0x08 + + /***8-6***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + addi.d A0, A0, 0x20 + addi.d B0, B0, 0x08 + + /***8-7***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + addi.d A0, A0, 0x20 + addi.d B0, B0, 0x08 + + /***8-8***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + addi.d A0, A0, 0x20 + addi.d B0, B0, 0x08 + + addi.d TL, TL, -1 /* TL-- */ + blt ZERO,TL, .L_N1_M4_TL1 + +.L_N1_M4_L7: + /* if (!(L & 7)) goto L_N1_M4_L0 */ + andi TL, L, 7 + beq TL, ZERO,.L_N1_M4_L0 + +.L_N1_M4_L71: + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + /* Add stride for A0, B0 */ + addi.d A0, A0, 0x20 + addi.d B0, B0, 0x08 + + addi.d TL, TL, -1 + blt ZERO,TL, .L_N1_M4_L71 + +.L_N1_M4_L0: +#if defined(TRMMKERNEL) + xvfmul.d D0, D0, VALPHA +#else + /* Load C0 */ + xvld U0, C0, 0x00 + xvfmadd.d D0, D0, VALPHA, U0 /* D0 = U0 + (D0 * VALPHA) */ + #endif + + /* Store C0 */ + 
xvst D0, C0, 0x00 + + /* Add stride for C */ + addi.d C0, C0, 0x20 + +#if defined(TRMMKERNEL) +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + sub.d L, K, OFF +#ifdef LEFT + addi.d L, L, -4 +#else + addi.d L, L, -1 +#endif + slli.d T0, L, 0x05 + add.d A0, A0, T0 + slli.d T0, L, 0x03 + add.d B0, B0, T0 +#endif + +#ifdef LEFT + addi.d OFF, OFF, 0x04 +#endif +#endif // #if defined(TRMMKERNEL) + +/********LOOP (if(N & 2 ) && (M & 4) ) End************/ + +.L_N1_M2: + andi I, M, 2 + beq ZERO,I, .L_N1_M1 + +#if defined(TRMMKERNEL) +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + move B0, B +#else + slli.d T0, OFF, 0x04 + add.d A0, A0, T0 + slli.d T0, OFF, 0x03 + add.d B0, B, T0 +#endif + +#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + sub.d L, K, OFF +#elif defined(LEFT) + /* number of values in A */ + addi.d L, OFF, 2 +#else + /* number of values in B */ + addi.d L, OFF, 1 +#endif +#else // #if !defined(TRMMKERNEL) + move B0, B + move L, K /* L = bk */ +#endif + + /* Load 2 * 64 from A0 */ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + /* line 1 */ + xvfmul.d D0, U0, U4 + + /* Add stride for A0 and B0 */ + addi.d A0, A0, 0x10 + addi.d B0, B0, 0x08 + /* Reduce L */ + addi.d L, L, -1 + srai.d TL, L, 3 /* TL = (L-1) >> 3 */ + /* if (TL < 1) goto L_N1_M2_L7 */ + beq ZERO,TL, .L_N1_M2_L7 + +.L_N1_M2_TL1: /* TL-- */ + /***8-1***/ + /* Load 2 * 64 from A0 */ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + addi.d A0, A0, 0x10 + addi.d B0, B0, 0x08 + + /***8-2***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + addi.d A0, A0, 0x10 + addi.d B0, B0, 0x08 + + /***8-3***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + addi.d A0, A0, 0x10 + addi.d B0, B0, 0x08 + + /***8-4***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + addi.d A0, A0, 0x10 + addi.d B0, B0, 
0x08 + + /***8-5***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + addi.d A0, A0, 0x10 + addi.d B0, B0, 0x08 + + /***8-6***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + addi.d A0, A0, 0x10 + addi.d B0, B0, 0x08 + + /***8-7***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + addi.d A0, A0, 0x10 + addi.d B0, B0, 0x08 + + /***8-8***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + addi.d A0, A0, 0x10 + addi.d B0, B0, 0x08 + + addi.d TL, TL, -1 /* TL-- */ + blt ZERO,TL, .L_N1_M2_TL1 + +.L_N1_M2_L7: + /* if (!(L & 7)) goto L_N1_M2_L0 */ + andi TL, L, 7 + beq TL, ZERO,.L_N1_M2_L0 + +.L_N1_M2_L71: + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + /* Add stride for A0, B0 */ + addi.d A0, A0, 0x10 + addi.d B0, B0, 0x08 + + addi.d TL, TL, -1 + blt ZERO,TL, .L_N1_M2_L71 + +.L_N1_M2_L0: +#if defined(TRMMKERNEL) + xvfmul.d D0, D0, VALPHA +#else + /* Load C0 */ + xvld U0, C0, 0x00 + xvfmadd.d D0, D0, VALPHA, U0 /* D0 = U0 + (D0 * VALPHA) */ +#endif + + xvstelm.d D0, C0, 0x00, 0x00 + xvstelm.d D0, C0, 0x08, 0x01 + + /* Add stride for C */ + addi.d C0, C0, 0x10 + +#if defined(TRMMKERNEL) +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + sub.d L, K, OFF +#ifdef LEFT + addi.d L, L, -2 +#else + addi.d L, L, -1 +#endif + slli.d T0, L, 0x04 + add.d A0, A0, T0 + slli.d T0, L, 0x03 + add.d B0, B0, T0 +#endif + +#ifdef LEFT + addi.d OFF, OFF, 0x02 +#endif +#endif // #if defined(TRMMKERNEL) + +/********LOOP (if(N & 2 ) && (M & 2) ) End************/ + +.L_N1_M1: + andi I, M, 1 + beq ZERO,I, .L_N1_M0 + +#if defined(TRMMKERNEL) +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + move B0, B +#else + slli.d T0, OFF, 0x03 + add.d A0, A0, T0 + add.d B0, B, T0 +#endif + +#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + sub.d L, K, OFF +#elif 
defined(LEFT) + /* number of values in A */ + addi.d L, OFF, 1 +#else + /* number of values in B */ + addi.d L, OFF, 1 +#endif +#else // #if !defined(TRMMKERNEL) + move B0, B + move L, K /* L = bk */ +#endif + + /* Load 1 * 64 from A0 */ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + /* line 1 */ + xvfmul.d D0, U0, U4 + + /* Add stride for A0 and B0 */ + addi.d A0, A0, 0x08 + addi.d B0, B0, 0x08 + /* Reduce L */ + addi.d L, L, -1 + srai.d TL, L, 3 /* TL = (L-1) >> 3 */ + /* if (TL < 1) goto L_N1_M1_L7 */ + beq ZERO,TL, .L_N1_M1_L7 + +.L_N1_M1_TL1: /* TL-- */ + /***8-1***/ + /* Load 1 * 64 from A0 */ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + addi.d A0, A0, 0x08 + addi.d B0, B0, 0x08 + + /***8-2***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + addi.d A0, A0, 0x08 + addi.d B0, B0, 0x08 + + /***8-3***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + addi.d A0, A0, 0x08 + addi.d B0, B0, 0x08 + + /***8-4***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + addi.d A0, A0, 0x08 + addi.d B0, B0, 0x08 + + /***8-5***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + addi.d A0, A0, 0x08 + addi.d B0, B0, 0x08 + + /***8-6***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + addi.d A0, A0, 0x08 + addi.d B0, B0, 0x08 + + /***8-7***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + addi.d A0, A0, 0x08 + addi.d B0, B0, 0x08 + + /***8-8***/ + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + addi.d A0, A0, 0x08 + addi.d B0, B0, 0x08 + + addi.d TL, TL, -1 /* TL-- */ + blt ZERO,TL, .L_N1_M1_TL1 + +.L_N1_M1_L7: + /* if (!(L & 7)) goto L_N1_M1_L0 */ + andi TL, L, 7 + beq TL, ZERO,.L_N1_M1_L0 + +.L_N1_M1_L71: + xvld U0, A0, 0x00 + + xvldrepl.d U4, B0, 0x00 + xvfmadd.d D0, U0, U4, D0 + + /* Add stride for A0, B0 */ + addi.d A0, A0, 0x08 + 
addi.d B0, B0, 0x08 + + addi.d TL, TL, -1 + blt ZERO,TL, .L_N1_M1_L71 + +.L_N1_M1_L0: +#if defined(TRMMKERNEL) + xvfmul.d D0, D0, VALPHA +#else + /* Load C0 */ + xvld U0, C0, 0x00 + xvfmadd.d D0, D0, VALPHA, U0 /* D0 = U0 + (D0 * VALPHA) */ +#endif + + xvstelm.d D0, C0, 0x00, 0x00 + + /* Add stride for C */ + addi.d C0, C0, 0x08 + +#if defined(TRMMKERNEL) +#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) + sub.d L, K, OFF +#ifdef LEFT + addi.d L, L, -1 +#else + addi.d L, L, -1 +#endif + slli.d T0, L, 0x03 + add.d A0, A0, T0 + add.d B0, B0, T0 +#endif + +#ifdef LEFT + addi.d OFF, OFF, 0x01 +#endif +#endif // #if defined(TRMMKERNEL) + +/********LOOP (if(N & 2 ) && (M & 1) ) End************/ + +.L_N1_M0: + +/************************* Condition 4 if((N & 1) && (M >> 4)) End !!! ************************* +* dgemm_core_16x1 */ + +.L_N0: + /* Restore $r23~$31 */ + LDARG $r23, $sp, 0 + LDARG $r24, $sp, 8 + LDARG $r25, $sp, 16 + LDARG $r26, $sp, 24 + LDARG $r27, $sp, 32 + LDARG $r28, $sp, 40 + LDARG $r29, $sp, 48 + LDARG $r30, $sp, 56 + LDARG $r31, $sp, 64 + fld.d $f23, $sp, 72 + fld.d $f24, $sp, 80 + fld.d $f25, $sp, 96 + fld.d $f26, $sp, 104 + fld.d $f27, $sp, 112 + fld.d $f28, $sp, 120 + fld.d $f29, $sp, 128 + fld.d $f30, $sp, 136 + fld.d $f31, $sp, 144 + addi.d $sp, $sp, 160 + + /* Back home */ + jirl $r0, $r1, 0x0 + + EPILOGUE diff --git a/kernel/loongarch64/dgemm_ncopy_16.S b/kernel/loongarch64/dgemm_ncopy_16.S index 95c879031a..4c32e0ec79 100644 --- a/kernel/loongarch64/dgemm_ncopy_16.S +++ b/kernel/loongarch64/dgemm_ncopy_16.S @@ -655,6 +655,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
addi.d TD, TD, 0x10 .L_N1: + andi J, N, 0x01 + beq ZERO, J, .L_N0 move S1, TS beq ZERO, M, .L_N0 diff --git a/kernel/loongarch64/dgemm_ncopy_8_lsx.S b/kernel/loongarch64/dgemm_ncopy_8_lsx.S index 203c3eb27d..4ca485508a 100644 --- a/kernel/loongarch64/dgemm_ncopy_8_lsx.S +++ b/kernel/loongarch64/dgemm_ncopy_8_lsx.S @@ -268,6 +268,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi.d S2, S2, 0x08 addi.d TD, TD, 0x10 .L_N1: + andi J, N, 0x01 + beq ZERO, J, .L_N0 move S1, TS beq ZERO, M, .L_N0 .L_M1: diff --git a/kernel/loongarch64/dgemm_tcopy_6.S b/kernel/loongarch64/dgemm_tcopy_6.S new file mode 100644 index 0000000000..d3bb4a2a64 --- /dev/null +++ b/kernel/loongarch64/dgemm_tcopy_6.S @@ -0,0 +1,555 @@ +/******************************************************************************* +Copyright (c) 2024, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*******************************************************************************/ +#define ASSEMBLER + +#include "common.h" +/* Function parameters */ +#define M $r4 // param 1: m +#define N $r5 // param 2: n +#define SRC $r6 // param 3: src +#define LDA $r7 // param 4: lda +#define DST $r8 // param 5: dst + +#define I $r9 +#define J $r10 +#define S0 $r11 +#define S1 $r12 +#define S2 $r13 +#define S3 $r14 +#define S4 $r15 +#define S5 $r16 +#define S6 $r17 +#define S7 $r18 +#define S8 $r19 +#define P0 $r20 +#define P1 $r23 +#define P2 $r24 +#define P3 $r25 +#define P4 $r26 +#define T0 $r27 +#define T1 $r28 +#define T2 $r29 +#define TL $r7 +#define ZERO $r0 + +#define F0 $f0 +#define F1 $f1 +#define F2 $f2 +#define F3 $f3 +#define F4 $f4 +#define F5 $f5 +#define F6 $f6 +#define F7 $f7 +/* LSX vectors */ +#define V0 $vr0 +#define V1 $vr1 +#define V2 $vr2 +#define V3 $vr3 +/* LASX vectors */ +#define U0 $xr4 +#define U1 $xr5 +#define U2 $xr6 +#define U3 $xr7 +#define U4 $xr8 +#define U5 $xr9 +#define U6 $xr10 +#define U7 $xr11 + + PROLOGUE + + addi.d $sp, $sp, -56 + SDARG $r23, $sp, 0 + SDARG $r24, $sp, 8 + SDARG $r25, $sp, 16 + SDARG $r26, $sp, 24 + SDARG $r27, $sp, 32 + SDARG $r28, $sp, 40 + SDARG $r29, $sp, 48 + + move S0, SRC + move P0, DST + + addi.d I, ZERO, 0x06 + div.d T0, N, I // 1 + mul.d T1, I, T0 // 6 + sub.d N, N, T1 // 1 + + srai.d T2, N, 0x02 + slli.d T2, T2, 0x02 + add.d T2, T1, T2 + + mul.d P2, M, T1 + mul.d P3, M, T2 + slli.d P2, P2, 0x03 + 
slli.d P3, P3, 0x03 + + srai.d T2, N, 0x01 + srai.d J, M, 0x03 + slli.d T2, T2, 0x01 + add.d T2, T1, T2 + + add.d P2, DST, P2 + mul.d P4, M, T2 + add.d P3, DST, P3 + slli.d P4, P4, 0x03 + slli.d TL, LDA, 0x03 + add.d P4, DST, P4 + + slli.d T2, TL, 0x01 + slli.d T1, M, 0x03 + mul.d T1, T1, I + beq ZERO, J, .L_M7 + +.L_J1: /* J-- */ + move S1, S0 + add.d S2, S0, TL + add.d S3, S1, T2 + add.d S4, S2, T2 + add.d S5, S3, T2 + add.d S6, S4, T2 + add.d S7, S5, T2 + add.d S8, S6, T2 + add.d S0, S7, T2 + + move P1, P0 + addi.d P0, P0, 0x180 + + move I, T0 + addi.d J, J, -1 + beq ZERO, I, .L_N7 + +.L_I1: /* I-- */ + xvld U0, S1, 0x00 + vld V0, S1, 0x20 + xvld U1, S2, 0x00 + vld V1, S2, 0x20 + xvld U2, S3, 0x00 + vld V2, S3, 0x20 + xvld U3, S4, 0x00 + vld V3, S4, 0x20 + + xvst U0, P1, 0x00 + vst V0, P1, 0x20 + + xvst U1, P1, 0x30 + vst V1, P1, 0x50 + + xvst U2, P1, 0x60 + vst V2, P1, 0x80 + + xvst U3, P1, 0x90 + vst V3, P1, 0xB0 + + xvld U0, S5, 0x00 + vld V0, S5, 0x20 + xvld U1, S6, 0x00 + vld V1, S6, 0x20 + xvld U2, S7, 0x00 + vld V2, S7, 0x20 + xvld U3, S8, 0x00 + vld V3, S8, 0x20 + + xvst U0, P1, 0xC0 + vst V0, P1, 0xE0 + + xvst U1, P1, 0xF0 + vst V1, P1, 0x110 + + xvst U2, P1, 0x120 + vst V2, P1, 0x140 + + xvst U3, P1, 0x150 + vst V3, P1, 0x170 + + addi.d S1, S1, 0x30 + addi.d S2, S2, 0x30 + addi.d S3, S3, 0x30 + addi.d S4, S4, 0x30 + addi.d S5, S5, 0x30 + addi.d S6, S6, 0x30 + addi.d S7, S7, 0x30 + addi.d S8, S8, 0x30 + addi.d I, I, -1 + + add.d P1, P1, T1 + blt ZERO, I, .L_I1 + +.L_N7: + andi I, N, 0x04 + beq ZERO, I, .L_N3 + + xvld U0, S1, 0x00 + xvld U1, S2, 0x00 + xvld U2, S3, 0x00 + xvld U3, S4, 0x00 + xvld U4, S5, 0x00 + xvld U5, S6, 0x00 + xvld U6, S7, 0x00 + xvld U7, S8, 0x00 + + xvst U0, P2, 0x00 + xvst U1, P2, 0x20 + xvst U2, P2, 0x40 + xvst U3, P2, 0x60 + xvst U4, P2, 0x80 + xvst U5, P2, 0xA0 + xvst U6, P2, 0xC0 + xvst U7, P2, 0xE0 + + addi.d S1, S1, 0x20 + addi.d S2, S2, 0x20 + addi.d S3, S3, 0x20 + addi.d S4, S4, 0x20 + addi.d S5, S5, 0x20 + addi.d S6, S6, 
0x20 + addi.d S7, S7, 0x20 + addi.d S8, S8, 0x20 + addi.d P2, P2, 0x100 + +.L_N3: + andi I, N, 0x02 + beq ZERO, I, .L_N1 + + xvld U0, S1, 0x00 + xvld U1, S2, 0x00 + xvld U2, S3, 0x00 + xvld U3, S4, 0x00 + xvld U4, S5, 0x00 + xvld U5, S6, 0x00 + xvld U6, S7, 0x00 + xvld U7, S8, 0x00 + + xvpermi.q U0, U1, 0x02 + xvpermi.q U2, U3, 0x02 + xvpermi.q U4, U5, 0x02 + xvpermi.q U6, U7, 0x02 + + xvst U0, P3, 0x00 + xvst U2, P3, 0x20 + xvst U4, P3, 0x40 + xvst U6, P3, 0x60 + + addi.d S1, S1, 0x10 + addi.d S2, S2, 0x10 + addi.d S3, S3, 0x10 + addi.d S4, S4, 0x10 + addi.d S5, S5, 0x10 + addi.d S6, S6, 0x10 + addi.d S7, S7, 0x10 + addi.d S8, S8, 0x10 + addi.d P3, P3, 0x80 + +.L_N1: + andi I, N, 0x01 + beq ZERO, I, .L_N0 + + fld.d F0, S1, 0x00 + fld.d F1, S2, 0x00 + fld.d F2, S3, 0x00 + fld.d F3, S4, 0x00 + fld.d F4, S5, 0x00 + fld.d F5, S6, 0x00 + fld.d F6, S7, 0x00 + fld.d F7, S8, 0x00 + + fst.d F0, P4, 0x00 + fst.d F1, P4, 0x08 + fst.d F2, P4, 0x10 + fst.d F3, P4, 0x18 + fst.d F4, P4, 0x20 + fst.d F5, P4, 0x28 + fst.d F6, P4, 0x30 + fst.d F7, P4, 0x38 + + addi.d S1, S1, 0x08 + addi.d S2, S2, 0x08 + addi.d S3, S3, 0x08 + addi.d S4, S4, 0x08 + addi.d S5, S5, 0x08 + addi.d S6, S6, 0x08 + addi.d S7, S7, 0x08 + addi.d S8, S8, 0x08 + addi.d P4, P4, 0x40 + +.L_N0: + blt ZERO, J, .L_J1 + +.L_M7: + andi J, M, 0x04 + beq ZERO, J, .L_M3 + + move S1, S0 + add.d S2, S0, TL + add.d S3, S1, T2 + add.d S4, S2, T2 + add.d S0, S3, T2 + + move P1, P0 + addi.d P0, P0, 0xC0 + + move I, T0 + beq ZERO, I, .L_4N7 + +.L_4I1: /* I-- */ + xvld U0, S1, 0x00 + vld V0, S1, 0x20 + xvld U1, S2, 0x00 + vld V1, S2, 0x20 + xvld U2, S3, 0x00 + vld V2, S3, 0x20 + xvld U3, S4, 0x00 + vld V3, S4, 0x20 + + xvst U0, P1, 0x00 + vst V0, P1, 0x20 + + xvst U1, P1, 0x30 + vst V1, P1, 0x50 + + xvst U2, P1, 0x60 + vst V2, P1, 0x80 + + xvst U3, P1, 0x90 + vst V3, P1, 0xB0 + + addi.d S1, S1, 0x30 + addi.d S2, S2, 0x30 + addi.d S3, S3, 0x30 + addi.d S4, S4, 0x30 + + addi.d I, I, -1 + add.d P1, P1, T1 + blt ZERO, I, .L_4I1 + 
+.L_4N7: + andi I, N, 0x04 + beq ZERO, I, .L_4N3 + + xvld U0, S1, 0x00 + xvld U1, S2, 0x00 + xvld U2, S3, 0x00 + xvld U3, S4, 0x00 + + xvst U0, P2, 0x00 + xvst U1, P2, 0x20 + xvst U2, P2, 0x40 + xvst U3, P2, 0x60 + + addi.d S1, S1, 0x20 + addi.d S2, S2, 0x20 + addi.d S3, S3, 0x20 + addi.d S4, S4, 0x20 + addi.d P2, P2, 0x80 + +.L_4N3: + andi I, N, 0x02 + beq ZERO, I, .L_4N1 + + xvld U0, S1, 0x00 + xvld U1, S2, 0x00 + xvld U2, S3, 0x00 + xvld U3, S4, 0x00 + + xvpermi.q U0, U1, 0x02 + xvpermi.q U2, U3, 0x02 + + xvst U0, P3, 0x00 + xvst U2, P3, 0x20 + + addi.d S1, S1, 0x10 + addi.d S2, S2, 0x10 + addi.d S3, S3, 0x10 + addi.d S4, S4, 0x10 + addi.d P3, P3, 0x40 + +.L_4N1: + andi I, N, 0x01 + beq ZERO, I, .L_M3 + + fld.d F0, S1, 0x00 + fld.d F1, S2, 0x00 + fld.d F2, S3, 0x00 + fld.d F3, S4, 0x00 + + fst.d F0, P4, 0x00 + fst.d F1, P4, 0x08 + fst.d F2, P4, 0x10 + fst.d F3, P4, 0x18 + + addi.d S1, S1, 0x08 + addi.d S2, S2, 0x08 + addi.d S3, S3, 0x08 + addi.d S4, S4, 0x08 + addi.d P4, P4, 0x20 + +.L_M3: + andi J, M, 0x02 + beq ZERO, J, .L_M1 + + move S1, S0 + add.d S2, S0, TL + add.d S0, S0, T2 + + move P1, P0 + addi.d P0, P0, 0x60 + + move I, T0 + beq ZERO, I, .L_2N7 + +.L_2I1: /* I-- */ + xvld U0, S1, 0x00 + vld V0, S1, 0x20 + xvld U1, S2, 0x00 + vld V1, S2, 0x20 + + xvst U0, P1, 0x00 + vst V0, P1, 0x20 + + xvst U1, P1, 0x30 + vst V1, P1, 0x50 + + addi.d S1, S1, 0x30 + addi.d S2, S2, 0x30 + addi.d I, I, -1 + add.d P1, P1, T1 + blt ZERO, I, .L_2I1 + +.L_2N7: + andi I, N, 0x04 + beq ZERO, I, .L_2N3 + + xvld U0, S1, 0x00 + xvld U1, S2, 0x00 + + xvst U0, P2, 0x00 + xvst U1, P2, 0x20 + + addi.d S1, S1, 0x20 + addi.d S2, S2, 0x20 + addi.d P2, P2, 0x40 + +.L_2N3: + andi I, N, 0x02 + beq ZERO, I, .L_2N1 + + xvld U0, S1, 0x00 + xvld U1, S2, 0x00 + + xvpermi.q U0, U1, 0x02 + + xvst U0, P3, 0x00 + + addi.d S1, S1, 0x10 + addi.d S2, S2, 0x10 + addi.d P3, P3, 0x20 + +.L_2N1: + andi I, N, 0x01 + beq ZERO, I, .L_M1 + + fld.d F0, S1, 0x00 + fld.d F1, S2, 0x00 + + fst.d F0, P4, 0x00 + fst.d 
F1, P4, 0x08 + + addi.d S1, S1, 0x08 + addi.d S2, S2, 0x08 + addi.d P4, P4, 0x10 + +.L_M1: + andi J, M, 0x01 + beq ZERO, J, .L_M0 + + move S1, S0 + add.d S2, S0, TL + + move P1, P0 + addi.d P0, P0, 0x30 + + move I, T0 + beq ZERO, I, .L_1N7 + +.L_1I1: /* I-- */ + xvld U0, S1, 0x00 + vld V0, S1, 0x20 + + xvst U0, P1, 0x00 + vst V0, P1, 0x20 + + addi.d S1, S1, 0x30 + addi.d I, I, -1 + add.d P1, P1, T1 + blt ZERO, I, .L_1I1 + +.L_1N7: + andi I, N, 0x04 + beq ZERO, I, .L_1N3 + + xvld U0, S1, 0x00 + + xvst U0, P2, 0x00 + + addi.d S1, S1, 0x20 + addi.d P2, P2, 0x20 + +.L_1N3: + andi I, N, 0x02 + beq ZERO, I, .L_1N1 + + fld.d F0, S1, 0x00 + fld.d F1, S1, 0x08 + + fst.d F0, P3, 0x00 + fst.d F1, P3, 0x08 + + addi.d S1, S1, 0x10 + addi.d P3, P3, 0x10 + +.L_1N1: + andi I, N, 0x01 + beq ZERO, I, .L_M0 + + fld.d F0, S1, 0x00 + + fst.d F0, P4, 0x00 + + addi.d S1, S1, 0x08 + addi.d P4, P4, 0x08 + +.L_M0: + LDARG $r23, $sp, 0 + LDARG $r24, $sp, 8 + LDARG $r25, $sp, 16 + LDARG $r26, $sp, 24 + LDARG $r27, $sp, 32 + LDARG $r28, $sp, 40 + LDARG $r29, $sp, 48 + addi.d $sp, $sp, 56 + jirl $r0, $r1, 0x00 + + EPILOGUE diff --git a/kernel/loongarch64/gemm_ncopy_6.prefx.c b/kernel/loongarch64/gemm_ncopy_6.prefx.c new file mode 100644 index 0000000000..65680d4e32 --- /dev/null +++ b/kernel/loongarch64/gemm_ncopy_6.prefx.c @@ -0,0 +1,299 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. 
Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. 
*/ +/*********************************************************************/ + +#include <stdio.h> +#include "common.h" + +int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, FLOAT *b){ + BLASLONG i, j; + BLASLONG nmod6; + + FLOAT *aoffset; + FLOAT *aoffset1, *aoffset2, *aoffset3, *aoffset4; + FLOAT *aoffset5, *aoffset6 ; + + FLOAT *boffset; + FLOAT ctemp01, ctemp02, ctemp03, ctemp04; + FLOAT ctemp05, ctemp06, ctemp07, ctemp08; + FLOAT ctemp09, ctemp10, ctemp11, ctemp12; + FLOAT ctemp13, ctemp14, ctemp15, ctemp16; + FLOAT ctemp17, ctemp18, ctemp19, ctemp20; + FLOAT ctemp21, ctemp22, ctemp23, ctemp24; + + nmod6 = n - (n / 6)* 6 ; + aoffset = a; + boffset = b; + + // prefex A: 1 block, block size: 4*8 bytes, offset: 16*8 bytes, base: aoffset1,2,,6; + BLASULONG index = 0x100080; //( (1<<20)|(16<<3)&0xffff) ) ; + // prefex B: 1 block, block size: 24*8 bytes, offset: 96*8 bytes, base: boffset; + BLASULONG index_b = 0xb00300; //(11<<20) | ((96*8)&0xffff) ; + + j = (n / 6); + if (j > 0){ + do{ + aoffset1 = aoffset; + aoffset2 = aoffset1 + lda; + aoffset3 = aoffset2 + lda; + aoffset4 = aoffset3 + lda; + aoffset5 = aoffset4 + lda; + aoffset6 = aoffset5 + lda; + aoffset += 6 * lda; + + i = (m >> 2); + if (i > 0){ + do{ + ctemp01 = *(aoffset1 + 0); + ctemp02 = *(aoffset1 + 1); + ctemp03 = *(aoffset1 + 2); + ctemp04 = *(aoffset1 + 3); + + ctemp05 = *(aoffset2 + 0); + ctemp06 = *(aoffset2 + 1); + ctemp07 = *(aoffset2 + 2); + ctemp08 = *(aoffset2 + 3); + + ctemp09 = *(aoffset3 + 0); + ctemp10 = *(aoffset3 + 1); + ctemp11 = *(aoffset3 + 2); + ctemp12 = *(aoffset3 + 3); + + ctemp13 = *(aoffset4 + 0); + ctemp14 = *(aoffset4 + 1); + ctemp15 = *(aoffset4 + 2); + ctemp16 = *(aoffset4 + 3); + + ctemp17 = *(aoffset5 + 0); + ctemp18 = *(aoffset5 + 1); + ctemp19 = *(aoffset5 + 2); + ctemp20 = *(aoffset5 + 3); + + ctemp21 = *(aoffset6 + 0); + ctemp22 = *(aoffset6 + 1); + ctemp23 = *(aoffset6 + 2); + ctemp24 = *(aoffset6 + 3); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp05; + *(boffset 
+ 2) = ctemp09; + *(boffset + 3) = ctemp13; + *(boffset + 4) = ctemp17; + *(boffset + 5) = ctemp21; + + *(boffset + 6) = ctemp02; + *(boffset + 7) = ctemp06; + *(boffset + 8) = ctemp10; + *(boffset + 9) = ctemp14; + *(boffset + 10) = ctemp18; + *(boffset + 11) = ctemp22; + + *(boffset + 12) = ctemp03; + *(boffset + 13) = ctemp07; + *(boffset + 14) = ctemp11; + *(boffset + 15) = ctemp15; + *(boffset + 16) = ctemp19; + *(boffset + 17) = ctemp23; + + *(boffset + 18) = ctemp04; + *(boffset + 19) = ctemp08; + *(boffset + 20) = ctemp12; + *(boffset + 21) = ctemp16; + *(boffset + 22) = ctemp20; + *(boffset + 23) = ctemp24; + + aoffset1 += 4; + aoffset2 += 4; + aoffset3 += 4; + aoffset4 += 4; + aoffset5 += 4; + aoffset6 += 4; + + boffset += 24; + i --; + }while(i > 0); + } + + i = (m & 3); + if (i > 0){ + do{ + ctemp01 = *(aoffset1 + 0); + ctemp03 = *(aoffset2 + 0); + ctemp05 = *(aoffset3 + 0); + ctemp07 = *(aoffset4 + 0); + ctemp09 = *(aoffset5 + 0); + ctemp11 = *(aoffset6 + 0); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp03; + *(boffset + 2) = ctemp05; + *(boffset + 3) = ctemp07; + *(boffset + 4) = ctemp09; + *(boffset + 5) = ctemp11; + + aoffset1 ++; + aoffset2 ++; + aoffset3 ++; + aoffset4 ++; + aoffset5 ++; + aoffset6 ++; + boffset += 6; + i --; + }while(i > 0); + } + + j--; + }while(j > 0); + } /* end of if(j > 0) */ + + if (nmod6 & 4){ + aoffset1 = aoffset; + aoffset2 = aoffset1 + lda; + aoffset3 = aoffset2 + lda; + aoffset4 = aoffset3 + lda; + aoffset += 4 * lda; + + i = (m >> 1); + if (i > 0){ + do{ + ctemp01 = *(aoffset1 + 0); + ctemp02 = *(aoffset1 + 1); + ctemp03 = *(aoffset2 + 0); + ctemp04 = *(aoffset2 + 1); + + ctemp05 = *(aoffset3 + 0); + ctemp06 = *(aoffset3 + 1); + ctemp07 = *(aoffset4 + 0); + ctemp08 = *(aoffset4 + 1); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp03; + *(boffset + 2) = ctemp05; + *(boffset + 3) = ctemp07; + *(boffset + 4) = ctemp02; + *(boffset + 5) = ctemp04; + *(boffset + 6) = ctemp06; + *(boffset + 7) = ctemp08; + + 
aoffset1 += 2; + aoffset2 += 2; + aoffset3 += 2; + aoffset4 += 2; + boffset += 8; + + i --; + }while(i > 0); + } + + if (m & 1){ + ctemp01 = *(aoffset1 + 0); + ctemp03 = *(aoffset2 + 0); + ctemp05 = *(aoffset3 + 0); + ctemp07 = *(aoffset4 + 0); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp03; + *(boffset + 2) = ctemp05; + *(boffset + 3) = ctemp07; + boffset += 4; + } + } + + if (nmod6 & 2){ + aoffset1 = aoffset; + aoffset2 = aoffset1 + lda; + aoffset += 2 * lda; + + i = (m >> 1); + if (i > 0){ + do{ + ctemp01 = *(aoffset1 + 0); + ctemp02 = *(aoffset1 + 1); + ctemp03 = *(aoffset2 + 0); + ctemp04 = *(aoffset2 + 1); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp03; + *(boffset + 2) = ctemp02; + *(boffset + 3) = ctemp04; + + aoffset1 += 2; + aoffset2 += 2; + boffset += 4; + + i --; + }while(i > 0); + } + + if (m & 1){ + ctemp01 = *(aoffset1 + 0); + ctemp03 = *(aoffset2 + 0); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp03; + boffset += 2; + } + } + + if (nmod6 & 1){ + aoffset1 = aoffset; + + i = (m >> 1); + if (i > 0){ + do{ + ctemp01 = *(aoffset1 + 0); + ctemp02 = *(aoffset1 + 1); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp02; + + aoffset1 += 2; + boffset += 2; + + i --; + }while(i > 0); + } + + if (m & 1){ + ctemp01 = *(aoffset1 + 0); + + *(boffset + 0) = ctemp01; + } + } + + return 0; +} diff --git a/kernel/loongarch64/icamax_lsx.S b/kernel/loongarch64/icamax_lsx.S index a2fc9dbbd8..c22ade4b38 100644 --- a/kernel/loongarch64/icamax_lsx.S +++ b/kernel/loongarch64/icamax_lsx.S @@ -308,8 +308,6 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
vinsgr2vr.w x1, t3, 3 vinsgr2vr.w x2, t4, 3 addi.d I, I, -1 - vpickev.w x1, VX1, VX0 - vpickod.w x2, VX1, VX0 vfmul.s x3, VI4, x1 vfmul.s x4, VI4, x2 vfcmp.clt.s VT0, x1, VI3 diff --git a/kernel/loongarch64/trsm_kernel_LN_UNROLLN6.c b/kernel/loongarch64/trsm_kernel_LN_UNROLLN6.c new file mode 100644 index 0000000000..5e25a5e3e4 --- /dev/null +++ b/kernel/loongarch64/trsm_kernel_LN_UNROLLN6.c @@ -0,0 +1,342 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. 
IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. */ +/*********************************************************************/ + +#include "common.h" + +static FLOAT dm1 = -1.; + +#ifdef CONJ +#define GEMM_KERNEL GEMM_KERNEL_L +#else +#define GEMM_KERNEL GEMM_KERNEL_N +#endif + +#if GEMM_DEFAULT_UNROLL_M == 1 +#define GEMM_UNROLL_M_SHIFT 0 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 2 +#define GEMM_UNROLL_M_SHIFT 1 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 4 +#define GEMM_UNROLL_M_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 6 +#define GEMM_UNROLL_M_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 8 +#define GEMM_UNROLL_M_SHIFT 3 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 16 +#define GEMM_UNROLL_M_SHIFT 4 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 1 +#define GEMM_UNROLL_N_SHIFT 0 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 2 +#define GEMM_UNROLL_N_SHIFT 1 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 4 +#define GEMM_UNROLL_N_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 8 +#define GEMM_UNROLL_N_SHIFT 3 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 16 +#define GEMM_UNROLL_N_SHIFT 4 +#endif + +#ifndef COMPLEX + +static inline void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) 
{ + + FLOAT aa, bb; + + int i, j, k; + + a += (m - 1) * m; + b += (m - 1) * n; + + for (i = m - 1; i >= 0; i--) { + + aa = *(a + i); + + for (j = 0; j < n; j ++) { + bb = *(c + i + j * ldc); + bb *= aa; + *b = bb; + *(c + i + j * ldc) = bb; + b ++; + + for (k = 0; k < i; k ++){ + *(c + k + j * ldc) -= bb * *(a + k); + } + + } + a -= m; + b -= 2 * n; + } + +} + +#else + +static inline void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + + FLOAT aa1, aa2; + FLOAT bb1, bb2; + FLOAT cc1, cc2; + + int i, j, k; + + ldc *= 2; + a += (m - 1) * m * 2; + b += (m - 1) * n * 2; + + for (i = m - 1; i >= 0; i--) { + + aa1 = *(a + i * 2 + 0); + aa2 = *(a + i * 2 + 1); + + for (j = 0; j < n; j ++) { + bb1 = *(c + i * 2 + 0 + j * ldc); + bb2 = *(c + i * 2 + 1 + j * ldc); + +#ifndef CONJ + cc1 = aa1 * bb1 - aa2 * bb2; + cc2 = aa1 * bb2 + aa2 * bb1; +#else + cc1 = aa1 * bb1 + aa2 * bb2; + cc2 = aa1 * bb2 - aa2 * bb1; +#endif + + + *(b + 0) = cc1; + *(b + 1) = cc2; + *(c + i * 2 + 0 + j * ldc) = cc1; + *(c + i * 2 + 1 + j * ldc) = cc2; + b += 2; + + for (k = 0; k < i; k ++){ +#ifndef CONJ + *(c + k * 2 + 0 + j * ldc) -= cc1 * *(a + k * 2 + 0) - cc2 * *(a + k * 2 + 1); + *(c + k * 2 + 1 + j * ldc) -= cc1 * *(a + k * 2 + 1) + cc2 * *(a + k * 2 + 0); +#else + *(c + k * 2 + 0 + j * ldc) -= cc1 * *(a + k * 2 + 0) + cc2 * *(a + k * 2 + 1); + *(c + k * 2 + 1 + j * ldc) -= - cc1 * *(a + k * 2 + 1) + cc2 * *(a + k * 2 + 0); +#endif + } + + } + a -= m * 2; + b -= 4 * n; + } + +} + +#endif + + +int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT dummy1, +#ifdef COMPLEX + FLOAT dummy2, +#endif + FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLONG offset){ + + BLASLONG i, j; + FLOAT *aa, *cc; + BLASLONG kk; + +#if 0 + fprintf(stderr, "TRSM KERNEL LN : m = %3ld n = %3ld k = %3ld offset = %3ld\n", + m, n, k, offset); +#endif + + // j = (n >> GEMM_UNROLL_N_SHIFT); + j = (n / 6); + + while (j > 0) { + + kk = m + offset; + + if (m & (GEMM_UNROLL_M - 1)) { + for (i = 1; i 
< GEMM_UNROLL_M; i *= 2){ + if (m & i) { + aa = a + ((m & ~(i - 1)) - i) * k * COMPSIZE; + cc = c + ((m & ~(i - 1)) - i) * COMPSIZE; + + if (k - kk > 0) { + GEMM_KERNEL(i, GEMM_UNROLL_N, k - kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa + i * kk * COMPSIZE, + b + GEMM_UNROLL_N * kk * COMPSIZE, + cc, + ldc); + } + + solve(i, GEMM_UNROLL_N, + aa + (kk - i) * i * COMPSIZE, + b + (kk - i) * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); + + kk -= i; + } + } + } + + i = (m >> GEMM_UNROLL_M_SHIFT); + if (i > 0) { + aa = a + ((m & ~(GEMM_UNROLL_M - 1)) - GEMM_UNROLL_M) * k * COMPSIZE; + cc = c + ((m & ~(GEMM_UNROLL_M - 1)) - GEMM_UNROLL_M) * COMPSIZE; + + do { + if (k - kk > 0) { + GEMM_KERNEL(GEMM_UNROLL_M, GEMM_UNROLL_N, k - kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa + GEMM_UNROLL_M * kk * COMPSIZE, + b + GEMM_UNROLL_N * kk * COMPSIZE, + cc, + ldc); + } + + solve(GEMM_UNROLL_M, GEMM_UNROLL_N, + aa + (kk - GEMM_UNROLL_M) * GEMM_UNROLL_M * COMPSIZE, + b + (kk - GEMM_UNROLL_M) * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); + + aa -= GEMM_UNROLL_M * k * COMPSIZE; + cc -= GEMM_UNROLL_M * COMPSIZE; + kk -= GEMM_UNROLL_M; + i --; + } while (i > 0); + } + + b += GEMM_UNROLL_N * k * COMPSIZE; + c += GEMM_UNROLL_N * ldc * COMPSIZE; + j --; + } + + BLASLONG nmodN = n - n/6*6 ; + + // if (n & (GEMM_UNROLL_N - 1)) { + if (nmodN) { + + // j = (GEMM_UNROLL_N >> 1); + j = 4; + while (j > 0) { + if (nmodN & j) { + + kk = m + offset; + + if (m & (GEMM_UNROLL_M - 1)) { + for (i = 1; i < GEMM_UNROLL_M; i *= 2){ + if (m & i) { + aa = a + ((m & ~(i - 1)) - i) * k * COMPSIZE; + cc = c + ((m & ~(i - 1)) - i) * COMPSIZE; + + if (k - kk > 0) { + GEMM_KERNEL(i, j, k - kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa + i * kk * COMPSIZE, + b + j * kk * COMPSIZE, + cc, ldc); + } + + solve(i, j, + aa + (kk - i) * i * COMPSIZE, + b + (kk - i) * j * COMPSIZE, + cc, ldc); + + kk -= i; + } + } + } + + i = (m >> GEMM_UNROLL_M_SHIFT); + if (i > 0) { + aa = a + ((m & ~(GEMM_UNROLL_M - 1)) - GEMM_UNROLL_M) * k * COMPSIZE; 
+ cc = c + ((m & ~(GEMM_UNROLL_M - 1)) - GEMM_UNROLL_M) * COMPSIZE; + + do { + if (k - kk > 0) { + GEMM_KERNEL(GEMM_UNROLL_M, j, k - kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa + GEMM_UNROLL_M * kk * COMPSIZE, + b + j * kk * COMPSIZE, + cc, + ldc); + } + + solve(GEMM_UNROLL_M, j, + aa + (kk - GEMM_UNROLL_M) * GEMM_UNROLL_M * COMPSIZE, + b + (kk - GEMM_UNROLL_M) * j * COMPSIZE, + cc, ldc); + + aa -= GEMM_UNROLL_M * k * COMPSIZE; + cc -= GEMM_UNROLL_M * COMPSIZE; + kk -= GEMM_UNROLL_M; + i --; + } while (i > 0); + } + + b += j * k * COMPSIZE; + c += j * ldc * COMPSIZE; + } + j >>= 1; + } + } + + return 0; +} diff --git a/kernel/loongarch64/trsm_kernel_LT_UNROLLN6.c b/kernel/loongarch64/trsm_kernel_LT_UNROLLN6.c new file mode 100644 index 0000000000..2106c88cae --- /dev/null +++ b/kernel/loongarch64/trsm_kernel_LT_UNROLLN6.c @@ -0,0 +1,327 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. 
IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. */ +/*********************************************************************/ + +#include "common.h" + +static FLOAT dm1 = -1.; + +#ifdef CONJ +#define GEMM_KERNEL GEMM_KERNEL_L +#else +#define GEMM_KERNEL GEMM_KERNEL_N +#endif + +#if GEMM_DEFAULT_UNROLL_M == 1 +#define GEMM_UNROLL_M_SHIFT 0 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 2 +#define GEMM_UNROLL_M_SHIFT 1 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 4 +#define GEMM_UNROLL_M_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 6 +#define GEMM_UNROLL_M_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 8 +#define GEMM_UNROLL_M_SHIFT 3 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 16 +#define GEMM_UNROLL_M_SHIFT 4 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 1 +#define GEMM_UNROLL_N_SHIFT 0 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 2 +#define GEMM_UNROLL_N_SHIFT 1 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 4 +#define GEMM_UNROLL_N_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 8 +#define GEMM_UNROLL_N_SHIFT 3 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 16 +#define GEMM_UNROLL_N_SHIFT 4 +#endif + +#ifndef COMPLEX + +static inline void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) 
{ + + FLOAT aa, bb; + + int i, j, k; + + for (i = 0; i < m; i++) { + + aa = *(a + i); + + for (j = 0; j < n; j ++) { + bb = *(c + i + j * ldc); + bb *= aa; + *b = bb; + *(c + i + j * ldc) = bb; + b ++; + + for (k = i + 1; k < m; k ++){ + *(c + k + j * ldc) -= bb * *(a + k); + } + + } + a += m; + } +} + +#else + +static inline void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + + FLOAT aa1, aa2; + FLOAT bb1, bb2; + FLOAT cc1, cc2; + + int i, j, k; + + ldc *= 2; + + for (i = 0; i < m; i++) { + + aa1 = *(a + i * 2 + 0); + aa2 = *(a + i * 2 + 1); + + for (j = 0; j < n; j ++) { + bb1 = *(c + i * 2 + 0 + j * ldc); + bb2 = *(c + i * 2 + 1 + j * ldc); + +#ifndef CONJ + cc1 = aa1 * bb1 - aa2 * bb2; + cc2 = aa1 * bb2 + aa2 * bb1; +#else + cc1 = aa1 * bb1 + aa2 * bb2; + cc2 = aa1 * bb2 - aa2 * bb1; +#endif + + *(b + 0) = cc1; + *(b + 1) = cc2; + *(c + i * 2 + 0 + j * ldc) = cc1; + *(c + i * 2 + 1 + j * ldc) = cc2; + b += 2; + + for (k = i + 1; k < m; k ++){ +#ifndef CONJ + *(c + k * 2 + 0 + j * ldc) -= cc1 * *(a + k * 2 + 0) - cc2 * *(a + k * 2 + 1); + *(c + k * 2 + 1 + j * ldc) -= cc1 * *(a + k * 2 + 1) + cc2 * *(a + k * 2 + 0); +#else + *(c + k * 2 + 0 + j * ldc) -= cc1 * *(a + k * 2 + 0) + cc2 * *(a + k * 2 + 1); + *(c + k * 2 + 1 + j * ldc) -= -cc1 * *(a + k * 2 + 1) + cc2 * *(a + k * 2 + 0); +#endif + } + + } + a += m * 2; + } +} + +#endif + + +int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT dummy1, +#ifdef COMPLEX + FLOAT dummy2, +#endif + FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLONG offset){ + + FLOAT *aa, *cc; + BLASLONG kk; + BLASLONG i, j, jj; + +#if 0 + fprintf(stderr, "TRSM KERNEL LT : m = %3ld n = %3ld k = %3ld offset = %3ld\n", + m, n, k, offset); +#endif + + jj = 0; + + // j = (n >> GEMM_UNROLL_N_SHIFT); + j = (n / 6); + + while (j > 0) { + + kk = offset; + aa = a; + cc = c; + + i = (m >> GEMM_UNROLL_M_SHIFT); + + while (i > 0) { + + if (kk > 0) { + GEMM_KERNEL(GEMM_UNROLL_M, GEMM_UNROLL_N, kk, dm1, +#ifdef COMPLEX + 
ZERO, +#endif + aa, b, cc, ldc); + } + + solve(GEMM_UNROLL_M, GEMM_UNROLL_N, + aa + kk * GEMM_UNROLL_M * COMPSIZE, + b + kk * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); + + aa += GEMM_UNROLL_M * k * COMPSIZE; + cc += GEMM_UNROLL_M * COMPSIZE; + kk += GEMM_UNROLL_M; + i --; + } + + if (m & (GEMM_UNROLL_M - 1)) { + i = (GEMM_UNROLL_M >> 1); + while (i > 0) { + if (m & i) { + if (kk > 0) { + GEMM_KERNEL(i, GEMM_UNROLL_N, kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa, b, cc, ldc); + } + solve(i, GEMM_UNROLL_N, + aa + kk * i * COMPSIZE, + b + kk * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); + + aa += i * k * COMPSIZE; + cc += i * COMPSIZE; + kk += i; + } + i >>= 1; + } + } + + b += GEMM_UNROLL_N * k * COMPSIZE; + c += GEMM_UNROLL_N * ldc * COMPSIZE; + j --; + jj += GEMM_UNROLL_M; + } + + BLASLONG nmodN = n - n/6*6 ; + + // if (n & (GEMM_UNROLL_N - 1)) { + if (nmodN) { + + // j = (GEMM_UNROLL_N >> 1); + j = 4; + + while (j > 0) { + if (nmodN & j) { + + kk = offset; + aa = a; + cc = c; + + i = (m >> GEMM_UNROLL_M_SHIFT); + + while (i > 0) { + if (kk > 0) { + GEMM_KERNEL(GEMM_UNROLL_M, j, kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa, + b, + cc, + ldc); + } + + solve(GEMM_UNROLL_M, j, + aa + kk * GEMM_UNROLL_M * COMPSIZE, + b + kk * j * COMPSIZE, cc, ldc); + + aa += GEMM_UNROLL_M * k * COMPSIZE; + cc += GEMM_UNROLL_M * COMPSIZE; + kk += GEMM_UNROLL_M; + i --; + } + + if (m & (GEMM_UNROLL_M - 1)) { + i = (GEMM_UNROLL_M >> 1); + while (i > 0) { + if (m & i) { + if (kk > 0) { + GEMM_KERNEL(i, j, kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa, + b, + cc, + ldc); + } + + solve(i, j, + aa + kk * i * COMPSIZE, + b + kk * j * COMPSIZE, cc, ldc); + + aa += i * k * COMPSIZE; + cc += i * COMPSIZE; + kk += i; + } + i >>= 1; + } + } + + b += j * k * COMPSIZE; + c += j * ldc * COMPSIZE; + } + j >>= 1; + } + } + + return 0; +} diff --git a/kernel/loongarch64/trsm_kernel_RN_UNROLLN6.c b/kernel/loongarch64/trsm_kernel_RN_UNROLLN6.c new file mode 100644 index 0000000000..42d5155c33 --- /dev/null +++ 
b/kernel/loongarch64/trsm_kernel_RN_UNROLLN6.c @@ -0,0 +1,325 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. 
*/ +/*********************************************************************/ + +#include "common.h" + +static FLOAT dm1 = -1.; + +#ifdef CONJ +#define GEMM_KERNEL GEMM_KERNEL_R +#else +#define GEMM_KERNEL GEMM_KERNEL_N +#endif + +#if GEMM_DEFAULT_UNROLL_M == 1 +#define GEMM_UNROLL_M_SHIFT 0 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 2 +#define GEMM_UNROLL_M_SHIFT 1 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 4 +#define GEMM_UNROLL_M_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 6 +#define GEMM_UNROLL_M_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 8 +#define GEMM_UNROLL_M_SHIFT 3 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 16 +#define GEMM_UNROLL_M_SHIFT 4 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 1 +#define GEMM_UNROLL_N_SHIFT 0 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 2 +#define GEMM_UNROLL_N_SHIFT 1 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 4 +#define GEMM_UNROLL_N_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 8 +#define GEMM_UNROLL_N_SHIFT 3 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 16 +#define GEMM_UNROLL_N_SHIFT 4 +#endif + +#ifndef COMPLEX + +static inline void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + + FLOAT aa, bb; + + int i, j, k; + + for (i = 0; i < n; i++) { + + bb = *(b + i); + + for (j = 0; j < m; j ++) { + aa = *(c + j + i * ldc); + aa *= bb; + *a = aa; + *(c + j + i * ldc) = aa; + a ++; + + for (k = i + 1; k < n; k ++){ + *(c + j + k * ldc) -= aa * *(b + k); + } + + } + b += n; + } +} + +#else + +static inline void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + + FLOAT aa1, aa2; + FLOAT bb1, bb2; + FLOAT cc1, cc2; + + int i, j, k; + + ldc *= 2; + + for (i = 0; i < n; i++) { + + bb1 = *(b + i * 2 + 0); + bb2 = *(b + i * 2 + 1); + + for (j = 0; j < m; j ++) { + aa1 = *(c + j * 2 + 0 + i * ldc); + aa2 = *(c + j * 2 + 1 + i * ldc); + +#ifndef CONJ + cc1 = aa1 * bb1 - aa2 * bb2; + cc2 = aa1 * bb2 + aa2 * bb1; +#else + cc1 = aa1 * bb1 + aa2 * bb2; + cc2 = -aa1 * bb2 + aa2 * bb1; +#endif + + *(a + 0) = 
cc1; + *(a + 1) = cc2; + *(c + j * 2 + 0 + i * ldc) = cc1; + *(c + j * 2 + 1 + i * ldc) = cc2; + a += 2; + + for (k = i + 1; k < n; k ++){ +#ifndef CONJ + *(c + j * 2 + 0 + k * ldc) -= cc1 * *(b + k * 2 + 0) - cc2 * *(b + k * 2 + 1); + *(c + j * 2 + 1 + k * ldc) -= cc1 * *(b + k * 2 + 1) + cc2 * *(b + k * 2 + 0); +#else + *(c + j * 2 + 0 + k * ldc) -= cc1 * *(b + k * 2 + 0) + cc2 * *(b + k * 2 + 1); + *(c + j * 2 + 1 + k * ldc) -= - cc1 * *(b + k * 2 + 1) + cc2 * *(b + k * 2 + 0); +#endif + } + + } + b += n * 2; + } +} + +#endif + + +int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT dummy1, +#ifdef COMPLEX + FLOAT dummy2, +#endif + FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLONG offset){ + + FLOAT *aa, *cc; + BLASLONG kk; + BLASLONG i, j, jj; + +#if 0 + fprintf(stderr, "TRSM RN KERNEL m = %3ld n = %3ld k = %3ld offset = %3ld\n", + m, n, k, offset); +#endif + + jj = 0; + // j = (n >> GEMM_UNROLL_N_SHIFT); + j = (n / 6); + kk = -offset; + + while (j > 0) { + + aa = a; + cc = c; + + i = (m >> GEMM_UNROLL_M_SHIFT); + + if (i > 0) { + do { + if (kk > 0) { + GEMM_KERNEL(GEMM_UNROLL_M, GEMM_UNROLL_N, kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa, b, cc, ldc); + } + + solve(GEMM_UNROLL_M, GEMM_UNROLL_N, + aa + kk * GEMM_UNROLL_M * COMPSIZE, + b + kk * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); + + aa += GEMM_UNROLL_M * k * COMPSIZE; + cc += GEMM_UNROLL_M * COMPSIZE; + i --; + } while (i > 0); + } + + + if (m & (GEMM_UNROLL_M - 1)) { + i = (GEMM_UNROLL_M >> 1); + while (i > 0) { + if (m & i) { + if (kk > 0) { + GEMM_KERNEL(i, GEMM_UNROLL_N, kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa, b, cc, ldc); + } + solve(i, GEMM_UNROLL_N, + aa + kk * i * COMPSIZE, + b + kk * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); + + aa += i * k * COMPSIZE; + cc += i * COMPSIZE; + } + i >>= 1; + } + } + + kk += GEMM_UNROLL_N; + b += GEMM_UNROLL_N * k * COMPSIZE; + c += GEMM_UNROLL_N * ldc * COMPSIZE; + j --; + jj += GEMM_UNROLL_M; + } + + BLASLONG nmodN = n - n/6*6 ; + + // if (n & (GEMM_UNROLL_N - 
1)) { + if (nmodN) { + + // j = (GEMM_UNROLL_N >> 1); + j = 4; + + while (j > 0) { + if (nmodN & j) { + + aa = a; + cc = c; + + i = (m >> GEMM_UNROLL_M_SHIFT); + + while (i > 0) { + if (kk > 0) { + GEMM_KERNEL(GEMM_UNROLL_M, j, kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa, + b, + cc, + ldc); + } + + solve(GEMM_UNROLL_M, j, + aa + kk * GEMM_UNROLL_M * COMPSIZE, + b + kk * j * COMPSIZE, cc, ldc); + + aa += GEMM_UNROLL_M * k * COMPSIZE; + cc += GEMM_UNROLL_M * COMPSIZE; + i --; + } + + if (m & (GEMM_UNROLL_M - 1)) { + i = (GEMM_UNROLL_M >> 1); + while (i > 0) { + if (m & i) { + if (kk > 0) { + GEMM_KERNEL(i, j, kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa, + b, + cc, + ldc); + } + + solve(i, j, + aa + kk * i * COMPSIZE, + b + kk * j * COMPSIZE, cc, ldc); + + aa += i * k * COMPSIZE; + cc += i * COMPSIZE; + } + i >>= 1; + } + } + + b += j * k * COMPSIZE; + c += j * ldc * COMPSIZE; + kk += j; + } + j >>= 1; + } + } + + return 0; +} diff --git a/kernel/loongarch64/trsm_kernel_RT_UNROLLN6.c b/kernel/loongarch64/trsm_kernel_RT_UNROLLN6.c new file mode 100644 index 0000000000..7424ad5791 --- /dev/null +++ b/kernel/loongarch64/trsm_kernel_RT_UNROLLN6.c @@ -0,0 +1,351 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. 
*/ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. 
*/ +/*********************************************************************/ + +#include "common.h" + +static FLOAT dm1 = -1.; + +#ifdef CONJ +#define GEMM_KERNEL GEMM_KERNEL_R +#else +#define GEMM_KERNEL GEMM_KERNEL_N +#endif + +#if GEMM_DEFAULT_UNROLL_M == 1 +#define GEMM_UNROLL_M_SHIFT 0 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 2 +#define GEMM_UNROLL_M_SHIFT 1 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 4 +#define GEMM_UNROLL_M_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 6 +#define GEMM_UNROLL_M_SHIFT 2 +#endif + + +#if GEMM_DEFAULT_UNROLL_M == 8 +#define GEMM_UNROLL_M_SHIFT 3 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 16 +#define GEMM_UNROLL_M_SHIFT 4 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 1 +#define GEMM_UNROLL_N_SHIFT 0 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 2 +#define GEMM_UNROLL_N_SHIFT 1 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 4 +#define GEMM_UNROLL_N_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 8 +#define GEMM_UNROLL_N_SHIFT 3 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 16 +#define GEMM_UNROLL_N_SHIFT 4 +#endif + + +#ifndef COMPLEX + +static inline void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + + FLOAT aa, bb; + + int i, j, k; + + a += (n - 1) * m; + b += (n - 1) * n; + + for (i = n - 1; i >= 0; i--) { + + bb = *(b + i); + + for (j = 0; j < m; j ++) { + aa = *(c + j + i * ldc); + aa *= bb; + *a = aa; + *(c + j + i * ldc) = aa; + a ++; + + for (k = 0; k < i; k ++){ + *(c + j + k * ldc) -= aa * *(b + k); + } + + } + b -= n; + a -= 2 * m; + } + +} + +#else + +static inline void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + + FLOAT aa1, aa2; + FLOAT bb1, bb2; + FLOAT cc1, cc2; + + int i, j, k; + + ldc *= 2; + + a += (n - 1) * m * 2; + b += (n - 1) * n * 2; + + for (i = n - 1; i >= 0; i--) { + + bb1 = *(b + i * 2 + 0); + bb2 = *(b + i * 2 + 1); + + for (j = 0; j < m; j ++) { + + aa1 = *(c + j * 2 + 0 + i * ldc); + aa2 = *(c + j * 2 + 1 + i * ldc); + +#ifndef CONJ + cc1 = aa1 * bb1 - aa2 * bb2; + 
cc2 = aa1 * bb2 + aa2 * bb1; +#else + cc1 = aa1 * bb1 + aa2 * bb2; + cc2 = - aa1 * bb2 + aa2 * bb1; +#endif + + *(a + 0) = cc1; + *(a + 1) = cc2; + + *(c + j * 2 + 0 + i * ldc) = cc1; + *(c + j * 2 + 1 + i * ldc) = cc2; + a += 2; + + for (k = 0; k < i; k ++){ +#ifndef CONJ + *(c + j * 2 + 0 + k * ldc) -= cc1 * *(b + k * 2 + 0) - cc2 * *(b + k * 2 + 1); + *(c + j * 2 + 1 + k * ldc) -= cc1 * *(b + k * 2 + 1) + cc2 * *(b + k * 2 + 0); +#else + *(c + j * 2 + 0 + k * ldc) -= cc1 * *(b + k * 2 + 0) + cc2 * *(b + k * 2 + 1); + *(c + j * 2 + 1 + k * ldc) -= -cc1 * *(b + k * 2 + 1) + cc2 * *(b + k * 2 + 0); +#endif + } + + } + b -= n * 2; + a -= 4 * m; + } + +} + +#endif + +int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT dummy1, +#ifdef COMPLEX + FLOAT dummy2, +#endif + FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLONG offset){ + + BLASLONG i, j; + FLOAT *aa, *cc; + BLASLONG kk; + +#if 0 + fprintf(stderr, "TRSM RT KERNEL m = %3ld n = %3ld k = %3ld offset = %3ld\n", + m, n, k, offset); +#endif + + kk = n - offset; + c += n * ldc * COMPSIZE; + b += n * k * COMPSIZE; + + + BLASLONG nmodN = n - n/6*6 ; + + // if (n & (GEMM_UNROLL_N - 1)) { + if (nmodN) { + + j = 1; + while (j < GEMM_UNROLL_N) { + if (nmodN & j) { + + aa = a; + b -= j * k * COMPSIZE; + c -= j * ldc* COMPSIZE; + cc = c; + + i = (m >> GEMM_UNROLL_M_SHIFT); + if (i > 0) { + + do { + if (k - kk > 0) { + GEMM_KERNEL(GEMM_UNROLL_M, j, k - kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa + GEMM_UNROLL_M * kk * COMPSIZE, + b + j * kk * COMPSIZE, + cc, + ldc); + } + + solve(GEMM_UNROLL_M, j, + aa + (kk - j) * GEMM_UNROLL_M * COMPSIZE, + b + (kk - j) * j * COMPSIZE, + cc, ldc); + + aa += GEMM_UNROLL_M * k * COMPSIZE; + cc += GEMM_UNROLL_M * COMPSIZE; + i --; + } while (i > 0); + } + + if (m & (GEMM_UNROLL_M - 1)) { + i = (GEMM_UNROLL_M >> 1); + do { + if (m & i) { + + if (k - kk > 0) { + GEMM_KERNEL(i, j, k - kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa + i * kk * COMPSIZE, + b + j * kk * COMPSIZE, + cc, ldc); + } + 
+ solve(i, j, + aa + (kk - j) * i * COMPSIZE, + b + (kk - j) * j * COMPSIZE, + cc, ldc); + + aa += i * k * COMPSIZE; + cc += i * COMPSIZE; + + } + i >>= 1; + } while (i > 0); + } + kk -= j; + } + j <<= 1; + } + } + + // j = (n >> GEMM_UNROLL_N_SHIFT); + j = (n / 6); + + if (j > 0) { + + do { + aa = a; + b -= GEMM_UNROLL_N * k * COMPSIZE; + c -= GEMM_UNROLL_N * ldc * COMPSIZE; + cc = c; + + i = (m >> GEMM_UNROLL_M_SHIFT); + if (i > 0) { + do { + if (k - kk > 0) { + GEMM_KERNEL(GEMM_UNROLL_M, GEMM_UNROLL_N, k - kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa + GEMM_UNROLL_M * kk * COMPSIZE, + b + GEMM_UNROLL_N * kk * COMPSIZE, + cc, + ldc); + } + + solve(GEMM_UNROLL_M, GEMM_UNROLL_N, + aa + (kk - GEMM_UNROLL_N) * GEMM_UNROLL_M * COMPSIZE, + b + (kk - GEMM_UNROLL_N) * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); + + aa += GEMM_UNROLL_M * k * COMPSIZE; + cc += GEMM_UNROLL_M * COMPSIZE; + i --; + } while (i > 0); + } + + if (m & (GEMM_UNROLL_M - 1)) { + i = (GEMM_UNROLL_M >> 1); + do { + if (m & i) { + if (k - kk > 0) { + GEMM_KERNEL(i, GEMM_UNROLL_N, k - kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa + i * kk * COMPSIZE, + b + GEMM_UNROLL_N * kk * COMPSIZE, + cc, + ldc); + } + + solve(i, GEMM_UNROLL_N, + aa + (kk - GEMM_UNROLL_N) * i * COMPSIZE, + b + (kk - GEMM_UNROLL_N) * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); + + aa += i * k * COMPSIZE; + cc += i * COMPSIZE; + } + i >>= 1; + } while (i > 0); + } + + kk -= GEMM_UNROLL_N; + j --; + } while (j > 0); + } + + return 0; +} + + diff --git a/param.h b/param.h index 84e241fcd2..2ebe824db5 100644 --- a/param.h +++ b/param.h @@ -2856,7 +2856,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#define ZGEMM_DEFAULT_UNROLL_N 4 #define ZGEMM_DEFAULT_UNROLL_M 1 #else -#define DGEMM_DEFAULT_UNROLL_N 4 +#define DGEMM_DEFAULT_UNROLL_N 6 #define DGEMM_DEFAULT_UNROLL_M 16 #define SGEMM_DEFAULT_UNROLL_N 8 #define SGEMM_DEFAULT_UNROLL_M 16 @@ -2864,6 +2864,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define CGEMM_DEFAULT_UNROLL_M 16 #define ZGEMM_DEFAULT_UNROLL_N 4 #define ZGEMM_DEFAULT_UNROLL_M 8 +#define DGEMM_DEFAULT_UNROLL_MN 96 #endif #define QGEMM_DEFAULT_UNROLL_N 2