From dda211b20e964a042e7d294f204f3b7625fcff8a Mon Sep 17 00:00:00 2001 From: Martin Kroeker Date: Wed, 15 Jan 2025 22:46:23 +0100 Subject: [PATCH] Protect align directives that are currently problematic with LLVM on WoA --- kernel/arm64/copy_thunderx2t99.c | 433 +++++++++---------- kernel/arm64/dasum_thunderx2t99.c | 522 +++++++++++----------- kernel/arm64/dot_kernel_asimd.c | 691 +++++++++++++++--------------- kernel/arm64/sasum_thunderx2t99.c | 525 ++++++++++++----------- kernel/arm64/zasum_thunderx2t99.c | 525 ++++++++++++----------- 5 files changed, 1351 insertions(+), 1345 deletions(-) diff --git a/kernel/arm64/copy_thunderx2t99.c b/kernel/arm64/copy_thunderx2t99.c index e318761391..263cc30130 100644 --- a/kernel/arm64/copy_thunderx2t99.c +++ b/kernel/arm64/copy_thunderx2t99.c @@ -1,216 +1,217 @@ -/*************************************************************************** -Copyright (c) 2017, The OpenBLAS Project -All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: -1. Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in -the documentation and/or other materials provided with the -distribution. -3. Neither the name of the OpenBLAS project nor the names of -its contributors may be used to endorse or promote products -derived from this software without specific prior written permission. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE -USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*****************************************************************************/ - -#include "common.h" - -#include -#define N "x0" /* vector length */ -#define X "x1" /* X vector address */ -#define INC_X "x2" /* X stride */ -#define Y "x3" /* Y vector address */ -#define INC_Y "x4" /* Y stride */ -#define J "x5" /* loop variable */ - -/******************************************************************************* -* Macro definitions -*******************************************************************************/ -#if !defined(COMPLEX) -#if !defined(DOUBLE) -#define TMPF "s0" -#define INC_SHIFT "2" -#define N_DIV_SHIFT "2" -#define N_REM_MASK "3" -#else -#define TMPF "d0" -#define INC_SHIFT "3" -#define N_DIV_SHIFT "1" -#define N_REM_MASK "1" -#endif -#else -#if !defined(DOUBLE) -#define TMPF "d0" -#define INC_SHIFT "3" -#define N_DIV_SHIFT "1" -#define N_REM_MASK "1" -#else -#define TMPF "q0" -#define INC_SHIFT "4" -#define N_DIV_SHIFT "0" -#define N_REM_MASK "0" -#endif -#endif - -#define KERNEL_F1 \ - "ldr "TMPF", ["X"] \n" \ - "add "X", "X", "INC_X" \n" \ - "str "TMPF", ["Y"] \n" \ - "add "Y", "Y", "INC_Y" \n" - -#define KERNEL_F \ - "ldr q0, ["X"], #16 \n" \ - "str q0, ["Y"], #16 \n" - -#define INIT \ - "lsl "INC_X", "INC_X", #"INC_SHIFT" \n" \ - "lsl "INC_Y", "INC_Y", #"INC_SHIFT" \n" - - -static int do_copy(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y) -{ - if ( n < 0 ) return 0; - - __asm__ __volatile__ ( - " mov "N", %[N_] \n" - " mov "X", %[X_] \n" - " mov "INC_X", %[INCX_] \n" - " mov "Y", %[Y_] \n" - " mov "INC_Y", %[INCY_] \n" - " cmp "N", xzr \n" - " ble 8f //copy_kernel_L999 \n" - " cmp "INC_X", #1 \n" - " bne 4f //copy_kernel_S_BEGIN \n" - " cmp "INC_Y", #1 \n" - " bne 4f //copy_kernel_S_BEGIN \n" - - "// .Lcopy_kernel_F_BEGIN: \n" - " "INIT" \n" - " asr "J", "N", #"N_DIV_SHIFT" \n" - " cmp "J", xzr \n" - " beq 2f //copy_kernel_F1 \n" - " .align 5 \n" - - "1: //copy_kernel_F: \n" - " "KERNEL_F" \n" - " subs "J", "J", #1 \n" - " bne 1b //copy_kernel_F \n" - - "2: //copy_kernel_F1: \n" -#if defined(COMPLEX) && defined(DOUBLE) - " b 8f //copy_kernel_L999 \n" -#else - " ands "J", "N", #"N_REM_MASK" \n" - " ble 8f //copy_kernel_L999 \n" -#endif - - "3: //copy_kernel_F10: \n" - " "KERNEL_F1" \n" - " subs "J", "J", #1 \n" - " bne 3b //copy_kernel_F10 \n" - " b 8f //copy_kernel_L999 \n" - - "4: //copy_kernel_S_BEGIN: \n" - " "INIT" \n" - " asr "J", "N", #2 \n" - " cmp "J", xzr \n" - " ble 6f //copy_kernel_S1 \n" - - "5: //copy_kernel_S4: \n" - " "KERNEL_F1" \n" - " "KERNEL_F1" \n" - " "KERNEL_F1" \n" - " "KERNEL_F1" \n" - " subs "J", "J", #1 \n" - " bne 5b //copy_kernel_S4 \n" - - "6: //copy_kernel_S1: \n" - " ands "J", "N", #3 \n" - " ble 8f //copy_kernel_L999 \n" - - "7: //copy_kernel_S10: \n" - " "KERNEL_F1" \n" - " subs "J", "J", #1 \n" - " bne 7b //copy_kernel_S10 \n" - - "8: //copy_kernel_L999: \n" - - : - : [N_] "r" (n), //%1 - [X_] "r" (x), //%2 - [INCX_] "r" (inc_x), //%3 - [Y_] "r" (y), //%4 - [INCY_] "r" (inc_y) //%5 - : "cc", - "memory", - "x0", "x1", "x2", "x3", "x4", "x5", - "d0" - ); - - return 0; -} - -#if defined(SMP) -static int copy_thread_function(BLASLONG n, BLASLONG dummy0, - BLASLONG dummy1, FLOAT dummy2, FLOAT *x, BLASLONG inc_x, FLOAT *y, - BLASLONG inc_y, FLOAT *dummy3, BLASLONG dummy4) -{ - do_copy(n, x, inc_x, y, inc_y); - - return 0; -} -#endif - -int CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y) -{ -#if defined(SMP) - int nthreads; - FLOAT dummy_alpha; -#endif - - if (n <= 0) return 0; - -#if defined(SMP) - 
if (inc_x == 0 || n <= 10000) - nthreads = 1; - else - nthreads = num_cpu_avail(1); - - if (nthreads == 1) { - do_copy(n, x, inc_x, y, inc_y); - } else { - int mode = 0; - -#if !defined(COMPLEX) - mode = BLAS_REAL; -#else - mode = BLAS_COMPLEX; -#endif -#if !defined(DOUBLE) - mode |= BLAS_SINGLE; -#else - mode |= BLAS_DOUBLE; -#endif - - blas_level1_thread(mode, n, 0, 0, &dummy_alpha, - x, inc_x, y, inc_y, NULL, 0, - ( void *)copy_thread_function, nthreads); - } -#else - do_copy(n, x, inc_x, y, inc_y); -#endif - - return 0; -} +/*************************************************************************** +Copyright (c) 2017, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/ + +#include "common.h" + +#include +#define N "x0" /* vector length */ +#define X "x1" /* X vector address */ +#define INC_X "x2" /* X stride */ +#define Y "x3" /* Y vector address */ +#define INC_Y "x4" /* Y stride */ +#define J "x5" /* loop variable */ + +/******************************************************************************* +* Macro definitions +*******************************************************************************/ +#if !defined(COMPLEX) +#if !defined(DOUBLE) +#define TMPF "s0" +#define INC_SHIFT "2" +#define N_DIV_SHIFT "2" +#define N_REM_MASK "3" +#else +#define TMPF "d0" +#define INC_SHIFT "3" +#define N_DIV_SHIFT "1" +#define N_REM_MASK "1" +#endif +#else +#if !defined(DOUBLE) +#define TMPF "d0" +#define INC_SHIFT "3" +#define N_DIV_SHIFT "1" +#define N_REM_MASK "1" +#else +#define TMPF "q0" +#define INC_SHIFT "4" +#define N_DIV_SHIFT "0" +#define N_REM_MASK "0" +#endif +#endif + +#define KERNEL_F1 \ + "ldr "TMPF", ["X"] \n" \ + "add "X", "X", "INC_X" \n" \ + "str "TMPF", ["Y"] \n" \ + "add "Y", "Y", "INC_Y" \n" + +#define KERNEL_F \ + "ldr q0, ["X"], #16 \n" \ + "str q0, ["Y"], #16 \n" + +#define INIT \ + "lsl "INC_X", "INC_X", #"INC_SHIFT" \n" \ + "lsl "INC_Y", "INC_Y", #"INC_SHIFT" \n" + + +static int do_copy(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y) +{ + if ( n < 0 ) return 0; + + __asm__ __volatile__ ( + " mov "N", %[N_] \n" + " mov "X", %[X_] \n" + " mov "INC_X", %[INCX_] \n" + " mov "Y", %[Y_] \n" + " mov "INC_Y", %[INCY_] \n" + " cmp "N", xzr \n" + " ble 8f //copy_kernel_L999 \n" + " cmp "INC_X", #1 \n" + " bne 4f //copy_kernel_S_BEGIN \n" + " cmp "INC_Y", #1 \n" + " bne 4f //copy_kernel_S_BEGIN \n" + + "// .Lcopy_kernel_F_BEGIN: \n" + " "INIT" \n" + " asr "J", "N", #"N_DIV_SHIFT" \n" + " cmp "J", xzr \n" + " beq 2f //copy_kernel_F1 \n" +#if !(defined(__clang__) && defined(OS_WINDOWS)) + " .align 5 \n" +#endif + "1: //copy_kernel_F: \n" + " "KERNEL_F" \n" + " subs "J", "J", #1 \n" + " bne 1b //copy_kernel_F \n" + + "2: //copy_kernel_F1: \n" +#if defined(COMPLEX) && defined(DOUBLE) + " b 8f //copy_kernel_L999 \n" +#else + " ands "J", "N", #"N_REM_MASK" \n" + " ble 8f //copy_kernel_L999 \n" +#endif + + "3: //copy_kernel_F10: \n" + " "KERNEL_F1" \n" + " subs "J", "J", #1 \n" + " bne 3b //copy_kernel_F10 \n" + " b 8f //copy_kernel_L999 \n" + + "4: //copy_kernel_S_BEGIN: \n" + " "INIT" \n" + " asr "J", "N", #2 \n" + " cmp "J", xzr \n" + " ble 6f //copy_kernel_S1 \n" + + "5: //copy_kernel_S4: \n" + " "KERNEL_F1" \n" + " "KERNEL_F1" \n" + " "KERNEL_F1" \n" + " "KERNEL_F1" \n" + " subs "J", "J", #1 \n" + " bne 5b //copy_kernel_S4 \n" + + "6: //copy_kernel_S1: \n" + " ands "J", "N", #3 \n" + " ble 8f //copy_kernel_L999 \n" + + "7: //copy_kernel_S10: \n" + " "KERNEL_F1" \n" + " subs "J", "J", #1 \n" + " bne 7b //copy_kernel_S10 \n" + + "8: //copy_kernel_L999: \n" + + : + : [N_] "r" (n), //%1 + [X_] "r" (x), //%2 + [INCX_] "r" (inc_x), //%3 + [Y_] "r" (y), //%4 + [INCY_] "r" (inc_y) //%5 + : "cc", + "memory", + "x0", "x1", "x2", "x3", "x4", "x5", + "d0" + ); + + return 0; +} + +#if defined(SMP) +static int copy_thread_function(BLASLONG n, BLASLONG dummy0, + BLASLONG dummy1, FLOAT dummy2, FLOAT *x, BLASLONG inc_x, FLOAT *y, + BLASLONG inc_y, FLOAT *dummy3, BLASLONG dummy4) +{ + do_copy(n, x, inc_x, y, inc_y); + + return 0; +} +#endif + +int CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y) +{ +#if defined(SMP) + int nthreads; + FLOAT dummy_alpha; 
+#endif + + if (n <= 0) return 0; + +#if defined(SMP) + if (inc_x == 0 || n <= 10000) + nthreads = 1; + else + nthreads = num_cpu_avail(1); + + if (nthreads == 1) { + do_copy(n, x, inc_x, y, inc_y); + } else { + int mode = 0; + +#if !defined(COMPLEX) + mode = BLAS_REAL; +#else + mode = BLAS_COMPLEX; +#endif +#if !defined(DOUBLE) + mode |= BLAS_SINGLE; +#else + mode |= BLAS_DOUBLE; +#endif + + blas_level1_thread(mode, n, 0, 0, &dummy_alpha, + x, inc_x, y, inc_y, NULL, 0, + ( void *)copy_thread_function, nthreads); + } +#else + do_copy(n, x, inc_x, y, inc_y); +#endif + + return 0; +} diff --git a/kernel/arm64/dasum_thunderx2t99.c b/kernel/arm64/dasum_thunderx2t99.c index a212c9534b..fd1df0e9ab 100644 --- a/kernel/arm64/dasum_thunderx2t99.c +++ b/kernel/arm64/dasum_thunderx2t99.c @@ -1,260 +1,262 @@ -/*************************************************************************** -Copyright (c) 2017, The OpenBLAS Project -All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: -1. Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in -the documentation and/or other materials provided with the -distribution. -3. Neither the name of the OpenBLAS project nor the names of -its contributors may be used to endorse or promote products -derived from this software without specific prior written permission. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE -USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*****************************************************************************/ - -#include "common.h" - -#include - -#define N "x0" /* vector length */ -#define X "x1" /* "X" vector address */ -#define INC_X "x2" /* "X" stride */ -#define J "x5" /* loop variable */ - -#define REG0 "xzr" -#define SUMF "d0" -#define TMPF "d1" - -/******************************************************************************/ - -#define KERNEL_F1 \ - "ldr "TMPF", ["X"] \n" \ - "add "X", "X", #8 \n" \ - "fabs "TMPF", "TMPF" \n" \ - "fadd "SUMF", "SUMF", "TMPF" \n" - -#define KERNEL_F32 \ - "ldr q16, ["X"] \n" \ - "ldr q17, ["X", #16] \n" \ - "ldr q18, ["X", #32] \n" \ - "ldr q19, ["X", #48] \n" \ - "ldp q20, q21, ["X", #64] \n" \ - "ldp q22, q23, ["X", #96] \n" \ - "fabs v16.2d, v16.2d \n" \ - "fabs v17.2d, v17.2d \n" \ - "fabs v18.2d, v18.2d \n" \ - "fabs v19.2d, v19.2d \n" \ - "ldp q24, q25, ["X", #128] \n" \ - "ldp q26, q27, ["X", #160] \n" \ - "fabs v20.2d, v20.2d \n" \ - "fabs v21.2d, v21.2d \n" \ - "fabs v22.2d, v22.2d \n" \ - "fabs v23.2d, v23.2d \n" \ - "fadd v16.2d, v16.2d, v17.2d \n" \ - "fadd v18.2d, v18.2d, v19.2d \n" \ - "ldp q28, q29, ["X", #192] \n" \ - "ldp q30, q31, ["X", #224] \n" \ - "fabs v24.2d, v24.2d \n" \ - "fabs v25.2d, v25.2d \n" \ - "fabs v26.2d, v26.2d \n" \ - "fabs v27.2d, v27.2d \n" \ - "add "X", "X", #256 \n" \ - "fadd v20.2d, v20.2d, v21.2d \n" \ - "fadd v22.2d, v22.2d, v23.2d \n" \ - "fabs v28.2d, v28.2d \n" \ - "fabs v29.2d, v29.2d \n" \ - "fabs v30.2d, v30.2d \n" \ - "fabs v31.2d, v31.2d \n" \ - "PRFM PLDL1KEEP, ["X", #1024] \n" \ - "PRFM PLDL1KEEP, ["X", #1024+64] \n" \ - "fadd v24.2d, v24.2d, v25.2d \n" \ - "fadd v26.2d, v26.2d, v27.2d \n" \ - "fadd v28.2d, v28.2d, v29.2d \n" \ - "fadd v30.2d, v30.2d, v31.2d \n" \ - "fadd v0.2d, v0.2d, v16.2d \n" \ - "fadd v1.2d, v1.2d, v18.2d \n" \ - "fadd v2.2d, v2.2d, v20.2d \n" \ - "fadd v3.2d, v3.2d, v22.2d \n" \ - "PRFM PLDL1KEEP, ["X", #1024+128] \n" \ - "PRFM PLDL1KEEP, ["X", #1024+192] \n" \ - "fadd v4.2d, v4.2d, v24.2d \n" \ - "fadd v5.2d, v5.2d, v26.2d \n" \ - "fadd v6.2d, v6.2d, v28.2d \n" \ - "fadd v7.2d, v7.2d, v30.2d \n" - -#define KERNEL_F32_FINALIZE \ - "fadd v0.2d, v0.2d, v1.2d \n" \ - "fadd v2.2d, v2.2d, v3.2d \n" \ - "fadd v4.2d, v4.2d, v5.2d \n" \ - "fadd v6.2d, v6.2d, v7.2d \n" \ - "fadd v0.2d, v0.2d, v2.2d \n" \ - "fadd v4.2d, v4.2d, v6.2d \n" \ - "fadd v0.2d, v0.2d, v4.2d \n" \ - "faddp "SUMF", v0.2d \n" - -#define INIT_S \ - "lsl "INC_X", "INC_X", #3 \n" - -#define KERNEL_S1 \ - "ldr "TMPF", ["X"] \n" \ - "add "X", "X", "INC_X" \n" \ - "fabs "TMPF", "TMPF" \n" \ - "fadd "SUMF", "SUMF", "TMPF" \n" - - -#if defined(SMP) -extern int blas_level1_thread_with_return_value(int mode, BLASLONG m, BLASLONG n, - BLASLONG k, void *alpha, void *a, BLASLONG lda, void *b, BLASLONG ldb, - void *c, BLASLONG ldc, int (*function)(), int nthreads); -#endif - - -static FLOAT dasum_compute(BLASLONG n, FLOAT *x, BLASLONG inc_x) -{ - FLOAT asum = 0.0 ; - - if ( n < 0 ) return(asum); - - __asm__ __volatile__ ( - " mov "N", %[N_] \n" - " mov "X", %[X_] \n" - " mov "INC_X", %[INCX_] \n" - " fmov "SUMF", "REG0" \n" - " fmov d1, "REG0" \n" - " fmov d2, "REG0" \n" - " fmov d3, "REG0" \n" - " fmov d4, "REG0" \n" - " fmov d5, "REG0" \n" - " fmov d6, "REG0" \n" - " fmov d7, "REG0" \n" - " cmp "N", xzr \n" - " ble 9f //asum_kernel_L999 \n" - " cmp "INC_X", xzr \n" - " ble 9f //asum_kernel_L999 \n" - " cmp "INC_X", #1 \n" - " bne 5f //asum_kernel_S_BEGIN \n" - - "1: //asum_kernel_F_BEGIN: \n" - " asr "J", "N", #5 \n" - " cmp "J", xzr \n" - " beq 3f 
//asum_kernel_F1 \n" - - ".align 5 \n" - "2: //asum_kernel_F32: \n" - " "KERNEL_F32" \n" - " subs "J", "J", #1 \n" - " bne 2b //asum_kernel_F32 \n" - " "KERNEL_F32_FINALIZE" \n" - - "3: //asum_kernel_F1: \n" - " ands "J", "N", #31 \n" - " ble 9f //asum_kernel_L999 \n" - - "4: //asum_kernel_F10: \n" - " "KERNEL_F1" \n" - " subs "J", "J", #1 \n" - " bne 4b //asum_kernel_F10 \n" - " b 9f //asum_kernel_L999 \n" - - "5: //asum_kernel_S_BEGIN: \n" - " "INIT_S" \n" - " asr "J", "N", #2 \n" - " cmp "J", xzr \n" - " ble 7f //asum_kernel_S1 \n" - - "6: //asum_kernel_S4: \n" - " "KERNEL_S1" \n" - " "KERNEL_S1" \n" - " "KERNEL_S1" \n" - " "KERNEL_S1" \n" - " subs "J", "J", #1 \n" - " bne 6b //asum_kernel_S4 \n" - - "7: //asum_kernel_S1: \n" - " ands "J", "N", #3 \n" - " ble 9f //asum_kernel_L999 \n" - - "8: //asum_kernel_S10: \n" - " "KERNEL_S1" \n" - " subs "J", "J", #1 \n" - " bne 8b //asum_kernel_S10 \n" - - "9: //asum_kernel_L999: \n" - " fmov %[ASUM_], "SUMF" \n" - - : [ASUM_] "=r" (asum) //%0 - : [N_] "r" (n), //%1 - [X_] "r" (x), //%2 - [INCX_] "r" (inc_x) //%3 - : "cc", - "memory", - "x0", "x1", "x2", "x3", "x4", "x5", - "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7" - ); - - return asum; -} - -#if defined(SMP) -static int dasum_thread_function(BLASLONG n, BLASLONG dummy0, - BLASLONG dummy1, FLOAT dummy2, FLOAT *x, BLASLONG inc_x, FLOAT *y, - BLASLONG inc_y, FLOAT *result, BLASLONG dummy3) -{ - *result = dasum_compute(n, x, inc_x); - - return 0; -} -#endif - -FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) -{ -#if defined(SMP) - int nthreads; - FLOAT dummy_alpha; -#endif - FLOAT asum = 0.0; - -#if defined(SMP) - if (inc_x == 0 || n <= 10000) - nthreads = 1; - else - nthreads = num_cpu_avail(1); - - if (nthreads == 1) { - asum = dasum_compute(n, x, inc_x); - } else { - int mode, i; - char result[MAX_CPU_NUMBER * sizeof(double) * 2]; - FLOAT *ptr; - - mode = BLAS_DOUBLE; - - blas_level1_thread_with_return_value(mode, n, 0, 0, &dummy_alpha, - x, inc_x, NULL, 0, result, 0, - ( void *)dasum_thread_function, nthreads); - - ptr = (FLOAT *)result; - for (i = 0; i < nthreads; i++) { - asum = asum + (*ptr); - ptr = (FLOAT *)(((char *)ptr) + sizeof(double) * 2); - } - } -#else - asum = dasum_compute(n, x, inc_x); -#endif - - return asum; -} +/*************************************************************************** +Copyright (c) 2017, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +#include "common.h" + +#include + +#define N "x0" /* vector length */ +#define X "x1" /* "X" vector address */ +#define INC_X "x2" /* "X" stride */ +#define J "x5" /* loop variable */ + +#define REG0 "xzr" +#define SUMF "d0" +#define TMPF "d1" + +/******************************************************************************/ + +#define KERNEL_F1 \ + "ldr "TMPF", ["X"] \n" \ + "add "X", "X", #8 \n" \ + "fabs "TMPF", "TMPF" \n" \ + "fadd "SUMF", "SUMF", "TMPF" \n" + +#define KERNEL_F32 \ + "ldr q16, ["X"] \n" \ + "ldr q17, ["X", #16] \n" \ + "ldr q18, ["X", #32] \n" \ + "ldr q19, ["X", #48] \n" \ + "ldp q20, q21, ["X", #64] \n" \ + "ldp q22, q23, ["X", #96] \n" \ + "fabs v16.2d, v16.2d \n" \ + "fabs v17.2d, v17.2d \n" \ + "fabs v18.2d, v18.2d \n" \ + "fabs v19.2d, v19.2d \n" \ + "ldp q24, q25, ["X", #128] \n" \ + "ldp q26, q27, ["X", #160] \n" \ + "fabs v20.2d, v20.2d \n" \ + "fabs v21.2d, v21.2d \n" \ + "fabs v22.2d, v22.2d \n" \ + "fabs v23.2d, v23.2d \n" \ + "fadd v16.2d, v16.2d, v17.2d \n" \ + "fadd v18.2d, v18.2d, v19.2d \n" \ + "ldp q28, q29, ["X", #192] \n" \ + "ldp q30, q31, ["X", #224] \n" \ + "fabs v24.2d, v24.2d \n" \ + "fabs v25.2d, v25.2d \n" \ + "fabs v26.2d, v26.2d \n" \ + "fabs v27.2d, v27.2d \n" \ + "add "X", "X", #256 \n" \ + "fadd v20.2d, v20.2d, v21.2d \n" \ + "fadd v22.2d, v22.2d, v23.2d \n" \ + "fabs v28.2d, v28.2d \n" \ + "fabs v29.2d, v29.2d \n" \ + "fabs v30.2d, v30.2d \n" \ + "fabs v31.2d, v31.2d \n" \ + "PRFM PLDL1KEEP, ["X", #1024] \n" \ + "PRFM PLDL1KEEP, ["X", #1024+64] \n" \ + "fadd v24.2d, v24.2d, v25.2d \n" \ + "fadd v26.2d, v26.2d, v27.2d \n" \ + "fadd v28.2d, v28.2d, v29.2d \n" \ + "fadd v30.2d, v30.2d, v31.2d \n" \ + "fadd v0.2d, v0.2d, v16.2d \n" \ + "fadd v1.2d, v1.2d, v18.2d \n" \ + "fadd v2.2d, v2.2d, v20.2d \n" \ + "fadd v3.2d, v3.2d, v22.2d \n" \ + "PRFM PLDL1KEEP, ["X", #1024+128] \n" \ + "PRFM PLDL1KEEP, ["X", #1024+192] \n" \ + "fadd v4.2d, v4.2d, v24.2d \n" \ + "fadd v5.2d, v5.2d, v26.2d \n" \ + "fadd v6.2d, v6.2d, v28.2d \n" \ + "fadd v7.2d, v7.2d, v30.2d \n" + +#define KERNEL_F32_FINALIZE \ + "fadd v0.2d, v0.2d, v1.2d \n" \ + "fadd v2.2d, v2.2d, v3.2d \n" \ + "fadd v4.2d, v4.2d, v5.2d \n" \ + "fadd v6.2d, v6.2d, v7.2d \n" \ + "fadd v0.2d, v0.2d, v2.2d \n" \ + "fadd v4.2d, v4.2d, v6.2d \n" \ + "fadd v0.2d, v0.2d, v4.2d \n" \ + "faddp "SUMF", v0.2d \n" + +#define INIT_S \ + "lsl "INC_X", "INC_X", #3 \n" + +#define KERNEL_S1 \ + "ldr "TMPF", ["X"] \n" \ + "add "X", "X", "INC_X" \n" \ + "fabs "TMPF", "TMPF" \n" \ + "fadd "SUMF", "SUMF", "TMPF" \n" + + +#if defined(SMP) +extern int blas_level1_thread_with_return_value(int mode, BLASLONG m, BLASLONG n, + BLASLONG k, void *alpha, void *a, BLASLONG lda, void *b, BLASLONG ldb, + void *c, BLASLONG ldc, int (*function)(), int nthreads); +#endif + + +static FLOAT dasum_compute(BLASLONG n, FLOAT *x, BLASLONG inc_x) +{ + FLOAT asum = 0.0 ; + + if ( n < 0 ) return(asum); + + __asm__ __volatile__ ( + " mov "N", %[N_] 
\n" + " mov "X", %[X_] \n" + " mov "INC_X", %[INCX_] \n" + " fmov "SUMF", "REG0" \n" + " fmov d1, "REG0" \n" + " fmov d2, "REG0" \n" + " fmov d3, "REG0" \n" + " fmov d4, "REG0" \n" + " fmov d5, "REG0" \n" + " fmov d6, "REG0" \n" + " fmov d7, "REG0" \n" + " cmp "N", xzr \n" + " ble 9f //asum_kernel_L999 \n" + " cmp "INC_X", xzr \n" + " ble 9f //asum_kernel_L999 \n" + " cmp "INC_X", #1 \n" + " bne 5f //asum_kernel_S_BEGIN \n" + + "1: //asum_kernel_F_BEGIN: \n" + " asr "J", "N", #5 \n" + " cmp "J", xzr \n" + " beq 3f //asum_kernel_F1 \n" + +#if !(defined(__clang__) && defined(OS_WINDOWS)) + ".align 5 \n" +#endif + "2: //asum_kernel_F32: \n" + " "KERNEL_F32" \n" + " subs "J", "J", #1 \n" + " bne 2b //asum_kernel_F32 \n" + " "KERNEL_F32_FINALIZE" \n" + + "3: //asum_kernel_F1: \n" + " ands "J", "N", #31 \n" + " ble 9f //asum_kernel_L999 \n" + + "4: //asum_kernel_F10: \n" + " "KERNEL_F1" \n" + " subs "J", "J", #1 \n" + " bne 4b //asum_kernel_F10 \n" + " b 9f //asum_kernel_L999 \n" + + "5: //asum_kernel_S_BEGIN: \n" + " "INIT_S" \n" + " asr "J", "N", #2 \n" + " cmp "J", xzr \n" + " ble 7f //asum_kernel_S1 \n" + + "6: //asum_kernel_S4: \n" + " "KERNEL_S1" \n" + " "KERNEL_S1" \n" + " "KERNEL_S1" \n" + " "KERNEL_S1" \n" + " subs "J", "J", #1 \n" + " bne 6b //asum_kernel_S4 \n" + + "7: //asum_kernel_S1: \n" + " ands "J", "N", #3 \n" + " ble 9f //asum_kernel_L999 \n" + + "8: //asum_kernel_S10: \n" + " "KERNEL_S1" \n" + " subs "J", "J", #1 \n" + " bne 8b //asum_kernel_S10 \n" + + "9: //asum_kernel_L999: \n" + " fmov %[ASUM_], "SUMF" \n" + + : [ASUM_] "=r" (asum) //%0 + : [N_] "r" (n), //%1 + [X_] "r" (x), //%2 + [INCX_] "r" (inc_x) //%3 + : "cc", + "memory", + "x0", "x1", "x2", "x3", "x4", "x5", + "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7" + ); + + return asum; +} + +#if defined(SMP) +static int dasum_thread_function(BLASLONG n, BLASLONG dummy0, + BLASLONG dummy1, FLOAT dummy2, FLOAT *x, BLASLONG inc_x, FLOAT *y, + BLASLONG inc_y, FLOAT *result, BLASLONG dummy3) +{ + *result = dasum_compute(n, x, inc_x); + + return 0; +} +#endif + +FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) +{ +#if defined(SMP) + int nthreads; + FLOAT dummy_alpha; +#endif + FLOAT asum = 0.0; + +#if defined(SMP) + if (inc_x == 0 || n <= 10000) + nthreads = 1; + else + nthreads = num_cpu_avail(1); + + if (nthreads == 1) { + asum = dasum_compute(n, x, inc_x); + } else { + int mode, i; + char result[MAX_CPU_NUMBER * sizeof(double) * 2]; + FLOAT *ptr; + + mode = BLAS_DOUBLE; + + blas_level1_thread_with_return_value(mode, n, 0, 0, &dummy_alpha, + x, inc_x, NULL, 0, result, 0, + ( void *)dasum_thread_function, nthreads); + + ptr = (FLOAT *)result; + for (i = 0; i < nthreads; i++) { + asum = asum + (*ptr); + ptr = (FLOAT *)(((char *)ptr) + sizeof(double) * 2); + } + } +#else + asum = dasum_compute(n, x, inc_x); +#endif + + return asum; +} diff --git a/kernel/arm64/dot_kernel_asimd.c b/kernel/arm64/dot_kernel_asimd.c index 1288838f87..ffb309ab9e 100644 --- a/kernel/arm64/dot_kernel_asimd.c +++ b/kernel/arm64/dot_kernel_asimd.c @@ -1,345 +1,346 @@ -/*************************************************************************** -Copyright (c) 2017, The OpenBLAS Project -Copyright (c) 2022, Arm Ltd -All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: -1. Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. -2. 
Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in -the documentation and/or other materials provided with the -distribution. -3. Neither the name of the OpenBLAS project nor the names of -its contributors may be used to endorse or promote products -derived from this software without specific prior written permission. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE -USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*****************************************************************************/ - - -#include "common.h" - -#include - -#if !defined(DSDOT) -#define RETURN_TYPE FLOAT -#else -#define RETURN_TYPE double -#endif - -#if !defined(DOUBLE) -#if !defined(DSDOT) -#define DOT_MOD "s" -#define REG0 "wzr" -#define TMPX "s16" -#define TMPY "s24" -#define INC_SHIFT "2" -#define N_DIV_SHIFT "6" -#define N_REM_MASK "63" -#else -#define DOT_MOD "d" -#define REG0 "xzr" -#define TMPX "s16" -#define TMPX1 "d2" -#define TMPY "s24" -#define TMPY1 "d3" -#define INC_SHIFT "2" -#define N_DIV_SHIFT "4" -#define N_REM_MASK "15" -#endif -#else -#define DOT_MOD "d" -#define REG0 "xzr" -#define TMPX "d16" -#define TMPY "d24" -#define INC_SHIFT "3" -#define N_DIV_SHIFT "5" -#define N_REM_MASK "31" -#endif - -#define OUT "%"DOT_MOD"[DOT_]" - -#if !defined(DOUBLE) - -#if !defined(DSDOT) -#define KERNEL_F1 \ - " ldr "TMPX", [%[X_]] \n" \ - " ldr "TMPY", [%[Y_]] \n" \ - " add %[X_], %[X_], %[INCX_] \n" \ - " add %[Y_], %[Y_], %[INCY_] \n" \ - " fmadd "OUT", "TMPX", "TMPY", "OUT" \n" - -#define KERNEL_F \ - " ldp q16, q17, [%[X_]] \n" \ - " ldp q24, q25, [%[Y_]] \n" \ - " ldp q18, q19, [%[X_], #32] \n" \ - " ldp q26, q27, [%[Y_], #32] \n" \ - " fmla v0.4s, v16.4s, v24.4s \n" \ - " fmla v1.4s, v17.4s, v25.4s \n" \ - " ldp q20, q21, [%[X_], #64] \n" \ - " ldp q28, q29, [%[Y_], #64] \n" \ - " fmla v2.4s, v18.4s, v26.4s \n" \ - " fmla v3.4s, v19.4s, v27.4s \n" \ - " ldp q22, q23, [%[X_], #96] \n" \ - " ldp q30, q31, [%[Y_], #96] \n" \ - " add %[Y_], %[Y_], #128 \n" \ - " add %[X_], %[X_], #128 \n" \ - " fmla v4.4s, v20.4s, v28.4s \n" \ - " fmla v5.4s, v21.4s, v29.4s \n" \ - " PRFM PLDL1KEEP, [%[X_], #896] \n" \ - " PRFM PLDL1KEEP, [%[Y_], #896] \n" \ - " PRFM PLDL1KEEP, [%[X_], #896+64] \n" \ - " PRFM PLDL1KEEP, [%[Y_], #896+64] \n" \ - " fmla v6.4s, v22.4s, v30.4s \n" \ - " fmla v7.4s, v23.4s, v31.4s \n" \ - " ldp q16, q17, [%[X_]] \n" \ - " ldp q24, q25, [%[Y_]] \n" \ - " ldp q18, q19, [%[X_], #32] \n" \ - " ldp q26, q27, [%[Y_], #32] \n" \ - " fmla v0.4s, v16.4s, v24.4s \n" \ - " fmla v1.4s, v17.4s, v25.4s \n" \ - " ldp q20, q21, [%[X_], #64] \n" \ - " ldp q28, q29, [%[Y_], #64] \n" \ - " fmla v2.4s, v18.4s, v26.4s \n" \ - " fmla v3.4s, v19.4s, v27.4s \n" \ - " ldp q22, q23, [%[X_], #96] \n" \ - " ldp q30, q31, [%[Y_], #96] \n" \ - " add %[Y_], %[Y_], #128 \n" \ - " add %[X_], 
%[X_], #128 \n" \ - " fmla v4.4s, v20.4s, v28.4s \n" \ - " fmla v5.4s, v21.4s, v29.4s \n" \ - " PRFM PLDL1KEEP, [%[X_], #896] \n" \ - " PRFM PLDL1KEEP, [%[Y_], #896] \n" \ - " PRFM PLDL1KEEP, [%[X_], #896+64] \n" \ - " PRFM PLDL1KEEP, [%[Y_], #896+64] \n" \ - " fmla v6.4s, v22.4s, v30.4s \n" \ - " fmla v7.4s, v23.4s, v31.4s \n" - -#define KERNEL_F_FINALIZE \ - " fadd v0.4s, v0.4s, v1.4s \n" \ - " fadd v2.4s, v2.4s, v3.4s \n" \ - " fadd v4.4s, v4.4s, v5.4s \n" \ - " fadd v6.4s, v6.4s, v7.4s \n" \ - " fadd v0.4s, v0.4s, v2.4s \n" \ - " fadd v4.4s, v4.4s, v6.4s \n" \ - " fadd v0.4s, v0.4s, v4.4s \n" \ - " faddp v0.4s, v0.4s, v0.4s \n" \ - " faddp v0.4s, v0.4s, v0.4s \n" - -#else /* !defined(DSDOT) */ -#define KERNEL_F1 \ - " ldr "TMPX", [%[X_]] \n" \ - " ldr "TMPY", [%[Y_]] \n" \ - " add %[X_], %[X_], %[INCX_] \n" \ - " add %[Y_], %[Y_], %[INCY_] \n" \ - " fcvt "TMPX1", "TMPX" \n" \ - " fcvt "TMPY1", "TMPY" \n" \ - " fmul "TMPX1", "TMPX1", "TMPY1" \n" \ - " fadd "OUT", "OUT", "TMPX1" \n" - - -#define KERNEL_F \ - " ldp q18, q19, [%[X_]] \n" \ - " ldp q26, q27, [%[Y_]] \n" \ - " fcvtl v16.2d, v18.2s \n" \ - " fcvtl2 v17.2d, v18.4s \n" \ - " fcvtl v18.2d, v19.2s \n" \ - " fcvtl2 v19.2d, v19.4s \n" \ - " fcvtl v24.2d, v26.2s \n" \ - " fcvtl2 v25.2d, v26.4s \n" \ - " fcvtl v26.2d, v27.2s \n" \ - " fcvtl2 v27.2d, v27.4s \n" \ - " ldp q22, q23, [%[X_], #32] \n" \ - " ldp q30, q31, [%[Y_], #32] \n" \ - " fcvtl v20.2d, v22.2s \n" \ - " fcvtl2 v21.2d, v22.4s \n" \ - " fcvtl v22.2d, v23.2s \n" \ - " fcvtl2 v23.2d, v23.4s \n" \ - " fcvtl v28.2d, v30.2s \n" \ - " fcvtl2 v29.2d, v30.4s \n" \ - " fcvtl v30.2d, v31.2s \n" \ - " fcvtl2 v31.2d, v31.4s \n" \ - " PRFM PLDL1KEEP, [%[X_], #896] \n" \ - " PRFM PLDL1KEEP, [%[Y_], #896] \n" \ - " PRFM PLDL1KEEP, [%[X_], #896+64] \n" \ - " PRFM PLDL1KEEP, [%[Y_], #896+64] \n" \ - " fmla v0.2d, v16.2d, v24.2d \n" \ - " fmla v1.2d, v17.2d, v25.2d \n" \ - " fmla v2.2d, v18.2d, v26.2d \n" \ - " fmla v3.2d, v19.2d, v27.2d \n" \ - " add %[Y_], %[Y_], #64 \n" \ - " add %[X_], %[X_], #64 \n" \ - " fmla v4.2d, v20.2d, v28.2d \n" \ - " fmla v5.2d, v21.2d, v29.2d \n" \ - " fmla v6.2d, v22.2d, v30.2d \n" \ - " fmla v7.2d, v23.2d, v31.2d \n" - -#define KERNEL_F_FINALIZE \ - " fadd v0.2d, v0.2d, v1.2d \n" \ - " fadd v2.2d, v2.2d, v3.2d \n" \ - " fadd v4.2d, v4.2d, v5.2d \n" \ - " fadd v6.2d, v6.2d, v7.2d \n" \ - " fadd v0.2d, v0.2d, v2.2d \n" \ - " fadd v4.2d, v4.2d, v6.2d \n" \ - " fadd v0.2d, v0.2d, v4.2d \n" \ - " faddp "OUT", v0.2d \n" -#endif /* !defined(DSDOT) */ - -#else /* !defined(DOUBLE) */ -#define KERNEL_F1 \ - " ldr "TMPX", [%[X_]] \n" \ - " ldr "TMPY", [%[Y_]] \n" \ - " add %[X_], %[X_], %[INCX_] \n" \ - " add %[Y_], %[Y_], %[INCY_] \n" \ - " fmadd "OUT", "TMPX", "TMPY", "OUT" \n" - -#define KERNEL_F \ - " ldp q16, q17, [%[X_]] \n" \ - " ldp q24, q25, [%[Y_]] \n" \ - " ldp q18, q19, [%[X_], #32] \n" \ - " ldp q26, q27, [%[Y_], #32] \n" \ - " fmla v0.2d, v16.2d, v24.2d \n" \ - " fmla v1.2d, v17.2d, v25.2d \n" \ - " ldp q20, q21, [%[X_], #64] \n" \ - " ldp q28, q29, [%[Y_], #64] \n" \ - " fmla v2.2d, v18.2d, v26.2d \n" \ - " fmla v3.2d, v19.2d, v27.2d \n" \ - " ldp q22, q23, [%[X_], #96] \n" \ - " ldp q30, q31, [%[Y_], #96] \n" \ - " add %[Y_], %[Y_], #128 \n" \ - " add %[X_], %[X_], #128 \n" \ - " fmla v4.2d, v20.2d, v28.2d \n" \ - " fmla v5.2d, v21.2d, v29.2d \n" \ - " PRFM PLDL1KEEP, [%[X_], #896] \n" \ - " PRFM PLDL1KEEP, [%[Y_], #896] \n" \ - " PRFM PLDL1KEEP, [%[X_], #896+64] \n" \ - " PRFM PLDL1KEEP, [%[Y_], #896+64] \n" \ - " fmla v6.2d, v22.2d, v30.2d \n" \ - 
" fmla v7.2d, v23.2d, v31.2d \n" \ - " ldp q16, q17, [%[X_]] \n" \ - " ldp q24, q25, [%[Y_]] \n" \ - " ldp q18, q19, [%[X_], #32] \n" \ - " ldp q26, q27, [%[Y_], #32] \n" \ - " fmla v0.2d, v16.2d, v24.2d \n" \ - " fmla v1.2d, v17.2d, v25.2d \n" \ - " ldp q20, q21, [%[X_], #64] \n" \ - " ldp q28, q29, [%[Y_], #64] \n" \ - " fmla v2.2d, v18.2d, v26.2d \n" \ - " fmla v3.2d, v19.2d, v27.2d \n" \ - " ldp q22, q23, [%[X_], #96] \n" \ - " ldp q30, q31, [%[Y_], #96] \n" \ - " add %[Y_], %[Y_], #128 \n" \ - " add %[X_], %[X_], #128 \n" \ - " fmla v4.2d, v20.2d, v28.2d \n" \ - " fmla v5.2d, v21.2d, v29.2d \n" \ - " PRFM PLDL1KEEP, [%[X_], #896] \n" \ - " PRFM PLDL1KEEP, [%[Y_], #896] \n" \ - " PRFM PLDL1KEEP, [%[X_], #896+64] \n" \ - " PRFM PLDL1KEEP, [%[Y_], #896+64] \n" \ - " fmla v6.2d, v22.2d, v30.2d \n" \ - " fmla v7.2d, v23.2d, v31.2d \n" - -#define KERNEL_F_FINALIZE \ - " fadd v0.2d, v0.2d, v1.2d \n" \ - " fadd v2.2d, v2.2d, v3.2d \n" \ - " fadd v4.2d, v4.2d, v5.2d \n" \ - " fadd v6.2d, v6.2d, v7.2d \n" \ - " fadd v0.2d, v0.2d, v2.2d \n" \ - " fadd v4.2d, v4.2d, v6.2d \n" \ - " fadd v0.2d, v0.2d, v4.2d \n" \ - " faddp "OUT", v0.2d \n" -#endif /* !defined(DOUBLE) */ - -static RETURN_TYPE dot_kernel_asimd(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y) -{ - RETURN_TYPE dot = 0.0; - BLASLONG j = 0; - - __asm__ __volatile__ ( - " fmov "OUT", "REG0" \n" - " fmov d1, xzr \n" - " fmov d2, xzr \n" - " fmov d3, xzr \n" - " fmov d4, xzr \n" - " fmov d5, xzr \n" - " fmov d6, xzr \n" - " fmov d7, xzr \n" - " cmp %[INCX_], #1 \n" - " bne 5f //dot_kernel_S_BEGIN \n" - " cmp %[INCY_], #1 \n" - " bne 5f //dot_kernel_S_BEGIN \n" - - "1: //dot_kernel_F_BEGIN: \n" - " lsl %[INCX_], %[INCX_], "INC_SHIFT" \n" - " lsl %[INCY_], %[INCY_], "INC_SHIFT" \n" - " asr %[J_], %[N_], #"N_DIV_SHIFT" \n" - " cmp %[J_], xzr \n" - " beq 3f //dot_kernel_F1 \n" - - " .align 5 \n" - "2: //dot_kernel_F: \n" - " "KERNEL_F" \n" - " subs %[J_], %[J_], #1 \n" - " bne 2b //dot_kernel_F \n" - " "KERNEL_F_FINALIZE" \n" - - "3: //dot_kernel_F1: \n" - " ands %[J_], %[N_], #"N_REM_MASK" \n" - " ble 9f //dot_kernel_L999 \n" - - "4: //dot_kernel_F10: \n" - " "KERNEL_F1" \n" - " subs %[J_], %[J_], #1 \n" - " bne 4b //dot_kernel_F10 \n" - " b 9f //dot_kernel_L999 \n" - - "5: //dot_kernel_S_BEGIN: \n" - " lsl %[INCX_], %[INCX_], "INC_SHIFT" \n" - " lsl %[INCY_], %[INCY_], "INC_SHIFT" \n" - " asr %[J_], %[N_], #2 \n" - " cmp %[J_], xzr \n" - " ble 7f //dot_kernel_S1 \n" - - "6: //dot_kernel_S4: \n" - " "KERNEL_F1" \n" - " "KERNEL_F1" \n" - " "KERNEL_F1" \n" - " "KERNEL_F1" \n" - " subs %[J_], %[J_], #1 \n" - " bne 6b //dot_kernel_S4 \n" - - "7: //dot_kernel_S1: \n" - " ands %[J_], %[N_], #3 \n" - " ble 9f //dot_kernel_L999 \n" - - "8: //dot_kernel_S10: \n" - " "KERNEL_F1" \n" - " subs %[J_], %[J_], #1 \n" - " bne 8b //dot_kernel_S10 \n" - - "9: //dot_kernel_L999: \n" - - : [DOT_] "=&w" (dot) - : [N_] "r" (n), - [X_] "r" (x), - [INCX_] "r" (inc_x), - [Y_] "r" (y), - [INCY_] "r" (inc_y), - [J_] "r" (j) - : "cc", - "memory", - "d1", "d2", "d3", "d4", "d5", "d6", "d7" - ); - - return dot; -} +/*************************************************************************** +Copyright (c) 2017, The OpenBLAS Project +Copyright (c) 2022, Arm Ltd +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. 
Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + + +#include "common.h" + +#include + +#if !defined(DSDOT) +#define RETURN_TYPE FLOAT +#else +#define RETURN_TYPE double +#endif + +#if !defined(DOUBLE) +#if !defined(DSDOT) +#define DOT_MOD "s" +#define REG0 "wzr" +#define TMPX "s16" +#define TMPY "s24" +#define INC_SHIFT "2" +#define N_DIV_SHIFT "6" +#define N_REM_MASK "63" +#else +#define DOT_MOD "d" +#define REG0 "xzr" +#define TMPX "s16" +#define TMPX1 "d2" +#define TMPY "s24" +#define TMPY1 "d3" +#define INC_SHIFT "2" +#define N_DIV_SHIFT "4" +#define N_REM_MASK "15" +#endif +#else +#define DOT_MOD "d" +#define REG0 "xzr" +#define TMPX "d16" +#define TMPY "d24" +#define INC_SHIFT "3" +#define N_DIV_SHIFT "5" +#define N_REM_MASK "31" +#endif + +#define OUT "%"DOT_MOD"[DOT_]" + +#if !defined(DOUBLE) + +#if !defined(DSDOT) +#define KERNEL_F1 \ + " ldr "TMPX", [%[X_]] \n" \ + " ldr "TMPY", [%[Y_]] \n" \ + " add %[X_], %[X_], %[INCX_] \n" \ + " add %[Y_], %[Y_], %[INCY_] \n" \ + " fmadd "OUT", "TMPX", "TMPY", "OUT" \n" + +#define KERNEL_F \ + " ldp q16, q17, [%[X_]] \n" \ + " ldp q24, q25, [%[Y_]] \n" \ + " ldp q18, q19, [%[X_], #32] \n" \ + " ldp q26, q27, [%[Y_], #32] \n" \ + " fmla v0.4s, v16.4s, v24.4s \n" \ + " fmla v1.4s, v17.4s, v25.4s \n" \ + " ldp q20, q21, [%[X_], #64] \n" \ + " ldp q28, q29, [%[Y_], #64] \n" \ + " fmla v2.4s, v18.4s, v26.4s \n" \ + " fmla v3.4s, v19.4s, v27.4s \n" \ + " ldp q22, q23, [%[X_], #96] \n" \ + " ldp q30, q31, [%[Y_], #96] \n" \ + " add %[Y_], %[Y_], #128 \n" \ + " add %[X_], %[X_], #128 \n" \ + " fmla v4.4s, v20.4s, v28.4s \n" \ + " fmla v5.4s, v21.4s, v29.4s \n" \ + " PRFM PLDL1KEEP, [%[X_], #896] \n" \ + " PRFM PLDL1KEEP, [%[Y_], #896] \n" \ + " PRFM PLDL1KEEP, [%[X_], #896+64] \n" \ + " PRFM PLDL1KEEP, [%[Y_], #896+64] \n" \ + " fmla v6.4s, v22.4s, v30.4s \n" \ + " fmla v7.4s, v23.4s, v31.4s \n" \ + " ldp q16, q17, [%[X_]] \n" \ + " ldp q24, q25, [%[Y_]] \n" \ + " ldp q18, q19, [%[X_], #32] \n" \ + " ldp q26, q27, [%[Y_], #32] \n" \ + " fmla v0.4s, v16.4s, v24.4s \n" \ + " fmla v1.4s, v17.4s, v25.4s \n" \ + " ldp q20, q21, [%[X_], #64] \n" \ + " ldp q28, q29, [%[Y_], #64] \n" \ + " fmla v2.4s, v18.4s, v26.4s \n" \ + " fmla v3.4s, v19.4s, v27.4s \n" \ + " ldp q22, q23, [%[X_], #96] \n" \ + " ldp q30, q31, [%[Y_], #96] \n" \ + " add %[Y_], %[Y_], #128 \n" \ + " add %[X_], 
%[X_], #128 \n" \ + " fmla v4.4s, v20.4s, v28.4s \n" \ + " fmla v5.4s, v21.4s, v29.4s \n" \ + " PRFM PLDL1KEEP, [%[X_], #896] \n" \ + " PRFM PLDL1KEEP, [%[Y_], #896] \n" \ + " PRFM PLDL1KEEP, [%[X_], #896+64] \n" \ + " PRFM PLDL1KEEP, [%[Y_], #896+64] \n" \ + " fmla v6.4s, v22.4s, v30.4s \n" \ + " fmla v7.4s, v23.4s, v31.4s \n" + +#define KERNEL_F_FINALIZE \ + " fadd v0.4s, v0.4s, v1.4s \n" \ + " fadd v2.4s, v2.4s, v3.4s \n" \ + " fadd v4.4s, v4.4s, v5.4s \n" \ + " fadd v6.4s, v6.4s, v7.4s \n" \ + " fadd v0.4s, v0.4s, v2.4s \n" \ + " fadd v4.4s, v4.4s, v6.4s \n" \ + " fadd v0.4s, v0.4s, v4.4s \n" \ + " faddp v0.4s, v0.4s, v0.4s \n" \ + " faddp v0.4s, v0.4s, v0.4s \n" + +#else /* !defined(DSDOT) */ +#define KERNEL_F1 \ + " ldr "TMPX", [%[X_]] \n" \ + " ldr "TMPY", [%[Y_]] \n" \ + " add %[X_], %[X_], %[INCX_] \n" \ + " add %[Y_], %[Y_], %[INCY_] \n" \ + " fcvt "TMPX1", "TMPX" \n" \ + " fcvt "TMPY1", "TMPY" \n" \ + " fmul "TMPX1", "TMPX1", "TMPY1" \n" \ + " fadd "OUT", "OUT", "TMPX1" \n" + + +#define KERNEL_F \ + " ldp q18, q19, [%[X_]] \n" \ + " ldp q26, q27, [%[Y_]] \n" \ + " fcvtl v16.2d, v18.2s \n" \ + " fcvtl2 v17.2d, v18.4s \n" \ + " fcvtl v18.2d, v19.2s \n" \ + " fcvtl2 v19.2d, v19.4s \n" \ + " fcvtl v24.2d, v26.2s \n" \ + " fcvtl2 v25.2d, v26.4s \n" \ + " fcvtl v26.2d, v27.2s \n" \ + " fcvtl2 v27.2d, v27.4s \n" \ + " ldp q22, q23, [%[X_], #32] \n" \ + " ldp q30, q31, [%[Y_], #32] \n" \ + " fcvtl v20.2d, v22.2s \n" \ + " fcvtl2 v21.2d, v22.4s \n" \ + " fcvtl v22.2d, v23.2s \n" \ + " fcvtl2 v23.2d, v23.4s \n" \ + " fcvtl v28.2d, v30.2s \n" \ + " fcvtl2 v29.2d, v30.4s \n" \ + " fcvtl v30.2d, v31.2s \n" \ + " fcvtl2 v31.2d, v31.4s \n" \ + " PRFM PLDL1KEEP, [%[X_], #896] \n" \ + " PRFM PLDL1KEEP, [%[Y_], #896] \n" \ + " PRFM PLDL1KEEP, [%[X_], #896+64] \n" \ + " PRFM PLDL1KEEP, [%[Y_], #896+64] \n" \ + " fmla v0.2d, v16.2d, v24.2d \n" \ + " fmla v1.2d, v17.2d, v25.2d \n" \ + " fmla v2.2d, v18.2d, v26.2d \n" \ + " fmla v3.2d, v19.2d, v27.2d \n" \ + " add %[Y_], %[Y_], #64 \n" \ + " add %[X_], %[X_], #64 \n" \ + " fmla v4.2d, v20.2d, v28.2d \n" \ + " fmla v5.2d, v21.2d, v29.2d \n" \ + " fmla v6.2d, v22.2d, v30.2d \n" \ + " fmla v7.2d, v23.2d, v31.2d \n" + +#define KERNEL_F_FINALIZE \ + " fadd v0.2d, v0.2d, v1.2d \n" \ + " fadd v2.2d, v2.2d, v3.2d \n" \ + " fadd v4.2d, v4.2d, v5.2d \n" \ + " fadd v6.2d, v6.2d, v7.2d \n" \ + " fadd v0.2d, v0.2d, v2.2d \n" \ + " fadd v4.2d, v4.2d, v6.2d \n" \ + " fadd v0.2d, v0.2d, v4.2d \n" \ + " faddp "OUT", v0.2d \n" +#endif /* !defined(DSDOT) */ + +#else /* !defined(DOUBLE) */ +#define KERNEL_F1 \ + " ldr "TMPX", [%[X_]] \n" \ + " ldr "TMPY", [%[Y_]] \n" \ + " add %[X_], %[X_], %[INCX_] \n" \ + " add %[Y_], %[Y_], %[INCY_] \n" \ + " fmadd "OUT", "TMPX", "TMPY", "OUT" \n" + +#define KERNEL_F \ + " ldp q16, q17, [%[X_]] \n" \ + " ldp q24, q25, [%[Y_]] \n" \ + " ldp q18, q19, [%[X_], #32] \n" \ + " ldp q26, q27, [%[Y_], #32] \n" \ + " fmla v0.2d, v16.2d, v24.2d \n" \ + " fmla v1.2d, v17.2d, v25.2d \n" \ + " ldp q20, q21, [%[X_], #64] \n" \ + " ldp q28, q29, [%[Y_], #64] \n" \ + " fmla v2.2d, v18.2d, v26.2d \n" \ + " fmla v3.2d, v19.2d, v27.2d \n" \ + " ldp q22, q23, [%[X_], #96] \n" \ + " ldp q30, q31, [%[Y_], #96] \n" \ + " add %[Y_], %[Y_], #128 \n" \ + " add %[X_], %[X_], #128 \n" \ + " fmla v4.2d, v20.2d, v28.2d \n" \ + " fmla v5.2d, v21.2d, v29.2d \n" \ + " PRFM PLDL1KEEP, [%[X_], #896] \n" \ + " PRFM PLDL1KEEP, [%[Y_], #896] \n" \ + " PRFM PLDL1KEEP, [%[X_], #896+64] \n" \ + " PRFM PLDL1KEEP, [%[Y_], #896+64] \n" \ + " fmla v6.2d, v22.2d, v30.2d \n" \ + 
" fmla v7.2d, v23.2d, v31.2d \n" \ + " ldp q16, q17, [%[X_]] \n" \ + " ldp q24, q25, [%[Y_]] \n" \ + " ldp q18, q19, [%[X_], #32] \n" \ + " ldp q26, q27, [%[Y_], #32] \n" \ + " fmla v0.2d, v16.2d, v24.2d \n" \ + " fmla v1.2d, v17.2d, v25.2d \n" \ + " ldp q20, q21, [%[X_], #64] \n" \ + " ldp q28, q29, [%[Y_], #64] \n" \ + " fmla v2.2d, v18.2d, v26.2d \n" \ + " fmla v3.2d, v19.2d, v27.2d \n" \ + " ldp q22, q23, [%[X_], #96] \n" \ + " ldp q30, q31, [%[Y_], #96] \n" \ + " add %[Y_], %[Y_], #128 \n" \ + " add %[X_], %[X_], #128 \n" \ + " fmla v4.2d, v20.2d, v28.2d \n" \ + " fmla v5.2d, v21.2d, v29.2d \n" \ + " PRFM PLDL1KEEP, [%[X_], #896] \n" \ + " PRFM PLDL1KEEP, [%[Y_], #896] \n" \ + " PRFM PLDL1KEEP, [%[X_], #896+64] \n" \ + " PRFM PLDL1KEEP, [%[Y_], #896+64] \n" \ + " fmla v6.2d, v22.2d, v30.2d \n" \ + " fmla v7.2d, v23.2d, v31.2d \n" + +#define KERNEL_F_FINALIZE \ + " fadd v0.2d, v0.2d, v1.2d \n" \ + " fadd v2.2d, v2.2d, v3.2d \n" \ + " fadd v4.2d, v4.2d, v5.2d \n" \ + " fadd v6.2d, v6.2d, v7.2d \n" \ + " fadd v0.2d, v0.2d, v2.2d \n" \ + " fadd v4.2d, v4.2d, v6.2d \n" \ + " fadd v0.2d, v0.2d, v4.2d \n" \ + " faddp "OUT", v0.2d \n" +#endif /* !defined(DOUBLE) */ + +static RETURN_TYPE dot_kernel_asimd(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y) +{ + RETURN_TYPE dot = 0.0; + BLASLONG j = 0; + + __asm__ __volatile__ ( + " fmov "OUT", "REG0" \n" + " fmov d1, xzr \n" + " fmov d2, xzr \n" + " fmov d3, xzr \n" + " fmov d4, xzr \n" + " fmov d5, xzr \n" + " fmov d6, xzr \n" + " fmov d7, xzr \n" + " cmp %[INCX_], #1 \n" + " bne 5f //dot_kernel_S_BEGIN \n" + " cmp %[INCY_], #1 \n" + " bne 5f //dot_kernel_S_BEGIN \n" + + "1: //dot_kernel_F_BEGIN: \n" + " lsl %[INCX_], %[INCX_], "INC_SHIFT" \n" + " lsl %[INCY_], %[INCY_], "INC_SHIFT" \n" + " asr %[J_], %[N_], #"N_DIV_SHIFT" \n" + " cmp %[J_], xzr \n" + " beq 3f //dot_kernel_F1 \n" +#if !(defined(__clang__) && defined(OS_WINDOWS)) + " .align 5 \n" +#endif + "2: //dot_kernel_F: \n" + " "KERNEL_F" \n" + " subs %[J_], %[J_], #1 \n" + " bne 2b //dot_kernel_F \n" + " "KERNEL_F_FINALIZE" \n" + + "3: //dot_kernel_F1: \n" + " ands %[J_], %[N_], #"N_REM_MASK" \n" + " ble 9f //dot_kernel_L999 \n" + + "4: //dot_kernel_F10: \n" + " "KERNEL_F1" \n" + " subs %[J_], %[J_], #1 \n" + " bne 4b //dot_kernel_F10 \n" + " b 9f //dot_kernel_L999 \n" + + "5: //dot_kernel_S_BEGIN: \n" + " lsl %[INCX_], %[INCX_], "INC_SHIFT" \n" + " lsl %[INCY_], %[INCY_], "INC_SHIFT" \n" + " asr %[J_], %[N_], #2 \n" + " cmp %[J_], xzr \n" + " ble 7f //dot_kernel_S1 \n" + + "6: //dot_kernel_S4: \n" + " "KERNEL_F1" \n" + " "KERNEL_F1" \n" + " "KERNEL_F1" \n" + " "KERNEL_F1" \n" + " subs %[J_], %[J_], #1 \n" + " bne 6b //dot_kernel_S4 \n" + + "7: //dot_kernel_S1: \n" + " ands %[J_], %[N_], #3 \n" + " ble 9f //dot_kernel_L999 \n" + + "8: //dot_kernel_S10: \n" + " "KERNEL_F1" \n" + " subs %[J_], %[J_], #1 \n" + " bne 8b //dot_kernel_S10 \n" + + "9: //dot_kernel_L999: \n" + + : [DOT_] "=&w" (dot) + : [N_] "r" (n), + [X_] "r" (x), + [INCX_] "r" (inc_x), + [Y_] "r" (y), + [INCY_] "r" (inc_y), + [J_] "r" (j) + : "cc", + "memory", + "d1", "d2", "d3", "d4", "d5", "d6", "d7" + ); + + return dot; +} diff --git a/kernel/arm64/sasum_thunderx2t99.c b/kernel/arm64/sasum_thunderx2t99.c index 014c667bac..7e7ef96a5a 100644 --- a/kernel/arm64/sasum_thunderx2t99.c +++ b/kernel/arm64/sasum_thunderx2t99.c @@ -1,262 +1,263 @@ -/*************************************************************************** -Copyright (c) 2017, The OpenBLAS Project -All rights reserved. 
-Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: -1. Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in -the documentation and/or other materials provided with the -distribution. -3. Neither the name of the OpenBLAS project nor the names of -its contributors may be used to endorse or promote products -derived from this software without specific prior written permission. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE -USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*****************************************************************************/ - -#include "common.h" - -#include - -#define N "x0" /* vector length */ -#define X "x1" /* "X" vector address */ -#define INC_X "x2" /* "X" stride */ -#define J "x5" /* loop variable */ - -#define REG0 "wzr" -#define SUMF "s0" -#define SUMFD "d0" - -/******************************************************************************/ - -#define KERNEL_F1 \ - "ldr s1, ["X"] \n" \ - "add "X", "X", #4 \n" \ - "fabs s1, s1 \n" \ - "fadd "SUMF", "SUMF", s1 \n" - -#define KERNEL_F64 \ - "ldr q16, ["X"] \n" \ - "ldr q17, ["X", #16] \n" \ - "ldr q18, ["X", #32] \n" \ - "ldr q19, ["X", #48] \n" \ - "ldp q20, q21, ["X", #64] \n" \ - "ldp q22, q23, ["X", #96] \n" \ - "fabs v16.4s, v16.4s \n" \ - "fabs v17.4s, v17.4s \n" \ - "fabs v18.4s, v18.4s \n" \ - "fabs v19.4s, v19.4s \n" \ - "ldp q24, q25, ["X", #128] \n" \ - "ldp q26, q27, ["X", #160] \n" \ - "fabs v20.4s, v20.4s \n" \ - "fabs v21.4s, v21.4s \n" \ - "fabs v22.4s, v22.4s \n" \ - "fabs v23.4s, v23.4s \n" \ - "fadd v16.4s, v16.4s, v17.4s \n" \ - "fadd v18.4s, v18.4s, v19.4s \n" \ - "ldp q28, q29, ["X", #192] \n" \ - "ldp q30, q31, ["X", #224] \n" \ - "fabs v24.4s, v24.4s \n" \ - "fabs v25.4s, v25.4s \n" \ - "fabs v26.4s, v26.4s \n" \ - "fabs v27.4s, v27.4s \n" \ - "add "X", "X", #256 \n" \ - "fadd v20.4s, v20.4s, v21.4s \n" \ - "fadd v22.4s, v22.4s, v23.4s \n" \ - "fabs v28.4s, v28.4s \n" \ - "fabs v29.4s, v29.4s \n" \ - "fabs v30.4s, v30.4s \n" \ - "fabs v31.4s, v31.4s \n" \ - "PRFM PLDL1KEEP, ["X", #1024] \n" \ - "PRFM PLDL1KEEP, ["X", #1024+64] \n" \ - "fadd v24.4s, v24.4s, v25.4s \n" \ - "fadd v26.4s, v26.4s, v27.4s \n" \ - "fadd v0.4s, v0.4s, v16.4s \n" \ - "fadd v1.4s, v1.4s, v18.4s \n" \ - "fadd v2.4s, v2.4s, v20.4s \n" \ - "fadd v3.4s, v3.4s, v22.4s \n" \ - "PRFM PLDL1KEEP, ["X", #1024+128] \n" \ - "PRFM PLDL1KEEP, ["X", #1024+192] \n" \ - "fadd v28.4s, v28.4s, v29.4s \n" \ - "fadd v30.4s, v30.4s, v31.4s \n" \ - "fadd v4.4s, v4.4s, v24.4s \n" \ - "fadd v5.4s, v5.4s, v26.4s \n" \ - "fadd v6.4s, v6.4s, v28.4s \n" \ - "fadd v7.4s, v7.4s, v30.4s \n" 
- -#define KERNEL_F64_FINALIZE \ - "fadd v0.4s, v0.4s, v1.4s \n" \ - "fadd v2.4s, v2.4s, v3.4s \n" \ - "fadd v4.4s, v4.4s, v5.4s \n" \ - "fadd v6.4s, v6.4s, v7.4s \n" \ - "fadd v0.4s, v0.4s, v2.4s \n" \ - "fadd v4.4s, v4.4s, v6.4s \n" \ - "fadd v0.4s, v0.4s, v4.4s \n" \ - "ext v1.16b, v0.16b, v0.16b, #8 \n" \ - "fadd v0.2s, v0.2s, v1.2s \n" \ - "faddp "SUMF", v0.2s \n" - -#define INIT_S \ - "lsl "INC_X", "INC_X", #2 \n" - -#define KERNEL_S1 \ - "ldr s1, ["X"] \n" \ - "add "X", "X", "INC_X" \n" \ - "fabs s1, s1 \n" \ - "fadd "SUMF", "SUMF", s1 \n" - - -#if defined(SMP) -extern int blas_level1_thread_with_return_value(int mode, BLASLONG m, BLASLONG n, - BLASLONG k, void *alpha, void *a, BLASLONG lda, void *b, BLASLONG ldb, - void *c, BLASLONG ldc, int (*function)(), int nthreads); -#endif - - -static FLOAT sasum_compute(BLASLONG n, FLOAT *x, BLASLONG inc_x) -{ - FLOAT asum = 0.0 ; - - if ( n < 0 ) return(asum); - - __asm__ __volatile__ ( - " mov "N", %[N_] \n" - " mov "X", %[X_] \n" - " mov "INC_X", %[INCX_] \n" - " fmov "SUMF", "REG0" \n" - " fmov s1, "REG0" \n" - " fmov s2, "REG0" \n" - " fmov s3, "REG0" \n" - " fmov s4, "REG0" \n" - " fmov s5, "REG0" \n" - " fmov s6, "REG0" \n" - " fmov s7, "REG0" \n" - " cmp "N", xzr \n" - " ble 9f //asum_kernel_L999 \n" - " cmp "INC_X", xzr \n" - " ble 9f //asum_kernel_L999 \n" - " cmp "INC_X", #1 \n" - " bne 5f //asum_kernel_S_BEGIN \n" - - "1: //asum_kernel_F_BEGIN: \n" - " asr "J", "N", #6 \n" - " cmp "J", xzr \n" - " beq 3f //asum_kernel_F1 \n" - - ".align 5 \n" - "2: //asum_kernel_F64: \n" - " "KERNEL_F64" \n" - " subs "J", "J", #1 \n" - " bne 2b //asum_kernel_F64 \n" - " "KERNEL_F64_FINALIZE" \n" - - "3: //asum_kernel_F1: \n" - " ands "J", "N", #63 \n" - " ble 9f //asum_kernel_L999 \n" - - "4: //asum_kernel_F10: \n" - " "KERNEL_F1" \n" - " subs "J", "J", #1 \n" - " bne 4b //asum_kernel_F10 \n" - " b 9f //asum_kernel_L999 \n" - - "5: //asum_kernel_S_BEGIN: \n" - " "INIT_S" \n" - " asr "J", "N", #2 \n" - " cmp "J", xzr \n" - " ble 7f //asum_kernel_S1 \n" - - "6: //asum_kernel_S4: \n" - " "KERNEL_S1" \n" - " "KERNEL_S1" \n" - " "KERNEL_S1" \n" - " "KERNEL_S1" \n" - " subs "J", "J", #1 \n" - " bne 6b //asum_kernel_S4 \n" - - "7: //asum_kernel_S1: \n" - " ands "J", "N", #3 \n" - " ble 9f //asum_kernel_L999 \n" - - "8: //asum_kernel_S10: \n" - " "KERNEL_S1" \n" - " subs "J", "J", #1 \n" - " bne 8b //asum_kernel_S10 \n" - - "9: //asum_kernel_L999: \n" - " fmov %[ASUM_], "SUMFD" \n" - - : [ASUM_] "=r" (asum) //%0 - : [N_] "r" (n), //%1 - [X_] "r" (x), //%2 - [INCX_] "r" (inc_x) //%3 - : "cc", - "memory", - "x0", "x1", "x2", "x3", "x4", "x5", - "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7" - ); - - return asum; -} - -#if defined(SMP) -static int sasum_thread_function(BLASLONG n, BLASLONG dummy0, - BLASLONG dummy1, FLOAT dummy2, FLOAT *x, BLASLONG inc_x, FLOAT *y, - BLASLONG inc_y, FLOAT *result, BLASLONG dummy3) -{ - *result = sasum_compute(n, x, inc_x); - - return 0; -} -#endif - -FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) -{ -#if defined(SMP) - int nthreads; - FLOAT dummy_alpha; -#endif - FLOAT asum = 0.0; - -#if defined(SMP) - if (inc_x == 0 || n <= 10000) - nthreads = 1; - else - nthreads = num_cpu_avail(1); - - if (nthreads == 1) { - asum = sasum_compute(n, x, inc_x); - } else { - int mode, i; - char result[MAX_CPU_NUMBER * sizeof(double) * 2]; - FLOAT *ptr; - - mode = BLAS_SINGLE; - - blas_level1_thread_with_return_value(mode, n, 0, 0, &dummy_alpha, - x, inc_x, NULL, 0, result, 0, - ( void *)sasum_thread_function, nthreads); - - ptr = (FLOAT 
*)result; - for (i = 0; i < nthreads; i++) { - asum = asum + (*ptr); - ptr = (FLOAT *)(((char *)ptr) + sizeof(double) * 2); - } - } -#else - asum = sasum_compute(n, x, inc_x); -#endif - - return asum; -} +/*************************************************************************** +Copyright (c) 2017, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/ + +#include "common.h" + +#include + +#define N "x0" /* vector length */ +#define X "x1" /* "X" vector address */ +#define INC_X "x2" /* "X" stride */ +#define J "x5" /* loop variable */ + +#define REG0 "wzr" +#define SUMF "s0" +#define SUMFD "d0" + +/******************************************************************************/ + +#define KERNEL_F1 \ + "ldr s1, ["X"] \n" \ + "add "X", "X", #4 \n" \ + "fabs s1, s1 \n" \ + "fadd "SUMF", "SUMF", s1 \n" + +#define KERNEL_F64 \ + "ldr q16, ["X"] \n" \ + "ldr q17, ["X", #16] \n" \ + "ldr q18, ["X", #32] \n" \ + "ldr q19, ["X", #48] \n" \ + "ldp q20, q21, ["X", #64] \n" \ + "ldp q22, q23, ["X", #96] \n" \ + "fabs v16.4s, v16.4s \n" \ + "fabs v17.4s, v17.4s \n" \ + "fabs v18.4s, v18.4s \n" \ + "fabs v19.4s, v19.4s \n" \ + "ldp q24, q25, ["X", #128] \n" \ + "ldp q26, q27, ["X", #160] \n" \ + "fabs v20.4s, v20.4s \n" \ + "fabs v21.4s, v21.4s \n" \ + "fabs v22.4s, v22.4s \n" \ + "fabs v23.4s, v23.4s \n" \ + "fadd v16.4s, v16.4s, v17.4s \n" \ + "fadd v18.4s, v18.4s, v19.4s \n" \ + "ldp q28, q29, ["X", #192] \n" \ + "ldp q30, q31, ["X", #224] \n" \ + "fabs v24.4s, v24.4s \n" \ + "fabs v25.4s, v25.4s \n" \ + "fabs v26.4s, v26.4s \n" \ + "fabs v27.4s, v27.4s \n" \ + "add "X", "X", #256 \n" \ + "fadd v20.4s, v20.4s, v21.4s \n" \ + "fadd v22.4s, v22.4s, v23.4s \n" \ + "fabs v28.4s, v28.4s \n" \ + "fabs v29.4s, v29.4s \n" \ + "fabs v30.4s, v30.4s \n" \ + "fabs v31.4s, v31.4s \n" \ + "PRFM PLDL1KEEP, ["X", #1024] \n" \ + "PRFM PLDL1KEEP, ["X", #1024+64] \n" \ + "fadd v24.4s, v24.4s, v25.4s \n" \ + "fadd v26.4s, v26.4s, v27.4s \n" \ + "fadd v0.4s, v0.4s, v16.4s \n" \ + "fadd v1.4s, v1.4s, v18.4s \n" \ + "fadd v2.4s, v2.4s, v20.4s \n" \ + "fadd v3.4s, v3.4s, v22.4s \n" \ + "PRFM PLDL1KEEP, ["X", #1024+128] \n" \ + "PRFM PLDL1KEEP, ["X", #1024+192] \n" \ + "fadd v28.4s, v28.4s, v29.4s \n" \ + "fadd v30.4s, v30.4s, v31.4s \n" \ + "fadd v4.4s, v4.4s, v24.4s \n" \ + "fadd v5.4s, v5.4s, v26.4s \n" \ + "fadd v6.4s, v6.4s, v28.4s \n" \ + "fadd v7.4s, v7.4s, v30.4s \n" + +#define KERNEL_F64_FINALIZE \ + "fadd v0.4s, v0.4s, v1.4s \n" \ + "fadd v2.4s, v2.4s, v3.4s \n" \ + "fadd v4.4s, v4.4s, v5.4s \n" \ + "fadd v6.4s, v6.4s, v7.4s \n" \ + "fadd v0.4s, v0.4s, v2.4s \n" \ + "fadd v4.4s, v4.4s, v6.4s \n" \ + "fadd v0.4s, v0.4s, v4.4s \n" \ + "ext v1.16b, v0.16b, v0.16b, #8 \n" \ + "fadd v0.2s, v0.2s, v1.2s \n" \ + "faddp "SUMF", v0.2s \n" + +#define INIT_S \ + "lsl "INC_X", "INC_X", #2 \n" + +#define KERNEL_S1 \ + "ldr s1, ["X"] \n" \ + "add "X", "X", "INC_X" \n" \ + "fabs s1, s1 \n" \ + "fadd "SUMF", "SUMF", s1 \n" + + +#if defined(SMP) +extern int blas_level1_thread_with_return_value(int mode, BLASLONG m, BLASLONG n, + BLASLONG k, void *alpha, void *a, BLASLONG lda, void *b, BLASLONG ldb, + void *c, BLASLONG ldc, int (*function)(), int nthreads); +#endif + + +static FLOAT sasum_compute(BLASLONG n, FLOAT *x, BLASLONG inc_x) +{ + FLOAT asum = 0.0 ; + + if ( n < 0 ) return(asum); + + __asm__ __volatile__ ( + " mov "N", %[N_] \n" + " mov "X", %[X_] \n" + " mov "INC_X", %[INCX_] \n" + " fmov "SUMF", "REG0" \n" + " fmov s1, "REG0" \n" + " fmov s2, "REG0" \n" + " fmov s3, "REG0" \n" + " fmov s4, "REG0" \n" + " fmov s5, "REG0" \n" + " fmov s6, "REG0" \n" + " fmov s7, "REG0" \n" + " cmp "N", xzr \n" + " ble 9f //asum_kernel_L999 \n" + " cmp "INC_X", xzr \n" + " ble 9f //asum_kernel_L999 \n" + " cmp "INC_X", #1 \n" + " bne 5f //asum_kernel_S_BEGIN \n" + + "1: //asum_kernel_F_BEGIN: \n" + " asr "J", 
"N", #6 \n" + " cmp "J", xzr \n" + " beq 3f //asum_kernel_F1 \n" +#if !(defined(__clang__) && defined(OS_WINDOWS)) + ".align 5 \n" +#endif + "2: //asum_kernel_F64: \n" + " "KERNEL_F64" \n" + " subs "J", "J", #1 \n" + " bne 2b //asum_kernel_F64 \n" + " "KERNEL_F64_FINALIZE" \n" + + "3: //asum_kernel_F1: \n" + " ands "J", "N", #63 \n" + " ble 9f //asum_kernel_L999 \n" + + "4: //asum_kernel_F10: \n" + " "KERNEL_F1" \n" + " subs "J", "J", #1 \n" + " bne 4b //asum_kernel_F10 \n" + " b 9f //asum_kernel_L999 \n" + + "5: //asum_kernel_S_BEGIN: \n" + " "INIT_S" \n" + " asr "J", "N", #2 \n" + " cmp "J", xzr \n" + " ble 7f //asum_kernel_S1 \n" + + "6: //asum_kernel_S4: \n" + " "KERNEL_S1" \n" + " "KERNEL_S1" \n" + " "KERNEL_S1" \n" + " "KERNEL_S1" \n" + " subs "J", "J", #1 \n" + " bne 6b //asum_kernel_S4 \n" + + "7: //asum_kernel_S1: \n" + " ands "J", "N", #3 \n" + " ble 9f //asum_kernel_L999 \n" + + "8: //asum_kernel_S10: \n" + " "KERNEL_S1" \n" + " subs "J", "J", #1 \n" + " bne 8b //asum_kernel_S10 \n" + + "9: //asum_kernel_L999: \n" + " fmov %[ASUM_], "SUMFD" \n" + + : [ASUM_] "=r" (asum) //%0 + : [N_] "r" (n), //%1 + [X_] "r" (x), //%2 + [INCX_] "r" (inc_x) //%3 + : "cc", + "memory", + "x0", "x1", "x2", "x3", "x4", "x5", + "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7" + ); + + return asum; +} + +#if defined(SMP) +static int sasum_thread_function(BLASLONG n, BLASLONG dummy0, + BLASLONG dummy1, FLOAT dummy2, FLOAT *x, BLASLONG inc_x, FLOAT *y, + BLASLONG inc_y, FLOAT *result, BLASLONG dummy3) +{ + *result = sasum_compute(n, x, inc_x); + + return 0; +} +#endif + +FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) +{ +#if defined(SMP) + int nthreads; + FLOAT dummy_alpha; +#endif + FLOAT asum = 0.0; + +#if defined(SMP) + if (inc_x == 0 || n <= 10000) + nthreads = 1; + else + nthreads = num_cpu_avail(1); + + if (nthreads == 1) { + asum = sasum_compute(n, x, inc_x); + } else { + int mode, i; + char result[MAX_CPU_NUMBER * sizeof(double) * 2]; + FLOAT *ptr; + + mode = BLAS_SINGLE; + + blas_level1_thread_with_return_value(mode, n, 0, 0, &dummy_alpha, + x, inc_x, NULL, 0, result, 0, + ( void *)sasum_thread_function, nthreads); + + ptr = (FLOAT *)result; + for (i = 0; i < nthreads; i++) { + asum = asum + (*ptr); + ptr = (FLOAT *)(((char *)ptr) + sizeof(double) * 2); + } + } +#else + asum = sasum_compute(n, x, inc_x); +#endif + + return asum; +} diff --git a/kernel/arm64/zasum_thunderx2t99.c b/kernel/arm64/zasum_thunderx2t99.c index 1d303a9a30..435867c066 100644 --- a/kernel/arm64/zasum_thunderx2t99.c +++ b/kernel/arm64/zasum_thunderx2t99.c @@ -1,262 +1,263 @@ -/*************************************************************************** -Copyright (c) 2017, The OpenBLAS Project -All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: -1. Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in -the documentation and/or other materials provided with the -distribution. -3. Neither the name of the OpenBLAS project nor the names of -its contributors may be used to endorse or promote products -derived from this software without specific prior written permission. 
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE -USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*****************************************************************************/ - -#include "common.h" - -#include - -#define N "x0" /* vector length */ -#define X "x1" /* "X" vector address */ -#define INC_X "x2" /* "X" stride */ -#define J "x5" /* loop variable */ - -#define REG0 "xzr" -#define SUMF "d0" -#define TMPF "d1" - -/******************************************************************************/ - -#define KERNEL_F1 \ - "ldr q1, ["X"] \n" \ - "add "X", "X", #16 \n" \ - "fabs v1.2d, v1.2d \n" \ - "faddp d1, v1.2d \n" \ - "fadd "SUMF", "SUMF", d1 \n" - -#define KERNEL_F16 \ - "ldr q16, ["X"] \n" \ - "ldr q17, ["X", #16] \n" \ - "ldr q18, ["X", #32] \n" \ - "ldr q19, ["X", #48] \n" \ - "ldp q20, q21, ["X", #64] \n" \ - "ldp q22, q23, ["X", #96] \n" \ - "fabs v16.2d, v16.2d \n" \ - "fabs v17.2d, v17.2d \n" \ - "fabs v18.2d, v18.2d \n" \ - "fabs v19.2d, v19.2d \n" \ - "ldp q24, q25, ["X", #128] \n" \ - "ldp q26, q27, ["X", #160] \n" \ - "fabs v20.2d, v20.2d \n" \ - "fabs v21.2d, v21.2d \n" \ - "fabs v22.2d, v22.2d \n" \ - "fabs v23.2d, v23.2d \n" \ - "fadd v16.2d, v16.2d, v17.2d \n" \ - "fadd v18.2d, v18.2d, v19.2d \n" \ - "ldp q28, q29, ["X", #192] \n" \ - "ldp q30, q31, ["X", #224] \n" \ - "fabs v24.2d, v24.2d \n" \ - "fabs v25.2d, v25.2d \n" \ - "fabs v26.2d, v26.2d \n" \ - "fabs v27.2d, v27.2d \n" \ - "add "X", "X", #256 \n" \ - "fadd v20.2d, v20.2d, v21.2d \n" \ - "fadd v22.2d, v22.2d, v23.2d \n" \ - "fabs v28.2d, v28.2d \n" \ - "fabs v29.2d, v29.2d \n" \ - "fabs v30.2d, v30.2d \n" \ - "fabs v31.2d, v31.2d \n" \ - "PRFM PLDL1KEEP, ["X", #1024] \n" \ - "PRFM PLDL1KEEP, ["X", #1024+64] \n" \ - "fadd v24.2d, v24.2d, v25.2d \n" \ - "fadd v26.2d, v26.2d, v27.2d \n" \ - "fadd v28.2d, v28.2d, v29.2d \n" \ - "fadd v30.2d, v30.2d, v31.2d \n" \ - "fadd v0.2d, v0.2d, v16.2d \n" \ - "fadd v1.2d, v1.2d, v18.2d \n" \ - "fadd v2.2d, v2.2d, v20.2d \n" \ - "fadd v3.2d, v3.2d, v22.2d \n" \ - "PRFM PLDL1KEEP, ["X", #1024+128] \n" \ - "PRFM PLDL1KEEP, ["X", #1024+192] \n" \ - "fadd v4.2d, v4.2d, v24.2d \n" \ - "fadd v5.2d, v5.2d, v26.2d \n" \ - "fadd v6.2d, v6.2d, v28.2d \n" \ - "fadd v7.2d, v7.2d, v30.2d \n" - -#define KERNEL_F16_FINALIZE \ - "fadd v0.2d, v0.2d, v1.2d \n" \ - "fadd v2.2d, v2.2d, v3.2d \n" \ - "fadd v4.2d, v4.2d, v5.2d \n" \ - "fadd v6.2d, v6.2d, v7.2d \n" \ - "fadd v0.2d, v0.2d, v2.2d \n" \ - "fadd v4.2d, v4.2d, v6.2d \n" \ - "fadd v0.2d, v0.2d, v4.2d \n" \ - "faddp "SUMF", v0.2d \n" - -#define INIT_S \ - "lsl "INC_X", "INC_X", #4 \n" - -#define KERNEL_S1 \ - "ldr q1, ["X"] \n" \ - "add "X", "X", "INC_X" \n" \ - "fabs v1.2d, v1.2d \n" \ - "faddp d1, v1.2d \n" \ - "fadd "SUMF", "SUMF", d1 \n" - - -#if defined(SMP) -extern int blas_level1_thread_with_return_value(int mode, BLASLONG m, BLASLONG n, - BLASLONG k, void *alpha, void 
*a, BLASLONG lda, void *b, BLASLONG ldb, - void *c, BLASLONG ldc, int (*function)(), int nthreads); -#endif - - -static FLOAT zasum_compute(BLASLONG n, FLOAT *x, BLASLONG inc_x) -{ - FLOAT asum = 0.0 ; - - if ( n < 0 ) return(asum); - - __asm__ __volatile__ ( - " mov "N", %[N_] \n" - " mov "X", %[X_] \n" - " mov "INC_X", %[INCX_] \n" - " fmov "SUMF", "REG0" \n" - " fmov d1, "REG0" \n" - " fmov d2, "REG0" \n" - " fmov d3, "REG0" \n" - " fmov d4, "REG0" \n" - " fmov d5, "REG0" \n" - " fmov d6, "REG0" \n" - " fmov d7, "REG0" \n" - " cmp "N", xzr \n" - " ble 9f //asum_kernel_L999 \n" - " cmp "INC_X", xzr \n" - " ble 9f //asum_kernel_L999 \n" - " cmp "INC_X", #1 \n" - " bne 5f //asum_kernel_S_BEGIN \n" - - "1: //asum_kernel_F_BEGIN: \n" - " asr "J", "N", #4 \n" - " cmp "J", xzr \n" - " beq 3f //asum_kernel_F1 \n" - - ".align 5 \n" - "2: //asum_kernel_F16: \n" - " "KERNEL_F16" \n" - " subs "J", "J", #1 \n" - " bne 2b //asum_kernel_F16 \n" - " "KERNEL_F16_FINALIZE" \n" - - "3: //asum_kernel_F1: \n" - " ands "J", "N", #15 \n" - " ble 9f //asum_kernel_L999 \n" - - "4: //asum_kernel_F10: \n" - " "KERNEL_F1" \n" - " subs "J", "J", #1 \n" - " bne 4b //asum_kernel_F10 \n" - " b 9f //asum_kernel_L999 \n" - - "5: //asum_kernel_S_BEGIN: \n" - " "INIT_S" \n" - " asr "J", "N", #2 \n" - " cmp "J", xzr \n" - " ble 7f //asum_kernel_S1 \n" - - "6: //asum_kernel_S4: \n" - " "KERNEL_S1" \n" - " "KERNEL_S1" \n" - " "KERNEL_S1" \n" - " "KERNEL_S1" \n" - " subs "J", "J", #1 \n" - " bne 6b //asum_kernel_S4 \n" - - "7: //asum_kernel_S1: \n" - " ands "J", "N", #3 \n" - " ble 9f //asum_kernel_L999 \n" - - "8: //asum_kernel_S10: \n" - " "KERNEL_S1" \n" - " subs "J", "J", #1 \n" - " bne 8b //asum_kernel_S10 \n" - - "9: //asum_kernel_L999: \n" - " fmov %[ASUM_], "SUMF" \n" - - : [ASUM_] "=r" (asum) //%0 - : [N_] "r" (n), //%1 - [X_] "r" (x), //%2 - [INCX_] "r" (inc_x) //%3 - : "cc", - "memory", - "x0", "x1", "x2", "x3", "x4", "x5", - "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7" - ); - - return asum; -} - -#if defined(SMP) -static int zasum_thread_function(BLASLONG n, BLASLONG dummy0, - BLASLONG dummy1, FLOAT dummy2, FLOAT *x, BLASLONG inc_x, FLOAT *y, - BLASLONG inc_y, FLOAT *result, BLASLONG dummy3) -{ - *result = zasum_compute(n, x, inc_x); - - return 0; -} -#endif - -FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) -{ -#if defined(SMP) - int nthreads; - FLOAT dummy_alpha; -#endif - FLOAT asum = 0.0; - -#if defined(SMP) - if (inc_x == 0 || n <= 10000) - nthreads = 1; - else - nthreads = num_cpu_avail(1); - - if (nthreads == 1) { - asum = zasum_compute(n, x, inc_x); - } else { - int mode, i; - char result[MAX_CPU_NUMBER * sizeof(double) * 2]; - FLOAT *ptr; - - mode = BLAS_DOUBLE | BLAS_COMPLEX; - - blas_level1_thread_with_return_value(mode, n, 0, 0, &dummy_alpha, - x, inc_x, NULL, 0, result, 0, - ( void *)zasum_thread_function, nthreads); - - ptr = (FLOAT *)result; - for (i = 0; i < nthreads; i++) { - asum = asum + (*ptr); - ptr = (FLOAT *)(((char *)ptr) + sizeof(double) * 2); - } - } -#else - asum = zasum_compute(n, x, inc_x); -#endif - - return asum; -} +/*************************************************************************** +Copyright (c) 2017, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. 
Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +#include "common.h" + +#include + +#define N "x0" /* vector length */ +#define X "x1" /* "X" vector address */ +#define INC_X "x2" /* "X" stride */ +#define J "x5" /* loop variable */ + +#define REG0 "xzr" +#define SUMF "d0" +#define TMPF "d1" + +/******************************************************************************/ + +#define KERNEL_F1 \ + "ldr q1, ["X"] \n" \ + "add "X", "X", #16 \n" \ + "fabs v1.2d, v1.2d \n" \ + "faddp d1, v1.2d \n" \ + "fadd "SUMF", "SUMF", d1 \n" + +#define KERNEL_F16 \ + "ldr q16, ["X"] \n" \ + "ldr q17, ["X", #16] \n" \ + "ldr q18, ["X", #32] \n" \ + "ldr q19, ["X", #48] \n" \ + "ldp q20, q21, ["X", #64] \n" \ + "ldp q22, q23, ["X", #96] \n" \ + "fabs v16.2d, v16.2d \n" \ + "fabs v17.2d, v17.2d \n" \ + "fabs v18.2d, v18.2d \n" \ + "fabs v19.2d, v19.2d \n" \ + "ldp q24, q25, ["X", #128] \n" \ + "ldp q26, q27, ["X", #160] \n" \ + "fabs v20.2d, v20.2d \n" \ + "fabs v21.2d, v21.2d \n" \ + "fabs v22.2d, v22.2d \n" \ + "fabs v23.2d, v23.2d \n" \ + "fadd v16.2d, v16.2d, v17.2d \n" \ + "fadd v18.2d, v18.2d, v19.2d \n" \ + "ldp q28, q29, ["X", #192] \n" \ + "ldp q30, q31, ["X", #224] \n" \ + "fabs v24.2d, v24.2d \n" \ + "fabs v25.2d, v25.2d \n" \ + "fabs v26.2d, v26.2d \n" \ + "fabs v27.2d, v27.2d \n" \ + "add "X", "X", #256 \n" \ + "fadd v20.2d, v20.2d, v21.2d \n" \ + "fadd v22.2d, v22.2d, v23.2d \n" \ + "fabs v28.2d, v28.2d \n" \ + "fabs v29.2d, v29.2d \n" \ + "fabs v30.2d, v30.2d \n" \ + "fabs v31.2d, v31.2d \n" \ + "PRFM PLDL1KEEP, ["X", #1024] \n" \ + "PRFM PLDL1KEEP, ["X", #1024+64] \n" \ + "fadd v24.2d, v24.2d, v25.2d \n" \ + "fadd v26.2d, v26.2d, v27.2d \n" \ + "fadd v28.2d, v28.2d, v29.2d \n" \ + "fadd v30.2d, v30.2d, v31.2d \n" \ + "fadd v0.2d, v0.2d, v16.2d \n" \ + "fadd v1.2d, v1.2d, v18.2d \n" \ + "fadd v2.2d, v2.2d, v20.2d \n" \ + "fadd v3.2d, v3.2d, v22.2d \n" \ + "PRFM PLDL1KEEP, ["X", #1024+128] \n" \ + "PRFM PLDL1KEEP, ["X", #1024+192] \n" \ + "fadd v4.2d, v4.2d, v24.2d \n" \ + "fadd v5.2d, v5.2d, v26.2d \n" \ + "fadd v6.2d, v6.2d, v28.2d \n" \ + "fadd v7.2d, v7.2d, v30.2d \n" + +#define KERNEL_F16_FINALIZE \ + "fadd v0.2d, v0.2d, v1.2d \n" \ + "fadd v2.2d, v2.2d, v3.2d \n" \ + "fadd v4.2d, v4.2d, v5.2d \n" \ + "fadd v6.2d, v6.2d, v7.2d \n" \ + "fadd v0.2d, v0.2d, v2.2d \n" \ + "fadd v4.2d, v4.2d, v6.2d \n" \ + "fadd 
v0.2d, v0.2d, v4.2d \n" \ + "faddp "SUMF", v0.2d \n" + +#define INIT_S \ + "lsl "INC_X", "INC_X", #4 \n" + +#define KERNEL_S1 \ + "ldr q1, ["X"] \n" \ + "add "X", "X", "INC_X" \n" \ + "fabs v1.2d, v1.2d \n" \ + "faddp d1, v1.2d \n" \ + "fadd "SUMF", "SUMF", d1 \n" + + +#if defined(SMP) +extern int blas_level1_thread_with_return_value(int mode, BLASLONG m, BLASLONG n, + BLASLONG k, void *alpha, void *a, BLASLONG lda, void *b, BLASLONG ldb, + void *c, BLASLONG ldc, int (*function)(), int nthreads); +#endif + + +static FLOAT zasum_compute(BLASLONG n, FLOAT *x, BLASLONG inc_x) +{ + FLOAT asum = 0.0 ; + + if ( n < 0 ) return(asum); + + __asm__ __volatile__ ( + " mov "N", %[N_] \n" + " mov "X", %[X_] \n" + " mov "INC_X", %[INCX_] \n" + " fmov "SUMF", "REG0" \n" + " fmov d1, "REG0" \n" + " fmov d2, "REG0" \n" + " fmov d3, "REG0" \n" + " fmov d4, "REG0" \n" + " fmov d5, "REG0" \n" + " fmov d6, "REG0" \n" + " fmov d7, "REG0" \n" + " cmp "N", xzr \n" + " ble 9f //asum_kernel_L999 \n" + " cmp "INC_X", xzr \n" + " ble 9f //asum_kernel_L999 \n" + " cmp "INC_X", #1 \n" + " bne 5f //asum_kernel_S_BEGIN \n" + + "1: //asum_kernel_F_BEGIN: \n" + " asr "J", "N", #4 \n" + " cmp "J", xzr \n" + " beq 3f //asum_kernel_F1 \n" +#if !(defined(__clang__) && defined(OS_WINDOWS)) + ".align 5 \n" +#endif + "2: //asum_kernel_F16: \n" + " "KERNEL_F16" \n" + " subs "J", "J", #1 \n" + " bne 2b //asum_kernel_F16 \n" + " "KERNEL_F16_FINALIZE" \n" + + "3: //asum_kernel_F1: \n" + " ands "J", "N", #15 \n" + " ble 9f //asum_kernel_L999 \n" + + "4: //asum_kernel_F10: \n" + " "KERNEL_F1" \n" + " subs "J", "J", #1 \n" + " bne 4b //asum_kernel_F10 \n" + " b 9f //asum_kernel_L999 \n" + + "5: //asum_kernel_S_BEGIN: \n" + " "INIT_S" \n" + " asr "J", "N", #2 \n" + " cmp "J", xzr \n" + " ble 7f //asum_kernel_S1 \n" + + "6: //asum_kernel_S4: \n" + " "KERNEL_S1" \n" + " "KERNEL_S1" \n" + " "KERNEL_S1" \n" + " "KERNEL_S1" \n" + " subs "J", "J", #1 \n" + " bne 6b //asum_kernel_S4 \n" + + "7: //asum_kernel_S1: \n" + " ands "J", "N", #3 \n" + " ble 9f //asum_kernel_L999 \n" + + "8: //asum_kernel_S10: \n" + " "KERNEL_S1" \n" + " subs "J", "J", #1 \n" + " bne 8b //asum_kernel_S10 \n" + + "9: //asum_kernel_L999: \n" + " fmov %[ASUM_], "SUMF" \n" + + : [ASUM_] "=r" (asum) //%0 + : [N_] "r" (n), //%1 + [X_] "r" (x), //%2 + [INCX_] "r" (inc_x) //%3 + : "cc", + "memory", + "x0", "x1", "x2", "x3", "x4", "x5", + "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7" + ); + + return asum; +} + +#if defined(SMP) +static int zasum_thread_function(BLASLONG n, BLASLONG dummy0, + BLASLONG dummy1, FLOAT dummy2, FLOAT *x, BLASLONG inc_x, FLOAT *y, + BLASLONG inc_y, FLOAT *result, BLASLONG dummy3) +{ + *result = zasum_compute(n, x, inc_x); + + return 0; +} +#endif + +FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) +{ +#if defined(SMP) + int nthreads; + FLOAT dummy_alpha; +#endif + FLOAT asum = 0.0; + +#if defined(SMP) + if (inc_x == 0 || n <= 10000) + nthreads = 1; + else + nthreads = num_cpu_avail(1); + + if (nthreads == 1) { + asum = zasum_compute(n, x, inc_x); + } else { + int mode, i; + char result[MAX_CPU_NUMBER * sizeof(double) * 2]; + FLOAT *ptr; + + mode = BLAS_DOUBLE | BLAS_COMPLEX; + + blas_level1_thread_with_return_value(mode, n, 0, 0, &dummy_alpha, + x, inc_x, NULL, 0, result, 0, + ( void *)zasum_thread_function, nthreads); + + ptr = (FLOAT *)result; + for (i = 0; i < nthreads; i++) { + asum = asum + (*ptr); + ptr = (FLOAT *)(((char *)ptr) + sizeof(double) * 2); + } + } +#else + asum = zasum_compute(n, x, inc_x); +#endif + + return asum; +}
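
The guard introduced throughout this patch is the same in every kernel: the `.align 5` directive that pads the hot loop head is emitted only when the compiler is not clang targeting Windows on Arm, where the directive currently misbehaves. The following standalone sketch (not part of the patch) illustrates that pattern in isolation; the counting loop, register choices, and `main` are illustrative only, while `OS_WINDOWS` is assumed to be the usual OpenBLAS platform macro that also appears in the diff above.

```c
/* Minimal sketch of the ".align 5" guard used in this patch.
 * Builds only for AArch64; the loop itself is a placeholder. */
#include <stdio.h>

static int count_up(int n)
{
    int i = 0;

    __asm__ __volatile__ (
        "   mov  x0, %[N_]            \n"   /* loop counter        */
        "   mov  x1, xzr              \n"   /* running result      */
        "   cmp  x0, xzr              \n"
        "   ble  2f                   \n"   /* nothing to do       */
#if !(defined(__clang__) && defined(OS_WINDOWS))
        "   .align 5                  \n"   /* 32-byte align the loop head,
                                               skipped for clang on Windows/ARM
                                               where the directive is problematic */
#endif
        "1:                           \n"
        "   add  x1, x1, #1           \n"
        "   subs x0, x0, #1           \n"
        "   bne  1b                   \n"
        "2:                           \n"
        "   mov  %w[I_], w1           \n"
        : [I_] "=r" (i)
        : [N_] "r" ((long)n)
        : "cc", "x0", "x1"
    );

    return i;
}

int main(void)
{
    printf("%d\n", count_up(42));   /* expected to print 42 */
    return 0;
}
```

Because the guard is a preprocessor conditional rather than an assembler-level check, the affected builds simply lose the alignment padding (a possible minor performance cost) while all other toolchains keep the original code path unchanged.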