diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 5a9dd80d05..6d5447492d 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -101,9 +101,6 @@ jobs: - name: build-ubuntu-clang12 env: {CXX: clang++-12, INSTALL_EXTRA: clang-12 libomp-12-dev} add-llvm-repo: true - - name: build-ubuntu-icpc - env: {CXX: icpc} - intel: true - name: build-ubuntu-icpx env: {CXX: icpx} intel: true diff --git a/include/llama/BlobAllocators.hpp b/include/llama/BlobAllocators.hpp index 8690b1ed93..b365b60fa8 100644 --- a/include/llama/BlobAllocators.hpp +++ b/include/llama/BlobAllocators.hpp @@ -10,9 +10,6 @@ #include #include #include -#ifdef __INTEL_COMPILER -# include -#endif #if defined(_LIBCPP_VERSION) && _LIBCPP_VERSION < 11000 # include #endif diff --git a/include/llama/Tuple.hpp b/include/llama/Tuple.hpp index 45db85875c..f2c1835e96 100644 --- a/include/llama/Tuple.hpp +++ b/include/llama/Tuple.hpp @@ -30,8 +30,6 @@ namespace llama { } - // icpc fails to compile the treemap tests with this ctor -#ifndef __INTEL_COMPILER /// Construct a tuple from forwarded values of potentially different types as the tuple stores. // SFINAE away this ctor if tuple elements cannot be constructed from ctor arguments template< @@ -46,7 +44,6 @@ , rest(std::forward(restArgs)...) { } -#endif FirstElement first; ///< the first element (if existing) #ifndef __NVCC__ diff --git a/include/llama/macros.hpp b/include/llama/macros.hpp index c870c653c6..b4f38b6fca 100644 --- a/include/llama/macros.hpp +++ b/include/llama/macros.hpp @@ -3,7 +3,11 @@ #pragma once -#if defined(__INTEL_COMPILER) || defined(__INTEL_LLVM_COMPILER) +#ifdef __INTEL_COMPILER +# error LLAMA has stopped supporting the Intel Classic Compiler after Intel announced its planned deprecation and replacement by the Intel LLVM-based compiler. Please migrate to Intel's LLVM-based compiler.
+#endif + +#if defined(__INTEL_LLVM_COMPILER) # define LLAMA_INDEPENDENT_DATA _Pragma("ivdep") #elif defined(__clang__) # define LLAMA_INDEPENDENT_DATA _Pragma("clang loop vectorize(assume_safety) interleave(assume_safety)") @@ -28,7 +32,7 @@ # define LLAMA_FN_HOST_ACC_INLINE __host__ __device__ __forceinline__ # elif defined(__GNUC__) || defined(__clang__) # define LLAMA_FN_HOST_ACC_INLINE inline __attribute__((always_inline)) -# elif defined(_MSC_VER) || defined(__INTEL_COMPILER) || defined(__INTEL_LLVM_COMPILER) +# elif defined(_MSC_VER) || defined(__INTEL_LLVM_COMPILER) # define LLAMA_FN_HOST_ACC_INLINE __forceinline # else /// Some offloading parallelization language extensions such a CUDA, OpenACC or OpenMP 4.5 need to specify whether a @@ -44,7 +48,7 @@ #ifndef LLAMA_LAMBDA_INLINE_WITH_SPECIFIERS # if defined(__clang__) || defined(__INTEL_LLVM_COMPILER) # define LLAMA_LAMBDA_INLINE_WITH_SPECIFIERS(...) __attribute__((always_inline)) __VA_ARGS__ -# elif defined(__GNUC__) || defined(__INTEL_COMPILER) || (defined(__NVCC__) && !defined(_MSC_VER)) +# elif defined(__GNUC__) || (defined(__NVCC__) && !defined(_MSC_VER)) # define LLAMA_LAMBDA_INLINE_WITH_SPECIFIERS(...) __VA_ARGS__ __attribute__((always_inline)) # elif defined(_MSC_VER) # define LLAMA_LAMBDA_INLINE_WITH_SPECIFIERS(...) \ @@ -66,9 +70,7 @@ # define LLAMA_SUPPRESS_HOST_DEVICE_WARNING #endif -#if defined(__INTEL_COMPILER) /*|| defined(__INTEL_LLVM_COMPILER)*/ -# define LLAMA_FORCE_INLINE_RECURSIVE _Pragma("forceinline recursive") -#elif defined(_MSC_VER) +#if defined(_MSC_VER) # define LLAMA_FORCE_INLINE_RECURSIVE __pragma(inline_depth(255)) #else /// Forces the compiler to recursively inline the call hiearchy started by the subsequent function call.