diff --git a/CHANGELOG.md b/CHANGELOG.md index dab9f20e3adb..fd86630f0672 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -154,7 +154,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/). - Starting from HIP 5.4.0, the HIP back-end internally uses `hipLaunchHostFunc` instead of a work-around #1883 - Adapted to API changes in CUDA 11.7's stream memory operations #1878 #1919 - Shortened mangled CUDA kernel names #1795 - - CUDA runtime versions checks are now based upon `CUDART_VERSION` instead of `BOOST_LANG_CUDA` #1777 + - CUDA runtime versions checks are now based upon `CUDART_VERSION` instead of `ALPAKA_LANG_CUDA` #1777 - Because of a HIP performance regression the HIP back-end now uses the emulated `atomicAdd(float)` on the `Threads` hierarchy level #1771 - Changed look-up of built-in and emulated atomic functions for the CUDA and HIP back-ends #1768 - The HIP back-end now uses the built-in `atomicAdd(double)` #1767 @@ -396,7 +396,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/). - Renamed namespace `traits` to `trait` #1651 - alpaka now enforces that kernel functions are trivially copyable #1654 - Replaced the internal `hipLaunchKernelGGL()` call with a `kernel<<<...>>>()` call #1663 -- `BOOST_LANG_HIP` will now report a (somewhat) correct version number (for internal consumption) #1664 +- `ALPAKA_LANG_HIP` will now report a (somewhat) correct version number (for internal consumption) #1664 - Refactored `Queue` implementation for CUDA and HIP to reduce code duplication #1667 - `core/CudaHipMath.hpp` was merged back into `math/MathUniformCudaHipBuiltIn.hpp` #1668 - The OpenMP 5 memory fence no longer explicitly sets the `acq_rel` memory order clause since it is the default #1673 @@ -615,7 +615,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/). - fix Intel compiler detection #1070 - CMake: build type CXX flag not passed to nvcc #1073 - work around Intel ICE (Internal Compiler Error) when using std::decay on empty template parameter packs #1074 -- BoostPredef.hpp: Add redefinition of BOOST_COMP_PGI #1082 +- BoostPredef.hpp: Add redefinition of ALPAKA_COMP_PGI #1082 - fix min/max return type deduction #1085 - CMake: fix boost fiber linking #1088 - fix HIP-clang compile #1107 @@ -800,7 +800,7 @@ The script only works if you used the full namespace `alpaka::*` for alpaka func - This use case can now be handled with the support for external CPU queues as can bee seen in the example QueueCpuOmp2CollectiveImpl - previously it was possible to have kernels return values even though they were always ignored. Now kernels are checked to always return void - renamed all files with *Stl suffix to *StdLib -- renamed BOOST_ARCH_CUDA_DEVICE to BOOST_ARCH_PTX +- renamed ALPAKA_ARCH_CUDA_DEVICE to ALPAKA_ARCH_PTX - executors have been renamed due to the upcoming standard C++ feature with a different meaning. All files within alpaka/exec/ have been moved to alpaka/kernel/ and the files and classes have been renamed from Exec* to TaskKernel*. This should not affect users of alpaka but will affect extensions. ## [0.3.6] - 2020-01-06 diff --git a/README.md b/README.md index 8b092720d504..e167d5d3dbe0 100644 --- a/README.md +++ b/README.md @@ -85,9 +85,9 @@ Other compilers or combinations marked with :x: in the table above may work but Dependencies ------------ -[Boost](https://boost.org/) 1.74.0+ is the only mandatory external dependency. +[Boost](https://boost.org/) 1.74.0+ is an optional dependency. 
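To make the new "optional dependency" statement concrete: the snippet below is a minimal sketch (not alpaka code) of the selection logic that the CMake and header changes further down automate. It assumes the `ALPAKA_HAS_BOOST_HEADERS` definition added by this patch; the `counterAdd` helper is purely illustrative.

```cpp
#include <atomic> // defines __cpp_lib_atomic_ref when std::atomic_ref is available

#if !defined(__cpp_lib_atomic_ref) && defined(ALPAKA_HAS_BOOST_HEADERS)
#    include <boost/atomic/atomic_ref.hpp>
#endif

// Hypothetical helper: atomically add to an int owned by the caller.
inline int counterAdd(int& counter, int value)
{
#if defined(__cpp_lib_atomic_ref)
    return std::atomic_ref<int>{counter}.fetch_add(value); // C++20 path, no Boost needed
#elif defined(ALPAKA_HAS_BOOST_HEADERS)
    return boost::atomic_ref<int>{counter}.fetch_add(value); // Boost.Atomic fallback
#else
    // Neither is available: alpaka's CMake then defines ALPAKA_DISABLE_ATOMIC_ATOMICREF and
    // switches to lock-based atomics; this sketch just does the update non-atomically.
    int const old = counter;
    counter += value;
    return old;
#endif
}
```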
+Boost is used to demangle C++ object names and to provide faster atomic operations on CPU back-ends when the compiler does not support the C++20 feature `std::atomic_ref`. The **alpaka** library itself just requires header-only libraries. -However some of the accelerator back-end implementations require different boost libraries to be built. When an accelerator back-end using *CUDA* is enabled, version *11.2* (with nvcc as CUDA compiler) or version *11.2* (with clang as CUDA compiler) of the *CUDA SDK* is the minimum requirement. *NOTE*: When using clang as a native *CUDA* compiler, the *CUDA accelerator back-end* can not be enabled together with any *OpenMP accelerator back-end* because this combination is currently unsupported. diff --git a/cmake/alpakaCommon.cmake b/cmake/alpakaCommon.cmake index 4147f719c1de..c2ade53548ea 100644 --- a/cmake/alpakaCommon.cmake +++ b/cmake/alpakaCommon.cmake @@ -207,6 +207,8 @@ endif() #------------------------------------------------------------------------------- # Find Boost. +# Boost is optional and only required for fast atomics on the CPU side if std::atomic_ref is not available or +# for class name demangling. set(_alpaka_BOOST_MIN_VER "1.74.0") if(${alpaka_DEBUG} GREATER 1) @@ -214,10 +216,14 @@ if(${alpaka_DEBUG} GREATER 1) SET(Boost_DETAILED_FAILURE_MSG ON) endif() -find_package(Boost ${_alpaka_BOOST_MIN_VER} REQUIRED - OPTIONAL_COMPONENTS atomic) +find_package(Boost ${_alpaka_BOOST_MIN_VER} OPTIONAL_COMPONENTS atomic) -target_link_libraries(alpaka INTERFACE Boost::headers) +if(Boost_FOUND) + target_link_libraries(alpaka INTERFACE Boost::headers) + target_compile_definitions(alpaka INTERFACE ALPAKA_HAS_BOOST_HEADERS) +else() + message(STATUS "Boost not available, class name demangling is not supported.") +endif() if(alpaka_ACC_CPU_B_SEQ_T_SEQ_ENABLE OR alpaka_ACC_CPU_B_SEQ_T_THREADS_ENABLE OR @@ -242,68 +248,22 @@ if(alpaka_ACC_CPU_B_SEQ_T_SEQ_ENABLE OR endif() endif() - if(Boost_ATOMIC_FOUND AND (NOT alpaka_HAS_STD_ATOMIC_REF)) - message(STATUS "boost::atomic_ref found") - target_link_libraries(alpaka INTERFACE Boost::atomic) + if(NOT alpaka_HAS_STD_ATOMIC_REF) + if(Boost_ATOMIC_FOUND) + message(STATUS "boost::atomic_ref found") + target_link_libraries(alpaka INTERFACE Boost::atomic) + else() + message(STATUS "boost::atomic_ref NOT found") + endif() endif() endif() if(alpaka_ACC_CPU_DISABLE_ATOMIC_REF OR ((NOT alpaka_HAS_STD_ATOMIC_REF) AND (NOT Boost_ATOMIC_FOUND))) - message(STATUS "atomic_ref was not found or manually disabled. Falling back to lock-based CPU atomics.") + message(STATUS "atomic_ref or boost::atomic_ref was not found or manually disabled. 
Falling back to lock-based CPU atomics.") target_compile_definitions(alpaka INTERFACE ALPAKA_DISABLE_ATOMIC_ATOMICREF) endif() endif() -if(${alpaka_DEBUG} GREATER 1) - message(STATUS "Boost in:") - cmake_print_variables(BOOST_ROOT) - cmake_print_variables(BOOSTROOT) - cmake_print_variables(BOOST_INCLUDEDIR) - cmake_print_variables(BOOST_LIBRARYDIR) - cmake_print_variables(Boost_NO_SYSTEM_PATHS) - cmake_print_variables(Boost_ADDITIONAL_VERSIONS) - cmake_print_variables(Boost_USE_MULTITHREADED) - cmake_print_variables(Boost_USE_STATIC_LIBS) - cmake_print_variables(Boost_USE_STATIC_RUNTIME) - cmake_print_variables(Boost_USE_DEBUG_RUNTIME) - cmake_print_variables(Boost_USE_DEBUG_PYTHON) - cmake_print_variables(Boost_USE_STLPORT) - cmake_print_variables(Boost_USE_STLPORT_DEPRECATED_NATIVE_IOSTREAMS) - cmake_print_variables(Boost_COMPILER) - cmake_print_variables(Boost_THREADAPI) - cmake_print_variables(Boost_NAMESPACE) - cmake_print_variables(Boost_DEBUG) - cmake_print_variables(Boost_DETAILED_FAILURE_MSG) - cmake_print_variables(Boost_REALPATH) - cmake_print_variables(Boost_NO_BOOST_CMAKE) - message(STATUS "Boost out:") - cmake_print_variables(Boost_FOUND) - cmake_print_variables(Boost_INCLUDE_DIRS) - cmake_print_variables(Boost_LIBRARY_DIRS) - cmake_print_variables(Boost_LIBRARIES) - cmake_print_variables(Boost_CONTEXT_FOUND) - cmake_print_variables(Boost_CONTEXT_LIBRARY) - cmake_print_variables(Boost_SYSTEM_FOUND) - cmake_print_variables(Boost_SYSTEM_LIBRARY) - cmake_print_variables(Boost_THREAD_FOUND) - cmake_print_variables(Boost_THREAD_LIBRARY) - cmake_print_variables(Boost_ATOMIC_FOUND) - cmake_print_variables(Boost_ATOMIC_LIBRARY) - cmake_print_variables(Boost_CHRONO_FOUND) - cmake_print_variables(Boost_CHRONO_LIBRARY) - cmake_print_variables(Boost_DATE_TIME_FOUND) - cmake_print_variables(Boost_DATE_TIME_LIBRARY) - cmake_print_variables(Boost_VERSION) - cmake_print_variables(Boost_LIB_VERSION) - cmake_print_variables(Boost_MAJOR_VERSION) - cmake_print_variables(Boost_MINOR_VERSION) - cmake_print_variables(Boost_SUBMINOR_VERSION) - cmake_print_variables(Boost_LIB_DIAGNOSTIC_DEFINITIONS) - message(STATUS "Boost cached:") - cmake_print_variables(Boost_INCLUDE_DIR) - cmake_print_variables(Boost_LIBRARY_DIR) -endif() - #------------------------------------------------------------------------------- # If available, use C++20 math constants. Otherwise, fall back to M_PI etc. if(${alpaka_CXX_STANDARD} VERSION_LESS "20") diff --git a/include/alpaka/atomic/AtomicAtomicRef.hpp b/include/alpaka/atomic/AtomicAtomicRef.hpp index 61b825c1a0e2..cf98b742f88a 100644 --- a/include/alpaka/atomic/AtomicAtomicRef.hpp +++ b/include/alpaka/atomic/AtomicAtomicRef.hpp @@ -219,12 +219,12 @@ namespace alpaka T result; do { -# if BOOST_COMP_GNUC || BOOST_COMP_CLANG +# if ALPAKA_COMP_GNUC || ALPAKA_COMP_CLANG # pragma GCC diagnostic push # pragma GCC diagnostic ignored "-Wfloat-equal" # endif result = ((old == compare) ? 
value : old); -# if BOOST_COMP_GNUC || BOOST_COMP_CLANG +# if ALPAKA_COMP_GNUC || ALPAKA_COMP_CLANG # pragma GCC diagnostic pop # endif } while(!ref.compare_exchange_weak(old, result)); diff --git a/include/alpaka/atomic/AtomicCpu.hpp b/include/alpaka/atomic/AtomicCpu.hpp index 5667bd0080bd..205e5ee54b8b 100644 --- a/include/alpaka/atomic/AtomicCpu.hpp +++ b/include/alpaka/atomic/AtomicCpu.hpp @@ -7,14 +7,14 @@ #include "alpaka/core/BoostPredef.hpp" // clang 9/10/11 together with nvcc<11.6.0 as host compiler fails at compile time when using boost::atomic_ref -#ifdef BOOST_COMP_CLANG_AVAILABLE -# if(BOOST_COMP_CLANG < BOOST_VERSION_NUMBER(12, 0, 0) && BOOST_COMP_NVCC \ - && BOOST_COMP_NVCC < BOOST_VERSION_NUMBER(11, 6, 0)) +#ifdef ALPAKA_COMP_CLANG_AVAILABLE +# if(ALPAKA_COMP_CLANG < ALPAKA_VERSION_NUMBER(12, 0, 0) && ALPAKA_COMP_NVCC \ + && ALPAKA_COMP_NVCC < ALPAKA_VERSION_NUMBER(11, 6, 0)) # if !defined(ALPAKA_DISABLE_ATOMIC_ATOMICREF) # define ALPAKA_DISABLE_ATOMIC_ATOMICREF # endif # endif -#endif // BOOST_COMP_CLANG_AVAILABLE +#endif // ALPAKA_COMP_CLANG_AVAILABLE #include "alpaka/atomic/AtomicAtomicRef.hpp" #include "alpaka/atomic/AtomicStdLibLock.hpp" diff --git a/include/alpaka/atomic/AtomicOmpBuiltIn.hpp b/include/alpaka/atomic/AtomicOmpBuiltIn.hpp index e1f0ba0eee8f..b8a39b91cfb6 100644 --- a/include/alpaka/atomic/AtomicOmpBuiltIn.hpp +++ b/include/alpaka/atomic/AtomicOmpBuiltIn.hpp @@ -35,7 +35,7 @@ namespace alpaka T old; auto& ref(*addr); // atomically update ref, but capture the original value in old -# if BOOST_COMP_GNUC +# if ALPAKA_COMP_GNUC # pragma GCC diagnostic push # pragma GCC diagnostic ignored "-Wconversion" # endif @@ -44,7 +44,7 @@ namespace alpaka old = ref; ref += value; } -# if BOOST_COMP_GNUC +# if ALPAKA_COMP_GNUC # pragma GCC diagnostic pop # endif return old; @@ -60,7 +60,7 @@ namespace alpaka T old; auto& ref(*addr); // atomically update ref, but capture the original value in old -# if BOOST_COMP_GNUC +# if ALPAKA_COMP_GNUC # pragma GCC diagnostic push # pragma GCC diagnostic ignored "-Wconversion" # endif @@ -69,7 +69,7 @@ namespace alpaka old = ref; ref -= value; } -# if BOOST_COMP_GNUC +# if ALPAKA_COMP_GNUC # pragma GCC diagnostic pop # endif return old; @@ -103,7 +103,7 @@ namespace alpaka T old; auto& ref(*addr); // atomically update ref, but capture the original value in old -# if BOOST_COMP_GNUC +# if ALPAKA_COMP_GNUC # pragma GCC diagnostic push # pragma GCC diagnostic ignored "-Wconversion" # endif @@ -112,7 +112,7 @@ namespace alpaka old = ref; ref &= value; } -# if BOOST_COMP_GNUC +# if ALPAKA_COMP_GNUC # pragma GCC diagnostic pop # endif return old; @@ -128,7 +128,7 @@ namespace alpaka T old; auto& ref(*addr); // atomically update ref, but capture the original value in old -# if BOOST_COMP_GNUC +# if ALPAKA_COMP_GNUC # pragma GCC diagnostic push # pragma GCC diagnostic ignored "-Wconversion" # endif @@ -137,7 +137,7 @@ namespace alpaka old = ref; ref |= value; } -# if BOOST_COMP_GNUC +# if ALPAKA_COMP_GNUC # pragma GCC diagnostic pop # endif return old; @@ -153,7 +153,7 @@ namespace alpaka T old; auto& ref(*addr); // atomically update ref, but capture the original value in old -# if BOOST_COMP_GNUC +# if ALPAKA_COMP_GNUC # pragma GCC diagnostic push # pragma GCC diagnostic ignored "-Wconversion" # endif @@ -162,7 +162,7 @@ namespace alpaka old = ref; ref ^= value; } -# if BOOST_COMP_GNUC +# if ALPAKA_COMP_GNUC # pragma GCC diagnostic pop # endif return old; diff --git a/include/alpaka/atomic/AtomicStdLibLock.hpp 
b/include/alpaka/atomic/AtomicStdLibLock.hpp index 16a659fb07b9..964c3bae2990 100644 --- a/include/alpaka/atomic/AtomicStdLibLock.hpp +++ b/include/alpaka/atomic/AtomicStdLibLock.hpp @@ -57,7 +57,7 @@ namespace alpaka constexpr size_t hashTableSize = THashTableSize == 0u ? 1u : nextPowerOf2(THashTableSize); size_t const hashedAddr = hash(ptr) & (hashTableSize - 1u); -# if BOOST_COMP_CLANG +# if ALPAKA_COMP_CLANG # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wexit-time-destructors" # endif @@ -65,7 +65,7 @@ namespace alpaka std::mutex, hashTableSize> m_mtxAtomic; //!< The mutex protecting access for an atomic operation. -# if BOOST_COMP_CLANG +# if ALPAKA_COMP_CLANG # pragma clang diagnostic pop # endif return m_mtxAtomic[hashedAddr]; diff --git a/include/alpaka/atomic/AtomicUniformCudaHip.hpp b/include/alpaka/atomic/AtomicUniformCudaHip.hpp index 330e3a4d51d2..f4006e0ced75 100644 --- a/include/alpaka/atomic/AtomicUniformCudaHip.hpp +++ b/include/alpaka/atomic/AtomicUniformCudaHip.hpp @@ -26,17 +26,17 @@ namespace alpaka # if !defined(ALPAKA_HOST_ONLY) -# if defined(ALPAKA_ACC_GPU_CUDA_ENABLED) && !BOOST_LANG_CUDA +# if defined(ALPAKA_ACC_GPU_CUDA_ENABLED) && !ALPAKA_LANG_CUDA # error If ALPAKA_ACC_GPU_CUDA_ENABLED is set, the compiler has to support CUDA! # endif -# if defined(ALPAKA_ACC_GPU_HIP_ENABLED) && !BOOST_LANG_HIP +# if defined(ALPAKA_ACC_GPU_HIP_ENABLED) && !ALPAKA_LANG_HIP # error If ALPAKA_ACC_GPU_HIP_ENABLED is set, the compiler has to support HIP! # endif //! clang is providing a builtin for different atomic functions even if these is not supported for architectures < 6.0 # define CLANG_CUDA_PTX_WORKAROUND \ - (BOOST_COMP_CLANG && BOOST_LANG_CUDA && BOOST_ARCH_PTX < BOOST_VERSION_NUMBER(6, 0, 0)) + (ALPAKA_COMP_CLANG && ALPAKA_LANG_CUDA && ALPAKA_ARCH_PTX < ALPAKA_VERSION_NUMBER(6, 0, 0)) //! These types must be in the global namespace for checking existence of respective functions in global namespace via //! SFINAE, so we use inline namespace. @@ -136,7 +136,7 @@ inline namespace alpakaGlobal }; # endif -# if(BOOST_LANG_HIP) +# if(ALPAKA_LANG_HIP) // HIP shows bad performance with builtin atomicAdd(float*,float) for the hierarchy threads therefore we do not // call the buildin method and instead use the atomicCAS emulation. For details see: // https://github.com/alpaka-group/alpaka/issues/1657 @@ -210,7 +210,7 @@ inline namespace alpakaGlobal # endif // disable HIP atomicMin: see https://github.com/ROCm-Developer-Tools/hipamd/pull/40 -# if(BOOST_LANG_HIP) +# if(ALPAKA_LANG_HIP) template struct AlpakaBuiltInAtomic : std::false_type { @@ -277,7 +277,7 @@ inline namespace alpakaGlobal # endif // disable HIP atomicMax: see https://github.com/ROCm-Developer-Tools/hipamd/pull/40 -# if(BOOST_LANG_HIP) +# if(ALPAKA_LANG_HIP) template struct AlpakaBuiltInAtomic : std::false_type { diff --git a/include/alpaka/atomic/AtomicUniformCudaHipBuiltIn.hpp b/include/alpaka/atomic/AtomicUniformCudaHipBuiltIn.hpp index 86c5120f4fe7..4ad7ede05c48 100644 --- a/include/alpaka/atomic/AtomicUniformCudaHipBuiltIn.hpp +++ b/include/alpaka/atomic/AtomicUniformCudaHipBuiltIn.hpp @@ -18,11 +18,11 @@ # if !defined(ALPAKA_HOST_ONLY) -# if defined(ALPAKA_ACC_GPU_CUDA_ENABLED) && !BOOST_LANG_CUDA +# if defined(ALPAKA_ACC_GPU_CUDA_ENABLED) && !ALPAKA_LANG_CUDA # error If ALPAKA_ACC_GPU_CUDA_ENABLED is set, the compiler has to support CUDA! 
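The `AtomicStdLibLock` hunk above keeps the existing lock-based fallback: a pointer hash selects one mutex out of a small power-of-two table (`hash(ptr) & (hashTableSize - 1u)`). Below is a condensed, stand-alone sketch of that lock-striping idea — the names and the table size are illustrative, not alpaka's.

```cpp
#include <array>
#include <cstddef>
#include <functional>
#include <mutex>

template<std::size_t TTableSize> // assumed to be a power of two
auto mutexFor(void const* ptr) -> std::mutex&
{
    static_assert((TTableSize & (TTableSize - 1u)) == 0u, "TTableSize must be a power of two");
    static std::array<std::mutex, TTableSize> table;
    // Mask the hashed address into the table, mirroring hash(ptr) & (hashTableSize - 1u) above.
    std::size_t const slot = std::hash<void const*>{}(ptr) & (TTableSize - 1u);
    return table[slot];
}

// Usage: serialize a read-modify-write on *addr when no atomic_ref is available.
inline int lockedAdd(int* addr, int value)
{
    std::lock_guard<std::mutex> guard(mutexFor<16>(addr));
    int const old = *addr;
    *addr += value;
    return old;
}
```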
# endif -# if defined(ALPAKA_ACC_GPU_HIP_ENABLED) && !BOOST_LANG_HIP +# if defined(ALPAKA_ACC_GPU_HIP_ENABLED) && !ALPAKA_LANG_HIP # error If ALPAKA_ACC_GPU_HIP_ENABLED is set, the compiler has to support HIP! # endif @@ -79,7 +79,7 @@ namespace alpaka::trait // Emulating atomics with atomicCAS is mentioned in the programming guide too. // http://docs.nvidia.com/cuda/cuda-c-programming-guide/#atomic-functions -# if BOOST_LANG_HIP +# if ALPAKA_LANG_HIP # if __has_builtin(__hip_atomic_load) EmulatedType old{__hip_atomic_load(addressAsIntegralType, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT)}; # else diff --git a/include/alpaka/atomic/Op.hpp b/include/alpaka/atomic/Op.hpp index 2912556d5083..bbe98c88a68d 100644 --- a/include/alpaka/atomic/Op.hpp +++ b/include/alpaka/atomic/Op.hpp @@ -22,13 +22,13 @@ namespace alpaka { auto const old = *addr; auto& ref = *addr; -#if BOOST_COMP_GNUC +#if ALPAKA_COMP_GNUC # pragma GCC diagnostic push # pragma GCC diagnostic ignored "-Wconversion" #endif ref += value; return old; -#if BOOST_COMP_GNUC +#if ALPAKA_COMP_GNUC # pragma GCC diagnostic pop #endif } @@ -44,12 +44,12 @@ namespace alpaka { auto const old = *addr; auto& ref = *addr; -#if BOOST_COMP_GNUC +#if ALPAKA_COMP_GNUC # pragma GCC diagnostic push # pragma GCC diagnostic ignored "-Wconversion" #endif ref -= value; -#if BOOST_COMP_GNUC +#if ALPAKA_COMP_GNUC # pragma GCC diagnostic pop #endif return old; @@ -194,13 +194,13 @@ namespace alpaka // gcc-7.4.0 assumes for an optimization that a signed overflow does not occur here. // That's fine, so ignore that warning. -#if BOOST_COMP_GNUC && (BOOST_COMP_GNUC == BOOST_VERSION_NUMBER(7, 4, 0)) +#if ALPAKA_COMP_GNUC && (ALPAKA_COMP_GNUC == ALPAKA_VERSION_NUMBER(7, 4, 0)) # pragma GCC diagnostic push # pragma GCC diagnostic ignored "-Wstrict-overflow" #endif // check if values are bit-wise equal ref = ((old == compare) ? value : old); -#if BOOST_COMP_GNUC && (BOOST_COMP_GNUC == BOOST_VERSION_NUMBER(7, 4, 0)) +#if ALPAKA_COMP_GNUC && (ALPAKA_COMP_GNUC == ALPAKA_VERSION_NUMBER(7, 4, 0)) # pragma GCC diagnostic pop #endif return old; @@ -232,7 +232,7 @@ namespace alpaka // gcc-7.4.0 assumes for an optimization that a signed overflow does not occur here. // That's fine, so ignore that warning. -#if BOOST_COMP_GNUC && (BOOST_COMP_GNUC == BOOST_VERSION_NUMBER(7, 4, 0)) +#if ALPAKA_COMP_GNUC && (ALPAKA_COMP_GNUC == ALPAKA_VERSION_NUMBER(7, 4, 0)) # pragma GCC diagnostic push # pragma GCC diagnostic ignored "-Wstrict-overflow" #endif @@ -240,7 +240,7 @@ namespace alpaka BitUnion c{compare}; ref = ((o.r == c.r) ? 
value : old); -#if BOOST_COMP_GNUC && (BOOST_COMP_GNUC == BOOST_VERSION_NUMBER(7, 4, 0)) +#if ALPAKA_COMP_GNUC && (ALPAKA_COMP_GNUC == ALPAKA_VERSION_NUMBER(7, 4, 0)) # pragma GCC diagnostic pop #endif return old; diff --git a/include/alpaka/block/shared/dyn/BlockSharedMemDynMember.hpp b/include/alpaka/block/shared/dyn/BlockSharedMemDynMember.hpp index c6a323989d21..3833492af4fc 100644 --- a/include/alpaka/block/shared/dyn/BlockSharedMemDynMember.hpp +++ b/include/alpaka/block/shared/dyn/BlockSharedMemDynMember.hpp @@ -28,7 +28,7 @@ namespace alpaka }; } // namespace detail -#if BOOST_COMP_MSVC || defined(BOOST_COMP_MSVC_EMULATED) +#if ALPAKA_COMP_MSVC # pragma warning(push) # pragma warning(disable : 4324) // warning C4324: structure was padded due to alignment specifier #endif @@ -83,7 +83,7 @@ namespace alpaka mutable std::array::staticAllocBytes> m_mem; std::uint32_t m_dynPitch; }; -#if BOOST_COMP_MSVC || defined(BOOST_COMP_MSVC_EMULATED) +#if ALPAKA_COMP_MSVC # pragma warning(pop) #endif @@ -92,7 +92,7 @@ namespace alpaka template struct GetDynSharedMem> { -#if BOOST_COMP_GNUC +#if ALPAKA_COMP_GNUC # pragma GCC diagnostic push # pragma GCC diagnostic ignored \ "-Wcast-align" // "cast from 'unsigned char*' to 'unsigned int*' increases required alignment of target type" @@ -105,7 +105,7 @@ namespace alpaka "defaultAlignment!"); return reinterpret_cast(mem.dynMemBegin()); } -#if BOOST_COMP_GNUC +#if ALPAKA_COMP_GNUC # pragma GCC diagnostic pop #endif }; diff --git a/include/alpaka/block/shared/dyn/BlockSharedMemDynUniformCudaHipBuiltIn.hpp b/include/alpaka/block/shared/dyn/BlockSharedMemDynUniformCudaHipBuiltIn.hpp index 8364019a70ba..e9d0adb5d7c5 100644 --- a/include/alpaka/block/shared/dyn/BlockSharedMemDynUniformCudaHipBuiltIn.hpp +++ b/include/alpaka/block/shared/dyn/BlockSharedMemDynUniformCudaHipBuiltIn.hpp @@ -23,11 +23,11 @@ namespace alpaka # if !defined(ALPAKA_HOST_ONLY) -# if defined(ALPAKA_ACC_GPU_CUDA_ENABLED) && !BOOST_LANG_CUDA +# if defined(ALPAKA_ACC_GPU_CUDA_ENABLED) && !ALPAKA_LANG_CUDA # error If ALPAKA_ACC_GPU_CUDA_ENABLED is set, the compiler has to support CUDA! # endif -# if defined(ALPAKA_ACC_GPU_HIP_ENABLED) && !BOOST_LANG_HIP +# if defined(ALPAKA_ACC_GPU_HIP_ENABLED) && !ALPAKA_LANG_HIP # error If ALPAKA_ACC_GPU_HIP_ENABLED is set, the compiler has to support HIP! 
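The compare-and-swap code above (`Op.hpp` here, and the `AtomicAtomicRef.hpp` hunk earlier) follows one recipe: compute the desired value from the observed old value and retry with a weak compare-exchange until no other thread interfered. A generic sketch of that retry loop, assuming only the standard `std::atomic_ref` interface; `atomicEmulate` is a made-up name.

```cpp
#include <atomic>

// Apply an arbitrary read-modify-write 'op' atomically and return the previous value.
// This mirrors the do/while compare_exchange_weak loop used by the emulated operations.
template<typename T, typename TOp>
auto atomicEmulate(T& storage, T value, TOp op) -> T
{
    std::atomic_ref<T> ref(storage);
    T old = ref.load();
    T desired;
    do
    {
        desired = op(old, value); // e.g. min, max, or the (old == compare) ? value : old selection
    } while(!ref.compare_exchange_weak(old, desired)); // on failure 'old' is refreshed and we retry
    return old;
}

// Example: an emulated atomic minimum on an int.
// int previous = atomicEmulate(x, 5, [](int a, int b) { return b < a ? b : a; });
```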
# endif diff --git a/include/alpaka/block/shared/st/BlockSharedMemStMemberMasterSync.hpp b/include/alpaka/block/shared/st/BlockSharedMemStMemberMasterSync.hpp index 65bd3043f2fd..81ab763b0ac9 100644 --- a/include/alpaka/block/shared/st/BlockSharedMemStMemberMasterSync.hpp +++ b/include/alpaka/block/shared/st/BlockSharedMemStMemberMasterSync.hpp @@ -40,7 +40,7 @@ namespace alpaka namespace trait { -#if BOOST_COMP_GNUC +#if ALPAKA_COMP_GNUC # pragma GCC diagnostic push # pragma GCC diagnostic ignored \ "-Wcast-align" // "cast from 'unsigned char*' to 'unsigned int*' increases required alignment of target type" @@ -71,7 +71,7 @@ namespace alpaka return *data; } }; -#if BOOST_COMP_GNUC +#if ALPAKA_COMP_GNUC # pragma GCC diagnostic pop #endif template diff --git a/include/alpaka/block/shared/st/BlockSharedMemStUniformCudaHipBuiltIn.hpp b/include/alpaka/block/shared/st/BlockSharedMemStUniformCudaHipBuiltIn.hpp index 9f4ed0ca207d..b7757fe8a206 100644 --- a/include/alpaka/block/shared/st/BlockSharedMemStUniformCudaHipBuiltIn.hpp +++ b/include/alpaka/block/shared/st/BlockSharedMemStUniformCudaHipBuiltIn.hpp @@ -23,11 +23,11 @@ namespace alpaka # if !defined(ALPAKA_HOST_ONLY) -# if defined(ALPAKA_ACC_GPU_CUDA_ENABLED) && !BOOST_LANG_CUDA +# if defined(ALPAKA_ACC_GPU_CUDA_ENABLED) && !ALPAKA_LANG_CUDA # error If ALPAKA_ACC_GPU_CUDA_ENABLED is set, the compiler has to support CUDA! # endif -# if defined(ALPAKA_ACC_GPU_HIP_ENABLED) && !BOOST_LANG_HIP +# if defined(ALPAKA_ACC_GPU_HIP_ENABLED) && !ALPAKA_LANG_HIP # error If ALPAKA_ACC_GPU_HIP_ENABLED is set, the compiler has to support HIP! # endif diff --git a/include/alpaka/block/shared/st/detail/BlockSharedMemStMemberImpl.hpp b/include/alpaka/block/shared/st/detail/BlockSharedMemStMemberImpl.hpp index eb09790ff859..cbb17a63134a 100644 --- a/include/alpaka/block/shared/st/detail/BlockSharedMemStMemberImpl.hpp +++ b/include/alpaka/block/shared/st/detail/BlockSharedMemStMemberImpl.hpp @@ -64,7 +64,7 @@ namespace alpaka::detail meta->offset = m_allocdBytes; } -#if BOOST_COMP_GNUC +#if ALPAKA_COMP_GNUC # pragma GCC diagnostic push # pragma GCC diagnostic ignored \ "-Wcast-align" // "cast from 'unsigned char*' to 'unsigned int*' increases required alignment of target type" @@ -108,7 +108,7 @@ namespace alpaka::detail } private: -#if BOOST_COMP_GNUC +#if ALPAKA_COMP_GNUC # pragma GCC diagnostic pop #endif diff --git a/include/alpaka/block/sync/BlockSyncUniformCudaHipBuiltIn.hpp b/include/alpaka/block/sync/BlockSyncUniformCudaHipBuiltIn.hpp index ddc369d5bca5..900a0a2f0239 100644 --- a/include/alpaka/block/sync/BlockSyncUniformCudaHipBuiltIn.hpp +++ b/include/alpaka/block/sync/BlockSyncUniformCudaHipBuiltIn.hpp @@ -20,11 +20,11 @@ namespace alpaka # if !defined(ALPAKA_HOST_ONLY) -# if defined(ALPAKA_ACC_GPU_CUDA_ENABLED) && !BOOST_LANG_CUDA +# if defined(ALPAKA_ACC_GPU_CUDA_ENABLED) && !ALPAKA_LANG_CUDA # error If ALPAKA_ACC_GPU_CUDA_ENABLED is set, the compiler has to support CUDA! # endif -# if defined(ALPAKA_ACC_GPU_HIP_ENABLED) && !BOOST_LANG_HIP +# if defined(ALPAKA_ACC_GPU_HIP_ENABLED) && !ALPAKA_LANG_HIP # error If ALPAKA_ACC_GPU_HIP_ENABLED is set, the compiler has to support HIP! 
# endif @@ -46,7 +46,7 @@ namespace alpaka BlockSyncUniformCudaHipBuiltIn const& /*blockSync*/, int predicate) -> int { -# if defined(__HIP_ARCH_HAS_SYNC_THREAD_EXT__) && __HIP_ARCH_HAS_SYNC_THREAD_EXT__ == 0 && BOOST_COMP_HIP +# if defined(__HIP_ARCH_HAS_SYNC_THREAD_EXT__) && __HIP_ARCH_HAS_SYNC_THREAD_EXT__ == 0 && ALPAKA_COMP_HIP // workaround for unsupported syncthreads_* operation on AMD hardware without sync extension __shared__ int tmp; __syncthreads(); @@ -71,7 +71,7 @@ namespace alpaka BlockSyncUniformCudaHipBuiltIn const& /*blockSync*/, int predicate) -> int { -# if defined(__HIP_ARCH_HAS_SYNC_THREAD_EXT__) && __HIP_ARCH_HAS_SYNC_THREAD_EXT__ == 0 && BOOST_COMP_HIP +# if defined(__HIP_ARCH_HAS_SYNC_THREAD_EXT__) && __HIP_ARCH_HAS_SYNC_THREAD_EXT__ == 0 && ALPAKA_COMP_HIP // workaround for unsupported syncthreads_* operation on AMD hardware without sync extension __shared__ int tmp; __syncthreads(); @@ -96,7 +96,7 @@ namespace alpaka BlockSyncUniformCudaHipBuiltIn const& /*blockSync*/, int predicate) -> int { -# if defined(__HIP_ARCH_HAS_SYNC_THREAD_EXT__) && __HIP_ARCH_HAS_SYNC_THREAD_EXT__ == 0 && BOOST_COMP_HIP +# if defined(__HIP_ARCH_HAS_SYNC_THREAD_EXT__) && __HIP_ARCH_HAS_SYNC_THREAD_EXT__ == 0 && ALPAKA_COMP_HIP // workaround for unsupported syncthreads_* operation on AMD hardware without sync extension __shared__ int tmp; __syncthreads(); diff --git a/include/alpaka/core/Align.hpp b/include/alpaka/core/Align.hpp index d2be0149a50f..9ad8e116f42a 100644 --- a/include/alpaka/core/Align.hpp +++ b/include/alpaka/core/Align.hpp @@ -49,7 +49,7 @@ namespace alpaka::core struct OptimalAlignment : std::integral_constant< std::size_t, -#if BOOST_COMP_GNUC +#if ALPAKA_COMP_GNUC // GCC does not support alignments larger then 128: "warning: requested alignment 256 is larger // than 128[-Wattributes]". (TsizeBytes > 64) ? 
128 : diff --git a/include/alpaka/core/ApiCudaRt.hpp b/include/alpaka/core/ApiCudaRt.hpp index ee2cdb2e95f5..a37f8da349ad 100644 --- a/include/alpaka/core/ApiCudaRt.hpp +++ b/include/alpaka/core/ApiCudaRt.hpp @@ -4,7 +4,7 @@ #pragma once -#include +#include #ifdef ALPAKA_ACC_GPU_CUDA_ENABLED # include @@ -15,7 +15,7 @@ namespace alpaka { // Names static constexpr char name[] = "Cuda"; - static constexpr auto version = BOOST_PREDEF_MAKE_10_VVRRP(CUDART_VERSION); + static constexpr auto version = ALPAKA_LANG_CUDA; // Types using DeviceAttr_t = ::cudaDeviceAttr; @@ -182,12 +182,12 @@ namespace alpaka template static inline Error_t funcGetAttributes(FuncAttributes_t* attr, T* func) { -# if BOOST_COMP_GNUC +# if ALPAKA_COMP_GNUC # pragma GCC diagnostic push # pragma GCC diagnostic ignored "-Wconditionally-supported" # endif return ::cudaFuncGetAttributes(attr, reinterpret_cast(func)); -# if BOOST_COMP_GNUC +# if ALPAKA_COMP_GNUC # pragma GCC diagnostic pop # endif } diff --git a/include/alpaka/core/ApiHipRt.hpp b/include/alpaka/core/ApiHipRt.hpp index d765246c2ec2..bf35c939f5fa 100644 --- a/include/alpaka/core/ApiHipRt.hpp +++ b/include/alpaka/core/ApiHipRt.hpp @@ -4,7 +4,7 @@ #pragma once -#include +#include #ifdef ALPAKA_ACC_GPU_HIP_ENABLED @@ -17,7 +17,7 @@ namespace alpaka { // Names static constexpr char name[] = "Hip"; - static constexpr auto version = BOOST_VERSION_NUMBER(HIP_VERSION_MAJOR, HIP_VERSION_MINOR, 0); + static constexpr auto version = ALPAKA_VERSION_NUMBER(HIP_VERSION_MAJOR, HIP_VERSION_MINOR, 0); // Types using DeviceAttr_t = ::hipDeviceAttribute_t; @@ -207,12 +207,12 @@ namespace alpaka template static inline Error_t funcGetAttributes(FuncAttributes_t* attr, T* func) { -# if BOOST_COMP_GNUC +# if ALPAKA_COMP_GNUC # pragma GCC diagnostic push # pragma GCC diagnostic ignored "-Wconditionally-supported" # endif return ::hipFuncGetAttributes(attr, reinterpret_cast(func)); -# if BOOST_COMP_GNUC +# if ALPAKA_COMP_GNUC # pragma GCC diagnostic pop # endif } diff --git a/include/alpaka/core/BoostPredef.hpp b/include/alpaka/core/BoostPredef.hpp index bcd2d3589b1a..2f63c08d80c6 100644 --- a/include/alpaka/core/BoostPredef.hpp +++ b/include/alpaka/core/BoostPredef.hpp @@ -5,75 +5,198 @@ #pragma once -#include - #ifdef __INTEL_COMPILER # warning \ "The Intel Classic compiler (icpc) is no longer supported. Please upgrade to the Intel LLVM compiler (ipcx)." #endif -//---------------------------------------HIP----------------------------------- -// __HIP__ is defined by both hip-clang and vanilla clang in HIP mode. -// https://github.com/ROCm-Developer-Tools/HIP/blob/master/docs/markdown/hip_porting_guide.md#compiler-defines-summary -#if !defined(BOOST_LANG_HIP) -# if defined(__HIP__) -/* BOOST_LANG_CUDA is enabled when either __CUDACC__ (nvcc) or __CUDA__ (clang) are defined. This occurs when - nvcc / clang encounter a CUDA source file. Since there are no HIP source files we treat every source file - as HIP when we are using a HIP-capable compiler. 
*/ -# include -// HIP doesn't give us a patch level for the last entry, just a gitdate -# define BOOST_LANG_HIP BOOST_VERSION_NUMBER(HIP_VERSION_MAJOR, HIP_VERSION_MINOR, 0) +#define ALPAKA_VERSION_NUMBER(major, minor, patch) \ + ((((major) % 1000) * 100'000'000) + (((minor) % 1000) * 100000) + ((patch) % 100000)) + +#define ALPAKA_VERSION_NUMBER_NOT_AVAILABLE ALPAKA_VERSION_NUMBER(0, 0, 0) + +#define ALPAKA_YYYYMMDD_TO_VERSION(V) ALPAKA_VERSION_NUMBER_NOT_AVAILABLE(((V) / 10000), ((V) / 100) % 100, (V) % 100) + + +// ######## detect operating systems ######## + +// WINDOWS +#if !defined(ALPAKA_OS_WINDOWS) +# if defined(__WIN32__) || defined(__MINGW32__) || defined(WIN32) +# define ALPAKA_OS_WINDOWS 1 +# else +# define ALPAKA_OS_WINDOWS 0 +# endif +#endif + + +// Linux +#if !defined(ALPAKA_OS_LINUX) +# if defined(__linux) || defined(__linux__) || defined(__gnu_linux__) +# define ALPAKA_OS_LINUX 1 +# else +# define ALPAKA_OS_LINUX 0 +# endif +#endif + +// Apple +#if !defined(ALPAKA_OS_IOS) +# if defined(__APPLE__) +# define ALPAKA_OS_IOS 1 +# else +# define ALPAKA_OS_IOS 0 +# endif +#endif + +// Cygwin +#if !defined(ALPAKA_OS_CYGWIN) +# if defined(__CYGWIN__) +# define ALPAKA_OS_CYGWIN 1 +# else +# define ALPAKA_OS_CYGWIN 0 +# endif +#endif + +// ### architectures + +// X86 +#if !defined(ALPAKA_ARCH_X86) +# if defined(__x86_64__) || defined(_M_X64) +# define ALPAKA_ARCH_X86 1 +# else +# define ALPAKA_ARCH_X86 0 +# endif +#endif + +// RISCV +#if !defined(ALPAKA_ARCH_RISCV) +# if defined(__riscv) +# define ALPAKA_ARCH_RISCV 1 +# else +# define ALPAKA_ARCH_RISCV 0 +# endif +#endif + +// ARM +#if !defined(ALPAKA_ARCH_ARM) +# if defined(__ARM_ARCH) || defined(__arm__) || defined(__arm64) +# define ALPAKA_ARCH_ARM 1 # else -# define BOOST_LANG_HIP BOOST_VERSION_NUMBER_NOT_AVAILABLE +# define ALPAKA_ARCH_ARM 0 # endif #endif -// HSA device architecture detection (HSA generated via HIP(clang)) -#if !defined(BOOST_ARCH_HSA) -# if defined(__HIP_DEVICE_COMPILE__) && __HIP_DEVICE_COMPILE__ == 1 && defined(__HIP__) -// __HIP_DEVICE_COMPILE__ does not represent feature capability of target device like CUDA_ARCH. -// For feature detection there are special macros, see ROCm's HIP porting guide. -# define BOOST_ARCH_HSA BOOST_VERSION_NUMBER_AVAILABLE +// ARM +#if !defined(ALPAKA_ARCH_PTX) +# if defined(__CUDA_ARCH__) +# define ALPAKA_ARCH_PTX 1 # else -# define BOOST_ARCH_HSA BOOST_VERSION_NUMBER_NOT_AVAILABLE +# define ALPAKA_ARCH_PTX 0 # endif #endif +// ######## compiler ######## + // HIP compiler detection -#if !defined(BOOST_COMP_HIP) +#if !defined(ALPAKA_COMP_HIP) # if defined(__HIP__) // Defined by hip-clang and vanilla clang in HIP mode. 
# include // HIP doesn't give us a patch level for the last entry, just a gitdate -# define BOOST_COMP_HIP BOOST_VERSION_NUMBER(HIP_VERSION_MAJOR, HIP_VERSION_MINOR, 0) +# define ALPAKA_COMP_HIP ALPAKA_VERSION_NUMBER(HIP_VERSION_MAJOR, HIP_VERSION_MINOR, 0) # else -# define BOOST_COMP_HIP BOOST_VERSION_NUMBER_NOT_AVAILABLE +# define ALPAKA_COMP_HIP ALPAKA_VERSION_NUMBER_NOT_AVAILABLE # endif #endif +// nvcc compiler +#if defined(__NVCC__) +# define ALPAKA_COMP_NVCC ALPAKA_VERSION_NUMBER(__CUDACC_VER_MAJOR__, __CUDACC_VER_MINOR__, __CUDACC_VER_BUILD__) +#else +# define ALPAKA_COMP_NVCC ALPAKA_VERSION_NUMBER_NOT_AVAILABLE +#endif + +// clang compiler +#if defined(__clang__) +# define ALPAKA_COMP_CLANG ALPAKA_VERSION_NUMBER(__clang_major__, __clang_minor__, __clang_patchlevel__) +#else +# define ALPAKA_COMP_CLANG ALPAKA_VERSION_NUMBER_NOT_AVAILABLE +#endif + +// MSVC compiler +#if defined(_MSC_VER) +# define ALPAKA_COMP_MSVC \ + ALPAKA_VERSION_NUMBER((_MSC_FULL_VER) % 10'000'000, ((_MSC_FULL_VER) / 100000) % 100, (_MSC_FULL_VER) % 100000) +#else +# define ALPAKA_COMP_MSVC ALPAKA_VERSION_NUMBER_NOT_AVAILABLE +#endif + +// gnu compiler +#if defined(__GNUC__) +# if defined(__GNUC_PATCHLEVEL__) +# define ALPAKA_COMP_GNUC ALPAKA_VERSION_NUMBER(__GNUC__, __GNUC_MINOR__, __GNUC_PATCHLEVEL__) +# else +# define ALPAKA_COMP_GNUC ALPAKA_VERSION_NUMBER(__GNUC__, __GNUC_MINOR__, 0) +# endif +#else +# define ALPAKA_COMP_GNUC ALPAKA_VERSION_NUMBER_NOT_AVAILABLE +#endif + +// IBM compiler +// only clang based is supported +#if defined(__ibmxl__) +# define ALPAKA_COMP_IBM ALPAKA_VERSION_NUMBER(__ibmxl_version__, __ibmxl_release__, __ibmxl_modification__) +#else +# define ALPAKA_COMP_IBM ALPAKA_VERSION_NUMBER_NOT_AVAILABLE +#endif + // clang CUDA compiler detection // Currently __CUDA__ is only defined by clang when compiling CUDA code. #if defined(__clang__) && defined(__CUDA__) -# define BOOST_COMP_CLANG_CUDA BOOST_COMP_CLANG +# define ALPAKA_COMP_CLANG_CUDA ALPAKA_VERSION_NUMBER(__clang_major__, __clang_minor__, __clang_patchlevel__) #else -# define BOOST_COMP_CLANG_CUDA BOOST_VERSION_NUMBER_NOT_AVAILABLE +# define ALPAKA_COMP_CLANG_CUDA ALPAKA_VERSION_NUMBER_NOT_AVAILABLE #endif // PGI and NV HPC SDK compiler detection -// As of Boost 1.74, Boost.Predef's compiler detection is a bit weird. Recent PGI compilers will be identified as -// BOOST_COMP_PGI_EMULATED. Boost.Predef has lackluster front-end support and mistakes the EDG front-end -// for an actual compiler. -// TODO: Whenever you look at this code please check whether https://github.com/boostorg/predef/issues/28 and -// https://github.com/boostorg/predef/issues/51 have been resolved. -// BOOST_COMP_PGI_EMULATED is defined by boost instead of BOOST_COMP_PGI -#if defined(BOOST_COMP_PGI) && defined(BOOST_COMP_PGI_EMULATED) -# undef BOOST_COMP_PGI -# define BOOST_COMP_PGI BOOST_COMP_PGI_EMULATED +#if defined(__PGI) +# define ALPAKA_COMP_PGI ALPAKA_VERSION_NUMBER(__PGIC__, __PGIC_MINOR__, __PGIC_PATCHLEVEL__) +#else +# define ALPAKA_COMP_PGI ALPAKA_VERSION_NUMBER_NOT_AVAILABLE #endif // Intel LLVM compiler detection -#if !defined(BOOST_COMP_ICPX) +#if !defined(ALPAKA_COMP_ICPX) # if defined(SYCL_LANGUAGE_VERSION) && defined(__INTEL_LLVM_COMPILER) // The version string for icpx 2023.1.0 is 20230100. In Boost.Predef this becomes (53,1,0). 
-# define BOOST_COMP_ICPX BOOST_PREDEF_MAKE_YYYYMMDD(__INTEL_LLVM_COMPILER) +# define ALPAKA_COMP_ICPX ALPAKA_YYYYMMDD_TO_VERSION(__INTEL_LLVM_COMPILER) +# else +# define ALPAKA_COMP_ICPX ALPAKA_VERSION_NUMBER_NOT_AVAILABLE +# endif +#endif + +// ######## C++ language ######## + +//---------------------------------------HIP----------------------------------- +// __HIP__ is defined by both hip-clang and vanilla clang in HIP mode. +// https://github.com/ROCm-Developer-Tools/HIP/blob/master/docs/markdown/hip_porting_guide.md#compiler-defines-summary +#if !defined(ALPAKA_LANG_HIP) +# if defined(__HIP__) +# include +// HIP doesn't give us a patch level for the last entry, just a gitdate +# define ALPAKA_LANG_HIP ALPAKA_VERSION_NUMBER(HIP_VERSION_MAJOR, HIP_VERSION_MINOR, 0) +# else +# define ALPAKA_LANG_HIP ALPAKA_VERSION_NUMBER_NOT_AVAILABLE +# endif +#endif + +// CUDA +#if !defined(ALPAKA_LANG_CUDA) +# if defined(__CUDACC__) || defined(__CUDA__) +# include +// HIP doesn't give us a patch level for the last entry, just a gitdate +# define ALPAKA_LANG_CUDA \ + ALPAKA_VERSION_NUMBER((CUDART_VERSION) / 1000, ((CUDART_VERSION) / 10) % 100, (CUDART_VERSION) % 10) +# else +# define ALPAKA_LANG_CUDA ALPAKA_VERSION_NUMBER_NOT_AVAILABLE # endif #endif diff --git a/include/alpaka/core/CallbackThread.hpp b/include/alpaka/core/CallbackThread.hpp index 91ecf78c3bed..82db89b32bb6 100644 --- a/include/alpaka/core/CallbackThread.hpp +++ b/include/alpaka/core/CallbackThread.hpp @@ -19,7 +19,7 @@ namespace alpaka::core { class CallbackThread { -#if BOOST_COMP_CLANG +#if ALPAKA_COMP_CLANG # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wweak-vtables" #endif @@ -27,7 +27,7 @@ namespace alpaka::core // std::future which will keep the task alive and we cannot control the moment the future is set. //! \todo with C++23 std::move_only_function should be used struct Task -#if BOOST_COMP_CLANG +#if ALPAKA_COMP_CLANG # pragma clang diagnostic pop #endif { diff --git a/include/alpaka/core/Common.hpp b/include/alpaka/core/Common.hpp index 3b181ee2151b..e93067728638 100644 --- a/include/alpaka/core/Common.hpp +++ b/include/alpaka/core/Common.hpp @@ -12,7 +12,7 @@ # include #endif -#if BOOST_LANG_HIP +#if ALPAKA_LANG_HIP // HIP defines some keywords like __forceinline__ in header files. # include #endif @@ -26,7 +26,7 @@ //! -> std::int32_t; //! \endcode //! @{ -#if BOOST_LANG_CUDA || BOOST_LANG_HIP +#if ALPAKA_LANG_CUDA || ALPAKA_LANG_HIP # if defined(ALPAKA_ACC_GPU_CUDA_ONLY_MODE) || defined(ALPAKA_ACC_GPU_HIP_ONLY_MODE) # define ALPAKA_FN_ACC __device__ # else @@ -71,9 +71,9 @@ //! ALPAKA_NO_HOST_ACC_WARNING //! ALPAKA_FN_HOST_ACC function_declaration() //! WARNING: Only use this method if there is no other way. -//! Most cases can be solved by #if BOOST_ARCH_PTX or #if BOOST_LANG_CUDA. -#if(BOOST_LANG_CUDA && !BOOST_COMP_CLANG_CUDA) -# if BOOST_COMP_MSVC || defined(BOOST_COMP_MSVC_EMULATED) +//! Most cases can be solved by #if ALPAKA_ARCH_PTX or #if ALPAKA_LANG_CUDA. +#if(ALPAKA_LANG_CUDA && !ALPAKA_COMP_CLANG_CUDA) +# if ALPAKA_COMP_MSVC # define ALPAKA_NO_HOST_ACC_WARNING __pragma(hd_warning_disable) # else # define ALPAKA_NO_HOST_ACC_WARNING _Pragma("hd_warning_disable") @@ -85,9 +85,9 @@ //! Macro defining the inline function attribute. //! //! The macro should stay on the left hand side of keywords, e.g. 'static', 'constexpr', 'explicit' or the return type. 
-#if BOOST_LANG_CUDA || BOOST_LANG_HIP +#if ALPAKA_LANG_CUDA || ALPAKA_LANG_HIP # define ALPAKA_FN_INLINE __forceinline__ -#elif BOOST_COMP_MSVC || defined(BOOST_COMP_MSVC_EMULATED) +#elif ALPAKA_COMP_MSVC // TODO: With C++20 [[msvc::forceinline]] can be used. # define ALPAKA_FN_INLINE __forceinline #else @@ -133,8 +133,8 @@ //! alpaka::memcpy(queue, foo, bufHost, extent); //! } //! \endcode -#if((BOOST_LANG_CUDA && BOOST_COMP_CLANG_CUDA) || (BOOST_LANG_CUDA && BOOST_COMP_NVCC && BOOST_ARCH_PTX) \ - || BOOST_LANG_HIP) +#if((ALPAKA_LANG_CUDA && ALPAKA_COMP_CLANG_CUDA) || (ALPAKA_LANG_CUDA && ALPAKA_COMP_NVCC && ALPAKA_ARCH_PTX) \ + || ALPAKA_LANG_HIP) # if defined(__CUDACC_RDC__) || defined(__CLANG_RDC__) # define ALPAKA_STATIC_ACC_MEM_GLOBAL \ template \ @@ -188,8 +188,8 @@ //! alpaka::memcpy(queue, foo, bufHost, extent); //! } //! \endcode -#if((BOOST_LANG_CUDA && BOOST_COMP_CLANG_CUDA) || (BOOST_LANG_CUDA && BOOST_COMP_NVCC && BOOST_ARCH_PTX) \ - || BOOST_LANG_HIP) +#if((ALPAKA_LANG_CUDA && ALPAKA_COMP_CLANG_CUDA) || (ALPAKA_LANG_CUDA && ALPAKA_COMP_NVCC && ALPAKA_ARCH_PTX) \ + || ALPAKA_LANG_HIP) # if defined(__CUDACC_RDC__) || defined(__CLANG_RDC__) # define ALPAKA_STATIC_ACC_MEM_CONSTANT \ template \ @@ -213,8 +213,8 @@ //! This is useful for pointers, (shared) variables and shared memory which are used in combination with //! the alpaka::mem_fence() function. It ensures that memory annotated with this macro will always be written directly //! to memory (and not to a register or cache because of compiler optimizations). -#if(BOOST_LANG_CUDA && BOOST_ARCH_PTX) \ - || (BOOST_LANG_HIP && defined(__HIP_DEVICE_COMPILE__) && __HIP_DEVICE_COMPILE__ == 1) +#if(ALPAKA_LANG_CUDA && ALPAKA_ARCH_PTX) \ + || (ALPAKA_LANG_HIP && defined(__HIP_DEVICE_COMPILE__) && __HIP_DEVICE_COMPILE__ == 1) # define ALPAKA_DEVICE_VOLATILE volatile #else # define ALPAKA_DEVICE_VOLATILE diff --git a/include/alpaka/core/CudaHipCommon.hpp b/include/alpaka/core/CudaHipCommon.hpp index b3fdd7d10304..f2257d38255b 100644 --- a/include/alpaka/core/CudaHipCommon.hpp +++ b/include/alpaka/core/CudaHipCommon.hpp @@ -50,7 +50,7 @@ namespace alpaka ushort3 // CUDA built-in variables have special types in clang native CUDA compilation // defined in cuda_builtin_vars.h -# if BOOST_COMP_CLANG_CUDA +# if ALPAKA_COMP_CLANG_CUDA , __cuda_builtin_threadIdx_t, __cuda_builtin_blockIdx_t, diff --git a/include/alpaka/core/Debug.hpp b/include/alpaka/core/Debug.hpp index dc70ed5138b1..0b3cd79037e1 100644 --- a/include/alpaka/core/Debug.hpp +++ b/include/alpaka/core/Debug.hpp @@ -64,9 +64,9 @@ namespace alpaka::core::detail // Define ALPAKA_DEBUG_BREAK. 
#if ALPAKA_DEBUG >= ALPAKA_DEBUG_MINIMAL -# if BOOST_COMP_GNUC || BOOST_COMP_CLANG +# if ALPAKA_COMP_GNUC || ALPAKA_COMP_CLANG # define ALPAKA_DEBUG_BREAK ::__builtin_trap() -# elif BOOST_COMP_MSVC +# elif ALPAKA_COMP_MSVC # define ALPAKA_DEBUG_BREAK ::__debugbreak() # else # define ALPAKA_DEBUG_BREAK diff --git a/include/alpaka/core/DemangleTypeNames.hpp b/include/alpaka/core/DemangleTypeNames.hpp index 5650054de359..37d28e618d77 100644 --- a/include/alpaka/core/DemangleTypeNames.hpp +++ b/include/alpaka/core/DemangleTypeNames.hpp @@ -6,18 +6,29 @@ #include "alpaka/core/BoostPredef.hpp" -#include +#include +#include + +#if defined(ALPAKA_HAS_BOOST_HEADERS) +# include +#endif namespace alpaka::core { -#if BOOST_COMP_CLANG +#if ALPAKA_COMP_CLANG # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wexit-time-destructors" # pragma clang diagnostic ignored "-Wmissing-variable-declarations" #endif +#if defined(ALPAKA_HAS_BOOST_HEADERS) + template + inline std::string const demangled = boost::core::demangle(typeid(T).name()); +#else template - inline const std::string demangled = boost::core::demangle(typeid(T).name()); -#if BOOST_COMP_CLANG + inline std::string const demangled = typeid(T).name(); +#endif +#if ALPAKA_COMP_CLANG # pragma clang diagnostic pop #endif + } // namespace alpaka::core diff --git a/include/alpaka/core/Hip.hpp b/include/alpaka/core/Hip.hpp index 2c2e425a71e1..736376ed5dba 100644 --- a/include/alpaka/core/Hip.hpp +++ b/include/alpaka/core/Hip.hpp @@ -8,7 +8,7 @@ #include "alpaka/core/UniformCudaHip.hpp" #ifdef ALPAKA_ACC_GPU_HIP_ENABLED -# if !BOOST_LANG_HIP && !defined(ALPAKA_HOST_ONLY) +# if !ALPAKA_LANG_HIP && !defined(ALPAKA_HOST_ONLY) # error If ALPAKA_ACC_GPU_HIP_ENABLED is set, the compiler has to support HIP! # endif #endif diff --git a/include/alpaka/core/RemoveRestrict.hpp b/include/alpaka/core/RemoveRestrict.hpp index 316630f7f1fe..1f59240e114c 100644 --- a/include/alpaka/core/RemoveRestrict.hpp +++ b/include/alpaka/core/RemoveRestrict.hpp @@ -15,7 +15,7 @@ namespace alpaka using type = T; }; -#if BOOST_COMP_MSVC +#if ALPAKA_COMP_MSVC template struct remove_restrict { diff --git a/include/alpaka/core/Sycl.hpp b/include/alpaka/core/Sycl.hpp index c29fccd76860..af028e228282 100644 --- a/include/alpaka/core/Sycl.hpp +++ b/include/alpaka/core/Sycl.hpp @@ -35,7 +35,7 @@ using AlpakaFormat = char const* [[clang::opencl_constant]]; using AlpakaFormat = char const*; # endif -# if BOOST_COMP_CLANG +# if ALPAKA_COMP_CLANG # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wgnu-zero-variadic-macro-arguments" # endif @@ -47,7 +47,7 @@ using AlpakaFormat = char const*; sycl::ext::oneapi::experimental::printf(format, ##__VA_ARGS__); \ } while(false) -# if BOOST_COMP_CLANG +# if ALPAKA_COMP_CLANG # pragma clang diagnostic pop # endif diff --git a/include/alpaka/core/ThreadPool.hpp b/include/alpaka/core/ThreadPool.hpp index b59555a376f0..0bfe69df9a15 100644 --- a/include/alpaka/core/ThreadPool.hpp +++ b/include/alpaka/core/ThreadPool.hpp @@ -55,7 +55,7 @@ namespace alpaka::core::detail template auto enqueueTask(TFnObj&& task, TArgs&&... 
args) -> std::future { -#if BOOST_COMP_MSVC +#if ALPAKA_COMP_MSVC // MSVC 14.39.33519 is throwing an error because the noexcept type deduction is not defined in original C++17 // error C2065: 'task': undeclared identifier // see: https://stackoverflow.com/a/72467726 diff --git a/include/alpaka/core/Unreachable.hpp b/include/alpaka/core/Unreachable.hpp index 7b1b9ff8d792..01e2bc34197d 100644 --- a/include/alpaka/core/Unreachable.hpp +++ b/include/alpaka/core/Unreachable.hpp @@ -10,15 +10,15 @@ //! a false warning about a missing return statement unless it is told that the following code section is unreachable. //! //! \param x A dummy value for the expected return type of the calling function. -#if(BOOST_COMP_NVCC && BOOST_ARCH_PTX) -# if BOOST_LANG_CUDA >= BOOST_VERSION_NUMBER(11, 3, 0) +#if(ALPAKA_COMP_NVCC && ALPAKA_ARCH_PTX) +# if ALPAKA_LANG_CUDA >= ALPAKA_VERSION_NUMBER(11, 3, 0) # define ALPAKA_UNREACHABLE(...) __builtin_unreachable() # else # define ALPAKA_UNREACHABLE(...) return __VA_ARGS__ # endif -#elif BOOST_COMP_MSVC +#elif ALPAKA_COMP_MSVC # define ALPAKA_UNREACHABLE(...) __assume(false) -#elif BOOST_COMP_GNUC || BOOST_COMP_CLANG +#elif ALPAKA_COMP_GNUC || ALPAKA_COMP_CLANG # define ALPAKA_UNREACHABLE(...) __builtin_unreachable() #else # define ALPAKA_UNREACHABLE(...) diff --git a/include/alpaka/core/Unroll.hpp b/include/alpaka/core/Unroll.hpp index 10794e6e3bf8..c25fd48cf3f5 100644 --- a/include/alpaka/core/Unroll.hpp +++ b/include/alpaka/core/Unroll.hpp @@ -12,13 +12,13 @@ //! `ALPAKA_UNROLL //! for(...){...}` // \TODO: Implement for other compilers. -#if BOOST_ARCH_PTX +#if ALPAKA_ARCH_PTX # define ALPAKA_UNROLL_STRINGIFY(x) #x # define ALPAKA_UNROLL(...) _Pragma(ALPAKA_UNROLL_STRINGIFY(unroll __VA_ARGS__)) -#elif BOOST_COMP_IBM || BOOST_COMP_SUNPRO || BOOST_COMP_HPACC +#elif ALPAKA_COMP_IBM # define ALPAKA_UNROLL_STRINGIFY(x) #x # define ALPAKA_UNROLL(...) _Pragma(ALPAKA_UNROLL_STRINGIFY(unroll(__VA_ARGS__))) -#elif BOOST_COMP_PGI +#elif ALPAKA_COMP_PGI # define ALPAKA_UNROLL(...) _Pragma("unroll") #else # define ALPAKA_UNROLL(...) diff --git a/include/alpaka/core/Utility.hpp b/include/alpaka/core/Utility.hpp index 2610027ef360..bd1238e621f3 100644 --- a/include/alpaka/core/Utility.hpp +++ b/include/alpaka/core/Utility.hpp @@ -15,7 +15,7 @@ namespace alpaka::core // This function is equivalent to std::declval() but can be used // within an alpaka accelerator kernel too. // This function can be used only within std::decltype(). -#if BOOST_LANG_CUDA && BOOST_COMP_CLANG_CUDA || BOOST_COMP_HIP +#if ALPAKA_LANG_CUDA && ALPAKA_COMP_CLANG_CUDA || ALPAKA_COMP_HIP template ALPAKA_FN_HOST_ACC std::add_rvalue_reference_t declval(); #else diff --git a/include/alpaka/core/Vectorize.hpp b/include/alpaka/core/Vectorize.hpp index 55f0e6f73c66..5527460c9895 100644 --- a/include/alpaka/core/Vectorize.hpp +++ b/include/alpaka/core/Vectorize.hpp @@ -16,13 +16,13 @@ //! for(...){...}` // \TODO: Implement for other compilers. // See: http://stackoverflow.com/questions/2706286/pragmas-swp-ivdep-prefetch-support-in-various-compilers -/*#if BOOST_COMP_HPACC +/*#if ALPAKA_COMP_HPACC #define ALPAKA_VECTORIZE_HINT(...) _Pragma("ivdep") -#elif BOOST_COMP_PGI +#elif ALPAKA_COMP_PGI #define ALPAKA_VECTORIZE_HINT(...) _Pragma("vector") -#elif BOOST_COMP_MSVC +#elif ALPAKA_COMP_MSVC #define ALPAKA_VECTORIZE_HINT(...) __pragma(loop(ivdep)) -#elif BOOST_COMP_GNUC +#elif ALPAKA_COMP_GNUC #define ALPAKA_VECTORIZE_HINT(...) _Pragma("GCC ivdep") #else #define ALPAKA_VECTORIZE_HINT(...) 
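The checks above (for example `ALPAKA_LANG_CUDA >= ALPAKA_VERSION_NUMBER(11, 3, 0)` in `Unreachable.hpp`) rely on the ordering property of the new encoding from `BoostPredef.hpp`: major, minor and patch are packed into a single integer so that `<` and `>=` behave like a lexicographic version comparison, and `0` means "not available". A few illustrative compile-time checks that follow directly from the macro definition shown earlier:

```cpp
#include "alpaka/core/BoostPredef.hpp"

// (major % 1000) * 100'000'000 + (minor % 1000) * 100'000 + (patch % 100'000)
static_assert(ALPAKA_VERSION_NUMBER(11, 3, 0) == 1'100'300'000);
static_assert(ALPAKA_VERSION_NUMBER(11, 6, 0) > ALPAKA_VERSION_NUMBER(11, 3, 0));
static_assert(ALPAKA_VERSION_NUMBER(18, 0, 0) > ALPAKA_VERSION_NUMBER(12, 0, 1));
static_assert(ALPAKA_VERSION_NUMBER_NOT_AVAILABLE == 0);

// Typical guard, mirroring the CUDA 11.3 check in Unreachable.hpp above:
// #if ALPAKA_LANG_CUDA >= ALPAKA_VERSION_NUMBER(11, 3, 0)
```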
diff --git a/include/alpaka/dev/cpu/SysInfo.hpp b/include/alpaka/dev/cpu/SysInfo.hpp index 1dc989ff21cb..4f3555bbb731 100644 --- a/include/alpaka/dev/cpu/SysInfo.hpp +++ b/include/alpaka/dev/cpu/SysInfo.hpp @@ -6,7 +6,7 @@ #include "alpaka/core/BoostPredef.hpp" -#if BOOST_OS_WINDOWS || BOOST_OS_CYGWIN +#if ALPAKA_OS_WINDOWS || ALPAKA_OS_CYGWIN # ifndef NOMINMAX # define NOMINMAX # endif @@ -15,18 +15,18 @@ # endif // We could use some more macros to reduce the number of sub-headers included, but this would restrict user code. # include -#elif BOOST_OS_UNIX || BOOST_OS_MACOS +#elif ALPAKA_OS_LINUX || ALPAKA_OS_IOS # include # include # include # include -# if BOOST_OS_BSD || BOOST_OS_MACOS +# if ALPAKA_OS_IOS # include # endif #endif -#if BOOST_OS_LINUX +#if ALPAKA_OS_LINUX # include #endif @@ -34,10 +34,10 @@ #include #include -#if BOOST_ARCH_X86 -# if BOOST_COMP_GNUC || BOOST_COMP_CLANG || BOOST_COMP_PGI +#if ALPAKA_ARCH_X86 +# if ALPAKA_COMP_GNUC || ALPAKA_COMP_CLANG || ALPAKA_COMP_PGI # include -# elif BOOST_COMP_MSVC || defined(BOOST_COMP_MSVC_EMULATED) +# elif ALPAKA_COMP_MSVC # include # endif #endif @@ -47,14 +47,14 @@ namespace alpaka::cpu::detail constexpr int NO_CPUID = 0; constexpr int UNKNOWN_CPU = 0; constexpr int UNKNOWN_COMPILER = 1; -#if BOOST_ARCH_X86 -# if BOOST_COMP_GNUC || BOOST_COMP_CLANG || BOOST_COMP_PGI +#if ALPAKA_ARCH_X86 +# if ALPAKA_COMP_GNUC || ALPAKA_COMP_CLANG || ALPAKA_COMP_PGI inline auto cpuid(std::uint32_t level, std::uint32_t subfunction, std::uint32_t ex[4]) -> void { __cpuid_count(level, subfunction, ex[0], ex[1], ex[2], ex[3]); } -# elif BOOST_COMP_MSVC || defined(BOOST_COMP_MSVC_EMULATED) +# elif ALPAKA_COMP_MSVC inline auto cpuid(std::uint32_t level, std::uint32_t subfunction, std::uint32_t ex[4]) -> void { __cpuidex(reinterpret_cast(ex), level, subfunction); @@ -93,7 +93,7 @@ namespace alpaka::cpu::detail return ""; } } -#if BOOST_ARCH_X86 +#if ALPAKA_ARCH_X86 // Get the information associated with each extended ID. char cpuBrandString[0x40] = {0}; for(std::uint32_t i(0x8000'0000); i <= nExIds; ++i) @@ -123,11 +123,11 @@ namespace alpaka::cpu::detail //! \return Pagesize in bytes used by the system. inline size_t getPageSize() { -#if BOOST_OS_WINDOWS || BOOST_OS_CYGWIN +#if ALPAKA_OS_WINDOWS || ALPAKA_OS_CYGWIN SYSTEM_INFO si; GetSystemInfo(&si); return si.dwPageSize; -#elif BOOST_OS_UNIX || BOOST_OS_MACOS +#elif ALPAKA_OS_LINUX || ALPAKA_OS_IOS # if defined(_SC_PAGESIZE) return static_cast(sysconf(_SC_PAGESIZE)); # else @@ -145,20 +145,20 @@ namespace alpaka::cpu::detail //! http://nadeausoftware.com/articles/2012/09/c_c_tip_how_get_physical_memory_size_system inline auto getTotalGlobalMemSizeBytes() -> std::size_t { -#if BOOST_OS_WINDOWS +#if ALPAKA_OS_WINDOWS MEMORYSTATUSEX status; status.dwLength = sizeof(status); GlobalMemoryStatusEx(&status); return static_cast(status.ullTotalPhys); -#elif BOOST_OS_CYGWIN +#elif ALPAKA_OS_CYGWIN // New 64-bit MEMORYSTATUSEX isn't available. MEMORYSTATUS status; status.dwLength = sizeof(status); GlobalMemoryStatus(&status); return static_cast(status.dwTotalPhys); -#elif BOOST_OS_UNIX || BOOST_OS_MACOS +#elif ALPAKA_OS_LINUX || ALPAKA_OS_IOS // Unix : Prefer sysctl() over sysconf() except sysctl() with HW_REALMEM and HW_PHYSMEM which are not // always reliable # if defined(CTL_HW) && (defined(HW_MEMSIZE) || defined(HW_PHYSMEM64)) @@ -208,19 +208,19 @@ namespace alpaka::cpu::detail //! \throws std::logic_error if not implemented on the system and std::runtime_error on other errors. 
inline auto getFreeGlobalMemSizeBytes() -> std::size_t { -#if BOOST_OS_WINDOWS +#if ALPAKA_OS_WINDOWS MEMORYSTATUSEX status; status.dwLength = sizeof(status); GlobalMemoryStatusEx(&status); return static_cast(status.ullAvailPhys); -#elif BOOST_OS_LINUX +#elif ALPAKA_OS_LINUX # if defined(_SC_AVPHYS_PAGES) return static_cast(sysconf(_SC_AVPHYS_PAGES)) * getPageSize(); # else // this is legacy and only used as fallback return static_cast(get_avphys_pages()) * getPageSize(); # endif -#elif BOOST_OS_MACOS +#elif ALPAKA_OS_IOS int free_pages = 0; std::size_t len = sizeof(free_pages); if(sysctlbyname("vm.page_free_count", &free_pages, &len, nullptr, 0) < 0) diff --git a/include/alpaka/extent/Traits.hpp b/include/alpaka/extent/Traits.hpp index 460269f028c0..6c062e6773a3 100644 --- a/include/alpaka/extent/Traits.hpp +++ b/include/alpaka/extent/Traits.hpp @@ -43,12 +43,12 @@ namespace alpaka [[deprecated("use getExtents(extent)[Tidx] instead")]] ALPAKA_FN_HOST_ACC auto getExtent( TExtent const& extent = TExtent()) -> Idx { -#if BOOST_COMP_CLANG || BOOST_COMP_GNUC +#if ALPAKA_COMP_CLANG || ALPAKA_COMP_GNUC # pragma GCC diagnostic push # pragma GCC diagnostic ignored "-Wdeprecated-declarations" #endif return trait::GetExtent, TExtent>::getExtent(extent); -#if BOOST_COMP_CLANG || BOOST_COMP_GNUC +#if ALPAKA_COMP_CLANG || ALPAKA_COMP_GNUC # pragma GCC diagnostic pop #endif } diff --git a/include/alpaka/idx/bt/IdxBtUniformCudaHipBuiltIn.hpp b/include/alpaka/idx/bt/IdxBtUniformCudaHipBuiltIn.hpp index ff0366fc51cb..963e5af91a62 100644 --- a/include/alpaka/idx/bt/IdxBtUniformCudaHipBuiltIn.hpp +++ b/include/alpaka/idx/bt/IdxBtUniformCudaHipBuiltIn.hpp @@ -29,11 +29,11 @@ namespace alpaka # if !defined(ALPAKA_HOST_ONLY) -# if defined(ALPAKA_ACC_GPU_CUDA_ENABLED) && !BOOST_LANG_CUDA +# if defined(ALPAKA_ACC_GPU_CUDA_ENABLED) && !ALPAKA_LANG_CUDA # error If ALPAKA_ACC_GPU_CUDA_ENABLED is set, the compiler has to support CUDA! # endif -# if defined(ALPAKA_ACC_GPU_HIP_ENABLED) && !BOOST_LANG_HIP +# if defined(ALPAKA_ACC_GPU_HIP_ENABLED) && !ALPAKA_LANG_HIP # error If ALPAKA_ACC_GPU_HIP_ENABLED is set, the compiler has to support HIP! # endif diff --git a/include/alpaka/idx/gb/IdxGbUniformCudaHipBuiltIn.hpp b/include/alpaka/idx/gb/IdxGbUniformCudaHipBuiltIn.hpp index a643533f46cd..39eff20a3d15 100644 --- a/include/alpaka/idx/gb/IdxGbUniformCudaHipBuiltIn.hpp +++ b/include/alpaka/idx/gb/IdxGbUniformCudaHipBuiltIn.hpp @@ -29,11 +29,11 @@ namespace alpaka # if !defined(ALPAKA_HOST_ONLY) -# if defined(ALPAKA_ACC_GPU_CUDA_ENABLED) && !BOOST_LANG_CUDA +# if defined(ALPAKA_ACC_GPU_CUDA_ENABLED) && !ALPAKA_LANG_CUDA # error If ALPAKA_ACC_GPU_CUDA_ENABLED is set, the compiler has to support CUDA! # endif -# if defined(ALPAKA_ACC_GPU_HIP_ENABLED) && !BOOST_LANG_HIP +# if defined(ALPAKA_ACC_GPU_HIP_ENABLED) && !ALPAKA_LANG_HIP # error If ALPAKA_ACC_GPU_HIP_ENABLED is set, the compiler has to support HIP! 
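A detail worth keeping in mind when reading `SysInfo.hpp` and the other hunks: every `ALPAKA_OS_*`, `ALPAKA_ARCH_*` and `ALPAKA_COMP_*` macro introduced in `BoostPredef.hpp` is always defined — either to `0` (respectively `ALPAKA_VERSION_NUMBER_NOT_AVAILABLE`) or to a non-zero value — so it can be used in plain `#if` chains without `defined()` checks. A small sketch of downstream usage; the `platformTag()` helper is made up for illustration.

```cpp
#include "alpaka/core/BoostPredef.hpp"

#include <string>

// Hypothetical helper returning a short platform tag.
inline auto platformTag() -> std::string
{
#if ALPAKA_OS_LINUX
    std::string tag = "linux";
#elif ALPAKA_OS_WINDOWS
    std::string tag = "windows";
#else
    std::string tag = "other-os";
#endif

#if ALPAKA_ARCH_X86
    return tag + "-x86_64";
#elif ALPAKA_ARCH_ARM
    return tag + "-arm";
#else
    return tag + "-unknown-arch";
#endif
}
```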
# endif diff --git a/include/alpaka/intrinsic/IntrinsicCpu.hpp b/include/alpaka/intrinsic/IntrinsicCpu.hpp index 5db927bd44a8..5f263ff73935 100644 --- a/include/alpaka/intrinsic/IntrinsicCpu.hpp +++ b/include/alpaka/intrinsic/IntrinsicCpu.hpp @@ -18,7 +18,7 @@ # include #endif -#if BOOST_COMP_MSVC +#if ALPAKA_COMP_MSVC # include #endif @@ -39,12 +39,12 @@ namespace alpaka { #ifdef __cpp_lib_bitops return std::popcount(value); -#elif BOOST_COMP_GNUC || BOOST_COMP_CLANG +#elif ALPAKA_COMP_GNUC || ALPAKA_COMP_CLANG if constexpr(sizeof(UnsignedIntegral) == 8) return __builtin_popcountll(value); else return __builtin_popcount(value); -#elif BOOST_COMP_MSVC +#elif ALPAKA_COMP_MSVC if constexpr(sizeof(UnsignedIntegral) == 8) return static_cast(__popcnt64(value)); else @@ -65,12 +65,12 @@ namespace alpaka { #ifdef __cpp_lib_bitops return value == 0 ? 0 : std::countr_zero(static_cast>(value)) + 1; -#elif BOOST_COMP_GNUC || BOOST_COMP_CLANG +#elif ALPAKA_COMP_GNUC || ALPAKA_COMP_CLANG if constexpr(sizeof(Integral) == 8) return __builtin_ffsll(value); else return __builtin_ffs(value); -#elif BOOST_COMP_MSVC +#elif ALPAKA_COMP_MSVC // Implementation based on // https://gitlab.freedesktop.org/cairo/cairo/commit/f5167dc2e1a13d8c4e5d66d7178a24b9b5e7ac7a unsigned long index = 0u; diff --git a/include/alpaka/intrinsic/IntrinsicUniformCudaHipBuiltIn.hpp b/include/alpaka/intrinsic/IntrinsicUniformCudaHipBuiltIn.hpp index 13bddf03ed1e..02c53c2ed7e8 100644 --- a/include/alpaka/intrinsic/IntrinsicUniformCudaHipBuiltIn.hpp +++ b/include/alpaka/intrinsic/IntrinsicUniformCudaHipBuiltIn.hpp @@ -20,11 +20,11 @@ namespace alpaka # if !defined(ALPAKA_HOST_ONLY) -# if defined(ALPAKA_ACC_GPU_CUDA_ENABLED) && !BOOST_LANG_CUDA +# if defined(ALPAKA_ACC_GPU_CUDA_ENABLED) && !ALPAKA_LANG_CUDA # error If ALPAKA_ACC_GPU_CUDA_ENABLED is set, the compiler has to support CUDA! # endif -# if defined(ALPAKA_ACC_GPU_HIP_ENABLED) && !BOOST_LANG_HIP +# if defined(ALPAKA_ACC_GPU_HIP_ENABLED) && !ALPAKA_LANG_HIP # error If ALPAKA_ACC_GPU_HIP_ENABLED is set, the compiler has to support HIP! 
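`IntrinsicCpu.hpp` above keeps its three-way selection between `std::popcount`, the GCC/Clang builtins and the MSVC intrinsics; only the guarding macros changed. Reduced to a stand-alone sketch (32-bit case only, with a portable fallback added here purely for illustration):

```cpp
#include "alpaka/core/BoostPredef.hpp"

#include <cstdint>

#if defined(__has_include)
#    if __has_include(<bit>)
#        include <bit> // defines __cpp_lib_bitops when std::popcount is available
#    endif
#endif
#if ALPAKA_COMP_MSVC
#    include <intrin.h>
#endif

inline auto popcount32(std::uint32_t value) -> std::int32_t
{
#if defined(__cpp_lib_bitops)
    return std::popcount(value); // C++20 path
#elif ALPAKA_COMP_GNUC || ALPAKA_COMP_CLANG
    return __builtin_popcount(value); // GCC/Clang builtin
#elif ALPAKA_COMP_MSVC
    return static_cast<std::int32_t>(__popcnt(value)); // MSVC intrinsic
#else
    std::int32_t count = 0; // portable fallback (Kernighan's loop)
    while(value != 0u)
    {
        value &= value - 1u;
        ++count;
    }
    return count;
#endif
}
```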
# endif @@ -37,7 +37,7 @@ namespace alpaka -> std::int32_t { // clang as CUDA compiler change the interface to unsigned values for clang >=18 -# if BOOST_COMP_CLANG && BOOST_LANG_CUDA && BOOST_COMP_CLANG < BOOST_VERSION_NUMBER(18, 0, 0) +# if ALPAKA_COMP_CLANG && ALPAKA_LANG_CUDA && ALPAKA_COMP_CLANG < ALPAKA_VERSION_NUMBER(18, 0, 0) return __popc(static_cast(value)); # else return static_cast(__popc(static_cast(value))); @@ -48,7 +48,7 @@ namespace alpaka -> std::int32_t { // clang as CUDA compiler change the interface to unsigned values for clang >=18 -# if BOOST_COMP_CLANG && BOOST_LANG_CUDA && BOOST_COMP_CLANG < BOOST_VERSION_NUMBER(18, 0, 0) +# if ALPAKA_COMP_CLANG && ALPAKA_LANG_CUDA && ALPAKA_COMP_CLANG < ALPAKA_VERSION_NUMBER(18, 0, 0) return __popcll(static_cast(value)); # else return static_cast(__popcll(static_cast(value))); diff --git a/include/alpaka/kernel/TaskKernelCpuOmp2Blocks.hpp b/include/alpaka/kernel/TaskKernelCpuOmp2Blocks.hpp index f0d605661d7a..d0b9629b9854 100644 --- a/include/alpaka/kernel/TaskKernelCpuOmp2Blocks.hpp +++ b/include/alpaka/kernel/TaskKernelCpuOmp2Blocks.hpp @@ -33,7 +33,7 @@ #ifdef ALPAKA_ACC_CPU_B_OMP2_T_SEQ_ENABLED -# if BOOST_COMP_CLANG +# if ALPAKA_COMP_CLANG # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wswitch-default" # endif @@ -984,7 +984,7 @@ namespace alpaka } // namespace trait } // namespace alpaka -# if BOOST_COMP_CLANG +# if ALPAKA_COMP_CLANG # pragma clang diagnostic pop # endif diff --git a/include/alpaka/kernel/TaskKernelCpuOmp2Threads.hpp b/include/alpaka/kernel/TaskKernelCpuOmp2Threads.hpp index 6b08e9693a0e..ebe2d42b9ccf 100644 --- a/include/alpaka/kernel/TaskKernelCpuOmp2Threads.hpp +++ b/include/alpaka/kernel/TaskKernelCpuOmp2Threads.hpp @@ -115,7 +115,7 @@ namespace alpaka # pragma omp parallel num_threads(iBlockThreadCount) { // The guard is for gcc internal compiler error, as discussed in #735 - if constexpr((!BOOST_COMP_GNUC) || (BOOST_COMP_GNUC >= BOOST_VERSION_NUMBER(8, 1, 0))) + if constexpr((!ALPAKA_COMP_GNUC) || (ALPAKA_COMP_GNUC >= ALPAKA_VERSION_NUMBER(8, 1, 0))) { # pragma omp single nowait { diff --git a/include/alpaka/kernel/TaskKernelGenericSycl.hpp b/include/alpaka/kernel/TaskKernelGenericSycl.hpp index 11cc2cae4590..4dca7f25cd39 100644 --- a/include/alpaka/kernel/TaskKernelGenericSycl.hpp +++ b/include/alpaka/kernel/TaskKernelGenericSycl.hpp @@ -29,7 +29,7 @@ #ifdef ALPAKA_ACC_SYCL_ENABLED -# if BOOST_COMP_CLANG +# if ALPAKA_COMP_CLANG # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wunused-lambda-capture" # pragma clang diagnostic ignored "-Wunused-parameter" @@ -238,7 +238,7 @@ namespace alpaka } // namespace alpaka -# if BOOST_COMP_CLANG +# if ALPAKA_COMP_CLANG # pragma clang diagnostic pop # endif diff --git a/include/alpaka/kernel/TaskKernelGpuUniformCudaHipRt.hpp b/include/alpaka/kernel/TaskKernelGpuUniformCudaHipRt.hpp index 53bbaf67529f..dc630bd9960c 100644 --- a/include/alpaka/kernel/TaskKernelGpuUniformCudaHipRt.hpp +++ b/include/alpaka/kernel/TaskKernelGpuUniformCudaHipRt.hpp @@ -38,11 +38,11 @@ # include "alpaka/core/BoostPredef.hpp" -# if defined(ALPAKA_ACC_GPU_CUDA_ENABLED) && !BOOST_LANG_CUDA +# if defined(ALPAKA_ACC_GPU_CUDA_ENABLED) && !ALPAKA_LANG_CUDA # error If ALPAKA_ACC_GPU_CUDA_ENABLED is set, the compiler has to support CUDA! # endif -# if defined(ALPAKA_ACC_GPU_HIP_ENABLED) && !BOOST_LANG_HIP +# if defined(ALPAKA_ACC_GPU_HIP_ENABLED) && !ALPAKA_LANG_HIP # error If ALPAKA_ACC_GPU_HIP_ENABLED is set, the compiler has to support HIP! 
# endif @@ -50,7 +50,7 @@ namespace alpaka { namespace detail { -# if BOOST_COMP_CLANG +# if ALPAKA_COMP_CLANG # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wunused-template" # endif @@ -67,14 +67,14 @@ namespace alpaka TAcc const acc(threadElemExtent); // with clang it is not possible to query std::result_of for a pure device lambda created on the host side -# if !(BOOST_COMP_CLANG_CUDA && BOOST_COMP_CLANG) +# if !(ALPAKA_COMP_CLANG_CUDA && ALPAKA_COMP_CLANG) static_assert( std::is_same_v(acc), args...)), void>, "The TKernelFnObj is required to return void!"); # endif kernelFnObj(const_cast(acc), args...); } -# if BOOST_COMP_CLANG +# if ALPAKA_COMP_CLANG # pragma clang diagnostic pop # endif } // namespace detail @@ -325,14 +325,14 @@ namespace alpaka remove_restrict_t>...>; typename TApi::FuncAttributes_t funcAttrs; -# if BOOST_COMP_GNUC +# if ALPAKA_COMP_GNUC // Disable and enable compile warnings for gcc # pragma GCC diagnostic push # pragma GCC diagnostic ignored "-Wconditionally-supported" # endif ALPAKA_UNIFORM_CUDA_HIP_RT_CHECK( TApi::funcGetAttributes(&funcAttrs, reinterpret_cast(kernelName))); -# if BOOST_COMP_GNUC +# if ALPAKA_COMP_GNUC # pragma GCC diagnostic pop # endif diff --git a/include/alpaka/kernel/Traits.hpp b/include/alpaka/kernel/Traits.hpp index c2c0a55b1f7a..895de7893b80 100644 --- a/include/alpaka/kernel/Traits.hpp +++ b/include/alpaka/kernel/Traits.hpp @@ -43,7 +43,7 @@ namespace alpaka template struct BlockSharedMemDynSizeBytes { -#if BOOST_COMP_CLANG +#if ALPAKA_COMP_CLANG # pragma clang diagnostic push # pragma clang diagnostic ignored \ "-Wdocumentation" // clang does not support the syntax for variadic template arguments "args,..." @@ -55,7 +55,7 @@ namespace alpaka //! \param args,... The kernel invocation arguments. //! \return The size of the shared memory allocated for a block in bytes. //! The default version always returns zero. -#if BOOST_COMP_CLANG +#if ALPAKA_COMP_CLANG # pragma clang diagnostic pop #endif ALPAKA_NO_HOST_ACC_WARNING @@ -133,7 +133,7 @@ namespace alpaka }; public: -#if BOOST_COMP_CLANG +#if ALPAKA_COMP_CLANG # pragma clang diagnostic push # pragma clang diagnostic ignored \ "-Wdocumentation" // clang does not support the syntax for variadic template arguments "args,..." @@ -145,7 +145,7 @@ namespace alpaka //! \param args,... The kernel invocation arguments. //! \return The OpenMP schedule information as an alpaka::omp::Schedule object, //! returning an object of any other type is treated as if the trait is not specialized. -#if BOOST_COMP_CLANG +#if ALPAKA_COMP_CLANG # pragma clang diagnostic pop #endif ALPAKA_NO_HOST_ACC_WARNING @@ -161,7 +161,7 @@ namespace alpaka }; } // namespace trait -#if BOOST_COMP_CLANG +#if ALPAKA_COMP_CLANG # pragma clang diagnostic push # pragma clang diagnostic ignored \ "-Wdocumentation" // clang does not support the syntax for variadic template arguments "args,..." @@ -173,7 +173,7 @@ namespace alpaka //! \param args,... The kernel invocation arguments. //! \return The size of the shared memory allocated for a block in bytes. //! The default implementation always returns zero. -#if BOOST_COMP_CLANG +#if ALPAKA_COMP_CLANG # pragma clang diagnostic pop #endif ALPAKA_NO_HOST_ACC_WARNING @@ -210,7 +210,7 @@ namespace alpaka std::forward(args)...); } -#if BOOST_COMP_CLANG +#if ALPAKA_COMP_CLANG # pragma clang diagnostic push # pragma clang diagnostic ignored \ "-Wdocumentation" // clang does not support the syntax for variadic template arguments "args,..." 
@@ -222,7 +222,7 @@ namespace alpaka //! \param args,... The kernel invocation arguments. //! \return The OpenMP schedule information as an alpaka::omp::Schedule object if the kernel specialized the //! OmpSchedule trait, an object of another type if the kernel didn't specialize the trait. -#if BOOST_COMP_CLANG +#if ALPAKA_COMP_CLANG # pragma clang diagnostic pop #endif template @@ -239,7 +239,7 @@ namespace alpaka args...); } -#if BOOST_COMP_CLANG +#if ALPAKA_COMP_CLANG # pragma clang diagnostic push # pragma clang diagnostic ignored \ "-Wdocumentation" // clang does not support the syntax for variadic template arguments "args,..." @@ -303,7 +303,7 @@ namespace alpaka //! @{ template struct IsKernelTriviallyCopyable -#if BOOST_COMP_NVCC +#if ALPAKA_COMP_NVCC : std::bool_constant< std::is_trivially_copyable_v || __nv_is_extended_device_lambda_closure_type(T) || __nv_is_extended_host_device_lambda_closure_type(T)> @@ -325,7 +325,7 @@ namespace alpaka //! \param kernelFnObj The kernel function object which should be executed. //! \param args,... The kernel invocation arguments. //! \return The kernel execution task. -#if BOOST_COMP_CLANG +#if ALPAKA_COMP_CLANG # pragma clang diagnostic pop #endif template @@ -334,7 +334,7 @@ namespace alpaka // check for void return type detail::CheckFnReturnType{}(kernelFnObj, args...); -#if BOOST_COMP_NVCC +#if ALPAKA_COMP_NVCC static_assert( isKernelTriviallyCopyable, "Kernels must be trivially copyable or an extended CUDA lambda expression!"); @@ -359,7 +359,7 @@ namespace alpaka std::forward(args)...); } -#if BOOST_COMP_CLANG +#if ALPAKA_COMP_CLANG # pragma clang diagnostic push # pragma clang diagnostic ignored \ "-Wdocumentation" // clang does not support the syntax for variadic template arguments "args,..." @@ -371,7 +371,7 @@ namespace alpaka //! \param workDiv The index domain work division. //! \param kernelFnObj The kernel function object which should be executed. //! \param args,... The kernel invocation arguments. -#if BOOST_COMP_CLANG +#if ALPAKA_COMP_CLANG # pragma clang diagnostic pop #endif template diff --git a/include/alpaka/math/MathUniformCudaHipBuiltIn.hpp b/include/alpaka/math/MathUniformCudaHipBuiltIn.hpp index ef89423f7c62..878f5eda7049 100644 --- a/include/alpaka/math/MathUniformCudaHipBuiltIn.hpp +++ b/include/alpaka/math/MathUniformCudaHipBuiltIn.hpp @@ -264,11 +264,11 @@ namespace alpaka::math # if !defined(ALPAKA_HOST_ONLY) -# if defined(ALPAKA_ACC_GPU_CUDA_ENABLED) && !BOOST_LANG_CUDA +# if defined(ALPAKA_ACC_GPU_CUDA_ENABLED) && !ALPAKA_LANG_CUDA # error If ALPAKA_ACC_GPU_CUDA_ENABLED is set, the compiler has to support CUDA! # endif -# if defined(ALPAKA_ACC_GPU_HIP_ENABLED) && !BOOST_LANG_HIP +# if defined(ALPAKA_ACC_GPU_HIP_ENABLED) && !ALPAKA_LANG_HIP # error If ALPAKA_ACC_GPU_HIP_ENABLED is set, the compiler has to support HIP! 
# endif diff --git a/include/alpaka/mem/buf/BufUniformCudaHipRt.hpp b/include/alpaka/mem/buf/BufUniformCudaHipRt.hpp index 826edaba7b1e..5011dfb81797 100644 --- a/include/alpaka/mem/buf/BufUniformCudaHipRt.hpp +++ b/include/alpaka/mem/buf/BufUniformCudaHipRt.hpp @@ -275,12 +275,12 @@ namespace alpaka { # if defined(ALPAKA_ACC_GPU_CUDA_ENABLED) static_assert( - std::is_same_v && TApi::version >= BOOST_VERSION_NUMBER(11, 2, 0), + std::is_same_v && TApi::version >= ALPAKA_VERSION_NUMBER(11, 2, 0), "Support for stream-ordered memory buffers requires CUDA 11.2 or higher."); # endif # if defined(ALPAKA_ACC_GPU_HIP_ENABLED) static_assert( - std::is_same_v && TApi::version >= BOOST_VERSION_NUMBER(5, 3, 0), + std::is_same_v && TApi::version >= ALPAKA_VERSION_NUMBER(5, 3, 0), "Support for stream-ordered memory buffers requires HIP/ROCm 5.3 or higher."); # endif static_assert( @@ -324,9 +324,9 @@ namespace alpaka TDim::value <= 1 && ( # if defined(ALPAKA_ACC_GPU_CUDA_ENABLED) - std::is_same_v && TApi::version >= BOOST_VERSION_NUMBER(11, 2, 0) + std::is_same_v && TApi::version >= ALPAKA_VERSION_NUMBER(11, 2, 0) # elif defined(ALPAKA_ACC_GPU_HIP_ENABLED) - std::is_same_v && TApi::version >= BOOST_VERSION_NUMBER(5, 3, 0) + std::is_same_v && TApi::version >= ALPAKA_VERSION_NUMBER(5, 3, 0) # else false # endif diff --git a/include/alpaka/mem/buf/Traits.hpp b/include/alpaka/mem/buf/Traits.hpp index e29cf5bf39b1..5460101ea14d 100644 --- a/include/alpaka/mem/buf/Traits.hpp +++ b/include/alpaka/mem/buf/Traits.hpp @@ -83,7 +83,7 @@ namespace alpaka /* TODO: Remove this pragma block once support for clang versions <= 13 is removed. These versions are unable to figure out that the template parameters are attached to a C++17 inline variable. */ -#if BOOST_COMP_CLANG +#if ALPAKA_COMP_CLANG # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wdocumentation" #endif @@ -93,7 +93,7 @@ namespace alpaka //! \tparam TDim The dimensionality of the buffer to allocate. template inline constexpr bool hasAsyncBufSupport = trait::HasAsyncBufSupport::value; -#if BOOST_COMP_CLANG +#if ALPAKA_COMP_CLANG # pragma clang diagnostic pop #endif @@ -145,7 +145,7 @@ namespace alpaka /* TODO: Remove this pragma block once support for clang versions <= 13 is removed. These versions are unable to figure out that the template parameters are attached to a C++17 inline variable. */ -#if BOOST_COMP_CLANG +#if ALPAKA_COMP_CLANG # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wdocumentation" #endif @@ -154,7 +154,7 @@ namespace alpaka //! \tparam TPlatform The platform from which the buffer is accessible. template inline constexpr bool hasMappedBufSupport = trait::HasMappedBufSupport::value; -#if BOOST_COMP_CLANG +#if ALPAKA_COMP_CLANG # pragma clang diagnostic pop #endif diff --git a/include/alpaka/mem/fence/MemFenceUniformCudaHipBuiltIn.hpp b/include/alpaka/mem/fence/MemFenceUniformCudaHipBuiltIn.hpp index d94b1bc3ff2b..6fe961e634af 100644 --- a/include/alpaka/mem/fence/MemFenceUniformCudaHipBuiltIn.hpp +++ b/include/alpaka/mem/fence/MemFenceUniformCudaHipBuiltIn.hpp @@ -19,11 +19,11 @@ namespace alpaka # if !defined(ALPAKA_HOST_ONLY) -# if defined(ALPAKA_ACC_GPU_CUDA_ENABLED) && !BOOST_LANG_CUDA +# if defined(ALPAKA_ACC_GPU_CUDA_ENABLED) && !ALPAKA_LANG_CUDA # error If ALPAKA_ACC_GPU_CUDA_ENABLED is set, the compiler has to support CUDA! 
# endif -# if defined(ALPAKA_ACC_GPU_HIP_ENABLED) && !BOOST_LANG_HIP +# if defined(ALPAKA_ACC_GPU_HIP_ENABLED) && !ALPAKA_LANG_HIP # error If ALPAKA_ACC_GPU_HIP_ENABLED is set, the compiler has to support HIP! # endif diff --git a/include/alpaka/mem/view/Traits.hpp b/include/alpaka/mem/view/Traits.hpp index 5a9db5b2f3cc..f261468e46ed 100644 --- a/include/alpaka/mem/view/Traits.hpp +++ b/include/alpaka/mem/view/Traits.hpp @@ -79,12 +79,12 @@ namespace alpaka constexpr auto viewDim = Dim::value; if constexpr(idx < viewDim - 1) { -#if BOOST_COMP_CLANG || BOOST_COMP_GNUC +#if ALPAKA_COMP_CLANG || ALPAKA_COMP_GNUC # pragma GCC diagnostic push # pragma GCC diagnostic ignored "-Wdeprecated-declarations" #endif return getExtents(view)[idx] * GetPitchBytes, TView>::getPitchBytes(view); -#if BOOST_COMP_CLANG || BOOST_COMP_GNUC +#if ALPAKA_COMP_CLANG || ALPAKA_COMP_GNUC # pragma GCC diagnostic pop #endif } @@ -175,12 +175,12 @@ namespace alpaka template [[deprecated("Use getPitchesInBytes instead")]] ALPAKA_FN_HOST auto getPitchBytes(TView const& view) -> Idx { -#if BOOST_COMP_CLANG || BOOST_COMP_GNUC +#if ALPAKA_COMP_CLANG || ALPAKA_COMP_GNUC # pragma GCC diagnostic push # pragma GCC diagnostic ignored "-Wdeprecated-declarations" #endif return trait::GetPitchBytes, TView>::getPitchBytes(view); -#if BOOST_COMP_CLANG || BOOST_COMP_GNUC +#if ALPAKA_COMP_CLANG || ALPAKA_COMP_GNUC # pragma GCC diagnostic pop #endif } @@ -535,12 +535,12 @@ namespace alpaka ALPAKA_FN_HOST_ACC constexpr reference access(data_handle_type p, size_t i) const noexcept { assert(i % alignof(ElementType) == 0); -# if BOOST_COMP_GNUC +# if ALPAKA_COMP_GNUC # pragma GCC diagnostic push # pragma GCC diagnostic ignored "-Wcast-align" # endif return *reinterpret_cast(p + i); -# if BOOST_COMP_GNUC +# if ALPAKA_COMP_GNUC # pragma GCC diagnostic pop # endif } diff --git a/include/alpaka/mem/view/ViewSubView.hpp b/include/alpaka/mem/view/ViewSubView.hpp index a35fa22b4b65..da220cdb349b 100644 --- a/include/alpaka/mem/view/ViewSubView.hpp +++ b/include/alpaka/mem/view/ViewSubView.hpp @@ -92,7 +92,7 @@ namespace alpaka public: ALPAKA_FN_HOST auto computeNativePtr() { -#if BOOST_COMP_GNUC +#if ALPAKA_COMP_GNUC # pragma GCC diagnostic push // "cast from 'std::uint8_t*' to 'TElem*' increases required alignment of target type" # pragma GCC diagnostic ignored "-Wcast-align" @@ -100,7 +100,7 @@ namespace alpaka return reinterpret_cast( reinterpret_cast(alpaka::getPtrNative(m_viewParentView)) + (m_offsetsElements * getPitchesInBytes(m_viewParentView)).sum()); -#if BOOST_COMP_GNUC +#if ALPAKA_COMP_GNUC # pragma GCC diagnostic pop #endif } diff --git a/include/alpaka/offset/Traits.hpp b/include/alpaka/offset/Traits.hpp index c2edb3bc3d5c..412b2603a578 100644 --- a/include/alpaka/offset/Traits.hpp +++ b/include/alpaka/offset/Traits.hpp @@ -39,12 +39,12 @@ namespace alpaka [[deprecated("use getOffsets(offsets)[Tidx] instead")]] ALPAKA_FN_HOST_ACC auto getOffset(TOffsets const& offsets) -> Idx { -#if BOOST_COMP_CLANG || BOOST_COMP_GNUC +#if ALPAKA_COMP_CLANG || ALPAKA_COMP_GNUC # pragma GCC diagnostic push # pragma GCC diagnostic ignored "-Wdeprecated-declarations" #endif return trait::GetOffset, TOffsets>::getOffset(offsets); -#if BOOST_COMP_CLANG || BOOST_COMP_GNUC +#if ALPAKA_COMP_CLANG || ALPAKA_COMP_GNUC # pragma GCC diagnostic pop #endif } diff --git a/include/alpaka/platform/PlatformCpu.hpp b/include/alpaka/platform/PlatformCpu.hpp index c431fd418785..8b7f32c96e81 100644 --- a/include/alpaka/platform/PlatformCpu.hpp +++ 
b/include/alpaka/platform/PlatformCpu.hpp @@ -16,8 +16,8 @@ namespace alpaka //! The CPU device platform. struct PlatformCpu : concepts::Implements { -#if defined(BOOST_COMP_GNUC) && BOOST_COMP_GNUC >= BOOST_VERSION_NUMBER(11, 0, 0) \ - && BOOST_COMP_GNUC < BOOST_VERSION_NUMBER(12, 0, 0) +#if defined(ALPAKA_COMP_GNUC) && ALPAKA_COMP_GNUC >= ALPAKA_VERSION_NUMBER(11, 0, 0) \ + && ALPAKA_COMP_GNUC < ALPAKA_VERSION_NUMBER(12, 0, 0) // This is a workaround for g++-11 bug: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96295 // g++-11 complains in *all* places where a PlatformCpu is used, that it "may be used uninitialized" char c = {}; diff --git a/include/alpaka/platform/PlatformFpgaSyclIntel.hpp b/include/alpaka/platform/PlatformFpgaSyclIntel.hpp index a3a73423f3c2..cab476d76a6b 100644 --- a/include/alpaka/platform/PlatformFpgaSyclIntel.hpp +++ b/include/alpaka/platform/PlatformFpgaSyclIntel.hpp @@ -18,7 +18,7 @@ namespace alpaka { // Prevent clang from annoying us with warnings about emitting too many vtables. These are discarded by the // linker anyway. -# if BOOST_COMP_CLANG +# if ALPAKA_COMP_CLANG # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wweak-vtables" # endif @@ -39,7 +39,7 @@ namespace alpaka return is_intel_fpga ? 1 : -1; } }; -# if BOOST_COMP_CLANG +# if ALPAKA_COMP_CLANG # pragma clang diagnostic pop # endif } // namespace detail diff --git a/include/alpaka/platform/PlatformGenericSycl.hpp b/include/alpaka/platform/PlatformGenericSycl.hpp index 12e00fcf70c9..78c78be592ca 100644 --- a/include/alpaka/platform/PlatformGenericSycl.hpp +++ b/include/alpaka/platform/PlatformGenericSycl.hpp @@ -21,7 +21,7 @@ #ifdef ALPAKA_ACC_SYCL_ENABLED -# if BOOST_COMP_CLANG +# if ALPAKA_COMP_CLANG # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wswitch-default" # endif @@ -207,15 +207,15 @@ namespace alpaka std::cout << "SYCL version: " << device.get_info() << '\n'; -# if !defined(BOOST_COMP_ICPX) +# if !defined(ALPAKA_COMP_ICPX) // Not defined by Level Zero back-end std::cout << "Backend version: " << device.get_info() << '\n'; # endif std::cout << "Aspects: " << '\n'; -# if defined(BOOST_COMP_ICPX) -# if BOOST_COMP_ICPX >= BOOST_VERSION_NUMBER(53, 2, 0) +# if defined(ALPAKA_COMP_ICPX) +# if ALPAKA_COMP_ICPX >= ALPAKA_VERSION_NUMBER(53, 2, 0) // These aspects are missing from oneAPI versions < 2023.2.0 if(device.has(sycl::aspect::emulated)) std::cout << "\t* emulated\n"; @@ -514,7 +514,7 @@ namespace alpaka case sycl::memory_order::seq_cst: std::cout << "seq_cst"; break; -# if defined(BOOST_COMP_ICPX) +# if defined(ALPAKA_COMP_ICPX) // Stop icpx from complaining about its own internals. 
case sycl::memory_order::__consume_unsupported: break; @@ -529,8 +529,8 @@ namespace alpaka auto const mem_orders = device.get_info(); print_memory_orders(mem_orders); -# if defined(BOOST_COMP_ICPX) -# if BOOST_COMP_ICPX >= BOOST_VERSION_NUMBER(53, 2, 0) +# if defined(ALPAKA_COMP_ICPX) +# if ALPAKA_COMP_ICPX >= ALPAKA_VERSION_NUMBER(53, 2, 0) // Not implemented in oneAPI < 2023.2.0 std::cout << "Supported memory orderings for sycl::atomic_fence: "; auto const fence_orders = device.get_info(); @@ -573,8 +573,8 @@ namespace alpaka auto const mem_scopes = device.get_info(); print_memory_scopes(mem_scopes); -# if defined(BOOST_COMP_ICPX) -# if BOOST_COMP_ICPX >= BOOST_VERSION_NUMBER(53, 2, 0) +# if defined(ALPAKA_COMP_ICPX) +# if ALPAKA_COMP_ICPX >= ALPAKA_VERSION_NUMBER(53, 2, 0) // Not implemented in oneAPI < 2023.2.0 std::cout << "Supported memory scopes for sycl::atomic_fence: "; auto const fence_scopes = device.get_info(); @@ -620,7 +620,7 @@ namespace alpaka std::cout << "by affinity domain"; has_affinity_domains = true; break; -# if defined(BOOST_COMP_ICPX) +# if defined(ALPAKA_COMP_ICPX) case sycl::info::partition_property::ext_intel_partition_by_cslice: std::cout << "by compute slice (Intel extension; deprecated)"; break; @@ -690,7 +690,7 @@ namespace alpaka std::cout << "partitioned by affinity domain"; break; -# if defined(BOOST_COMP_ICPX) +# if defined(ALPAKA_COMP_ICPX) case sycl::info::partition_property::ext_intel_partition_by_cslice: std::cout << "partitioned by compute slice (Intel extension; deprecated)"; break; @@ -739,7 +739,7 @@ namespace alpaka } // namespace trait } // namespace alpaka -# if BOOST_COMP_CLANG +# if ALPAKA_COMP_CLANG # pragma clang diagnostic pop # endif diff --git a/include/alpaka/platform/PlatformUniformCudaHipRt.hpp b/include/alpaka/platform/PlatformUniformCudaHipRt.hpp index a3ae0ef04dc5..ef0f26e2b61e 100644 --- a/include/alpaka/platform/PlatformUniformCudaHipRt.hpp +++ b/include/alpaka/platform/PlatformUniformCudaHipRt.hpp @@ -28,8 +28,8 @@ namespace alpaka template struct PlatformUniformCudaHipRt : concepts::Implements> { -# if defined(BOOST_COMP_GNUC) && BOOST_COMP_GNUC >= BOOST_VERSION_NUMBER(11, 0, 0) \ - && BOOST_COMP_GNUC < BOOST_VERSION_NUMBER(12, 0, 0) +# if defined(ALPAKA_COMP_GNUC) && ALPAKA_COMP_GNUC >= ALPAKA_VERSION_NUMBER(11, 0, 0) \ + && ALPAKA_COMP_GNUC < ALPAKA_VERSION_NUMBER(12, 0, 0) // This is a workaround for g++-11 bug: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96295 // g++-11 complains in *all* places where a PlatformCpu is used, that it "may be used uninitialized" char c = {}; diff --git a/include/alpaka/queue/QueueGenericThreadsBlocking.hpp b/include/alpaka/queue/QueueGenericThreadsBlocking.hpp index 65361bd0a0ad..a536593afd71 100644 --- a/include/alpaka/queue/QueueGenericThreadsBlocking.hpp +++ b/include/alpaka/queue/QueueGenericThreadsBlocking.hpp @@ -23,7 +23,7 @@ namespace alpaka { namespace detail { -#if BOOST_COMP_CLANG +#if ALPAKA_COMP_CLANG // avoid diagnostic warning: "has no out-of-line virtual method definitions; its vtable will be emitted in every // translation unit [-Werror,-Wweak-vtables]" https://stackoverflow.com/a/29288300 # pragma clang diagnostic push @@ -32,7 +32,7 @@ namespace alpaka //! The CPU device queue implementation. 
template class QueueGenericThreadsBlockingImpl final : public IGenericThreadsQueue -#if BOOST_COMP_CLANG +#if ALPAKA_COMP_CLANG # pragma clang diagnostic pop #endif { diff --git a/include/alpaka/queue/QueueGenericThreadsNonBlocking.hpp b/include/alpaka/queue/QueueGenericThreadsNonBlocking.hpp index 4e02a911a383..335804b621e7 100644 --- a/include/alpaka/queue/QueueGenericThreadsNonBlocking.hpp +++ b/include/alpaka/queue/QueueGenericThreadsNonBlocking.hpp @@ -28,7 +28,7 @@ namespace alpaka { namespace detail { -#if BOOST_COMP_CLANG +#if ALPAKA_COMP_CLANG // avoid diagnostic warning: "has no out-of-line virtual method definitions; its vtable will be emitted in every // translation unit [-Werror,-Wweak-vtables]" https://stackoverflow.com/a/29288300 # pragma clang diagnostic push @@ -37,7 +37,7 @@ namespace alpaka //! The CPU device queue implementation. template class QueueGenericThreadsNonBlockingImpl final : public IGenericThreadsQueue -#if BOOST_COMP_CLANG +#if ALPAKA_COMP_CLANG # pragma clang diagnostic pop #endif { diff --git a/include/alpaka/queue/cpu/IGenericThreadsQueue.hpp b/include/alpaka/queue/cpu/IGenericThreadsQueue.hpp index 3d82a9c6a133..21e19caa8596 100644 --- a/include/alpaka/queue/cpu/IGenericThreadsQueue.hpp +++ b/include/alpaka/queue/cpu/IGenericThreadsQueue.hpp @@ -11,7 +11,7 @@ namespace alpaka template class EventGenericThreads; -#if BOOST_COMP_CLANG +#if ALPAKA_COMP_CLANG // avoid diagnostic warning: "has no out-of-line virtual method definitions; its vtable will be emitted in every // translation unit [-Werror,-Wweak-vtables]" https://stackoverflow.com/a/29288300 # pragma clang diagnostic push @@ -29,7 +29,7 @@ namespace alpaka virtual void wait(EventGenericThreads const&) = 0; virtual ~IGenericThreadsQueue() = default; }; -#if BOOST_COMP_CLANG +#if ALPAKA_COMP_CLANG # pragma clang diagnostic pop #endif } // namespace alpaka diff --git a/include/alpaka/rand/RandGenericSycl.hpp b/include/alpaka/rand/RandGenericSycl.hpp index c114a4fd7ca6..14e180631b9c 100644 --- a/include/alpaka/rand/RandGenericSycl.hpp +++ b/include/alpaka/rand/RandGenericSycl.hpp @@ -13,7 +13,7 @@ // Backend specific imports. # include -# if BOOST_COMP_CLANG +# if ALPAKA_COMP_CLANG # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wcast-align" # pragma clang diagnostic ignored "-Wcast-qual" @@ -26,7 +26,7 @@ # pragma clang diagnostic ignored "-Wundef" # endif # include -# if BOOST_COMP_CLANG +# if ALPAKA_COMP_CLANG # pragma clang diagnostic pop # endif diff --git a/include/alpaka/rand/RandUniformCudaHipRand.hpp b/include/alpaka/rand/RandUniformCudaHipRand.hpp index 63ffea909e25..19efc16d1f2b 100644 --- a/include/alpaka/rand/RandUniformCudaHipRand.hpp +++ b/include/alpaka/rand/RandUniformCudaHipRand.hpp @@ -18,7 +18,7 @@ # if defined(ALPAKA_ACC_GPU_CUDA_ENABLED) # include # elif defined(ALPAKA_ACC_GPU_HIP_ENABLED) -# if BOOST_COMP_CLANG +# if ALPAKA_COMP_CLANG # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wduplicate-decl-specifier" # endif @@ -29,7 +29,7 @@ # include # endif -# if BOOST_COMP_CLANG +# if ALPAKA_COMP_CLANG # pragma clang diagnostic pop # endif # endif @@ -44,11 +44,11 @@ namespace alpaka::rand # if !defined(ALPAKA_HOST_ONLY) -# if defined(ALPAKA_ACC_GPU_CUDA_ENABLED) && !BOOST_LANG_CUDA +# if defined(ALPAKA_ACC_GPU_CUDA_ENABLED) && !ALPAKA_LANG_CUDA # error If ALPAKA_ACC_GPU_CUDA_ENABLED is set, the compiler has to support CUDA! 
# endif -# if defined(ALPAKA_ACC_GPU_HIP_ENABLED) && !BOOST_LANG_HIP +# if defined(ALPAKA_ACC_GPU_HIP_ENABLED) && !ALPAKA_LANG_HIP # error If ALPAKA_ACC_GPU_HIP_ENABLED is set, the compiler has to support HIP! # endif diff --git a/include/alpaka/rand/TinyMT/tinymt32.h b/include/alpaka/rand/TinyMT/tinymt32.h index 55a946f2d435..0270eba6fde8 100644 --- a/include/alpaka/rand/TinyMT/tinymt32.h +++ b/include/alpaka/rand/TinyMT/tinymt32.h @@ -34,16 +34,16 @@ #endif #include -#if BOOST_COMP_CLANG +#if ALPAKA_COMP_CLANG # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wold-style-cast" # pragma clang diagnostic ignored "-Wunused-function" #endif -#if BOOST_COMP_GNUC +#if ALPAKA_COMP_GNUC # pragma GCC diagnostic push # pragma GCC diagnostic ignored "-Wold-style-cast" #endif -#if BOOST_COMP_MSVC || defined(BOOST_COMP_MSVC_EMULATED) +#if ALPAKA_COMP_MSVC #pragma warning(push) #pragma warning(disable: 4100) // tinymt32.h(60): warning C4100: 'random': unreferenced formal parameter #endif @@ -416,13 +416,13 @@ void tinymt32_init_by_array(tinymt32_t * random, uint32_t init_key[], #undef MIN_LOOP #undef PRE_LOOP -#if BOOST_COMP_CLANG +#if ALPAKA_COMP_CLANG # pragma clang diagnostic pop #endif -#if BOOST_COMP_GNUC +#if ALPAKA_COMP_GNUC # pragma GCC diagnostic pop #endif -#if BOOST_COMP_MSVC || defined(BOOST_COMP_MSVC_EMULATED) +#if ALPAKA_COMP_MSVC # pragma warning(pop) #endif diff --git a/include/alpaka/standalone/GpuCudaRt.hpp b/include/alpaka/standalone/GpuCudaRt.hpp index eeaae154e00b..53150ad61946 100644 --- a/include/alpaka/standalone/GpuCudaRt.hpp +++ b/include/alpaka/standalone/GpuCudaRt.hpp @@ -10,7 +10,7 @@ #include "alpaka/core/BoostPredef.hpp" -#if defined(BOOST_COMP_CLANG_CUDA) && (BOOST_COMP_CLANG_CUDA == BOOST_VERSION_NUMBER(14, 0, 0)) +#if defined(ALPAKA_COMP_CLANG_CUDA) && (ALPAKA_COMP_CLANG_CUDA == ALPAKA_VERSION_NUMBER(14, 0, 0)) # include diff --git a/include/alpaka/test/KernelExecutionFixture.hpp b/include/alpaka/test/KernelExecutionFixture.hpp index 0e59344497ed..a26094614bf7 100644 --- a/include/alpaka/test/KernelExecutionFixture.hpp +++ b/include/alpaka/test/KernelExecutionFixture.hpp @@ -6,11 +6,11 @@ #include "alpaka/alpaka.hpp" -#if defined(ALPAKA_ACC_GPU_CUDA_ENABLED) && !BOOST_LANG_CUDA +#if defined(ALPAKA_ACC_GPU_CUDA_ENABLED) && !ALPAKA_LANG_CUDA # error If ALPAKA_ACC_GPU_CUDA_ENABLED is set, the compiler has to support CUDA! #endif -#if defined(ALPAKA_ACC_GPU_HIP_ENABLED) && !BOOST_LANG_HIP +#if defined(ALPAKA_ACC_GPU_HIP_ENABLED) && !ALPAKA_LANG_HIP # error If ALPAKA_ACC_GPU_HIP_ENABLED is set, the compiler has to support HIP! #endif diff --git a/include/alpaka/test/acc/TestAccs.hpp b/include/alpaka/test/acc/TestAccs.hpp index 2370fa42686b..42e270108b07 100644 --- a/include/alpaka/test/acc/TestAccs.hpp +++ b/include/alpaka/test/acc/TestAccs.hpp @@ -17,8 +17,8 @@ // we have to dramatically reduce the number of tested combinations. // Else the log length would be exceeded. 
#if defined(ALPAKA_CI) -# if defined(ALPAKA_ACC_GPU_CUDA_ENABLED) && BOOST_LANG_CUDA \ - || defined(ALPAKA_ACC_GPU_HIP_ENABLED) && BOOST_LANG_HIP +# if defined(ALPAKA_ACC_GPU_CUDA_ENABLED) && ALPAKA_LANG_CUDA \ + || defined(ALPAKA_ACC_GPU_HIP_ENABLED) && ALPAKA_LANG_HIP # define ALPAKA_CUDA_CI # endif #endif @@ -63,14 +63,14 @@ namespace alpaka::test template using AccCpuOmp2ThreadsIfAvailableElseInt = int; #endif -#if defined(ALPAKA_ACC_GPU_CUDA_ENABLED) && (BOOST_LANG_CUDA || defined(ALPAKA_HOST_ONLY)) +#if defined(ALPAKA_ACC_GPU_CUDA_ENABLED) && (ALPAKA_LANG_CUDA || defined(ALPAKA_HOST_ONLY)) template using AccGpuCudaRtIfAvailableElseInt = AccGpuCudaRt; #else template using AccGpuCudaRtIfAvailableElseInt = int; #endif -#if defined(ALPAKA_ACC_GPU_HIP_ENABLED) && (BOOST_LANG_HIP || defined(ALPAKA_HOST_ONLY)) +#if defined(ALPAKA_ACC_GPU_HIP_ENABLED) && (ALPAKA_LANG_HIP || defined(ALPAKA_HOST_ONLY)) template using AccGpuHipRtIfAvailableElseInt = typename std::conditional> == false, AccGpuHipRt, int>::type; diff --git a/include/alpaka/test/event/EventHostManualTrigger.hpp b/include/alpaka/test/event/EventHostManualTrigger.hpp index 653dbbb641f3..2bf2d0805d0c 100644 --- a/include/alpaka/test/event/EventHostManualTrigger.hpp +++ b/include/alpaka/test/event/EventHostManualTrigger.hpp @@ -238,7 +238,7 @@ namespace alpaka::trait # include -# if !BOOST_LANG_CUDA && !defined(ALPAKA_HOST_ONLY) +# if !ALPAKA_LANG_CUDA && !defined(ALPAKA_HOST_ONLY) # error If ALPAKA_ACC_GPU_CUDA_ENABLED is set, the compiler has to support CUDA! # endif @@ -478,7 +478,7 @@ namespace alpaka::trait # include -# if !BOOST_LANG_HIP && !defined(ALPAKA_HOST_ONLY) +# if !ALPAKA_LANG_HIP && !defined(ALPAKA_HOST_ONLY) # error If ALPAKA_ACC_GPU_HIP_ENABLED is set, the compiler has to support HIP! # endif diff --git a/include/alpaka/test/mem/view/Iterator.hpp b/include/alpaka/test/mem/view/Iterator.hpp index 314d1c0c9d53..1b1f1220b4ae 100644 --- a/include/alpaka/test/mem/view/Iterator.hpp +++ b/include/alpaka/test/mem/view/Iterator.hpp @@ -86,13 +86,13 @@ namespace alpaka::test = mapIdx(Vec, Idx>{m_currentIdx}, m_extents); auto const offsetInBytes = (currentIdxDimx * m_pitchBytes).sum(); using QualifiedByte = MimicConst; -#if BOOST_COMP_GNUC +#if ALPAKA_COMP_GNUC # pragma GCC diagnostic push // "cast from 'Byte*' to 'Elem*' increases required alignment of target type" # pragma GCC diagnostic ignored "-Wcast-align" #endif return *reinterpret_cast(reinterpret_cast(m_nativePtr) + offsetInBytes); -#if BOOST_COMP_GNUC +#if ALPAKA_COMP_GNUC # pragma GCC diagnostic pop #endif } diff --git a/include/alpaka/test/mem/view/ViewTest.hpp b/include/alpaka/test/mem/view/ViewTest.hpp index eef3b5a14247..26dd07dc3404 100644 --- a/include/alpaka/test/mem/view/ViewTest.hpp +++ b/include/alpaka/test/mem/view/ViewTest.hpp @@ -143,7 +143,7 @@ namespace alpaka::test } //! 
Compares iterators element-wise -#if BOOST_COMP_GNUC +#if ALPAKA_COMP_GNUC # pragma GCC diagnostic push # pragma GCC diagnostic ignored "-Wfloat-equal" // "comparing floating point with == or != is unsafe" #endif @@ -160,18 +160,18 @@ namespace alpaka::test { for(; beginA != endA; ++beginA, ++beginB) { -#if BOOST_COMP_CLANG +#if ALPAKA_COMP_CLANG # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wfloat-equal" // "comparing floating point with == or != is unsafe" #endif ALPAKA_CHECK(*success, *beginA == *beginB); -#if BOOST_COMP_CLANG +#if ALPAKA_COMP_CLANG # pragma clang diagnostic pop #endif } } }; -#if BOOST_COMP_GNUC +#if ALPAKA_COMP_GNUC # pragma GCC diagnostic pop #endif diff --git a/include/alpaka/test/queue/QueueCpuOmp2Collective.hpp b/include/alpaka/test/queue/QueueCpuOmp2Collective.hpp index 4b346c891373..b33db0408ae7 100644 --- a/include/alpaka/test/queue/QueueCpuOmp2Collective.hpp +++ b/include/alpaka/test/queue/QueueCpuOmp2Collective.hpp @@ -31,7 +31,7 @@ namespace alpaka { namespace cpu::detail { -# if BOOST_COMP_CLANG +# if ALPAKA_COMP_CLANG // avoid diagnostic warning: "has no out-of-line virtual method definitions; its vtable will be emitted in every // translation unit [-Werror,-Wweak-vtables]" https://stackoverflow.com/a/29288300 # pragma clang diagnostic push @@ -39,7 +39,7 @@ namespace alpaka # endif //! The CPU collective device queue implementation. struct QueueCpuOmp2CollectiveImpl final : cpu::ICpuQueue -# if BOOST_COMP_CLANG +# if ALPAKA_COMP_CLANG # pragma clang diagnostic pop # endif { diff --git a/include/alpaka/vec/Vec.hpp b/include/alpaka/vec/Vec.hpp index d327f60ff455..ad0f6f1ee2ce 100644 --- a/include/alpaka/vec/Vec.hpp +++ b/include/alpaka/vec/Vec.hpp @@ -56,8 +56,8 @@ namespace alpaka //! Value constructor. //! This constructor is only available if the number of parameters matches the vector idx. ALPAKA_NO_HOST_ACC_WARNING -#if BOOST_COMP_NVCC && BOOST_COMP_NVCC >= BOOST_VERSION_NUMBER(11, 3, 0) \ - && BOOST_COMP_NVCC < BOOST_VERSION_NUMBER(11, 4, 0) +#if ALPAKA_COMP_NVCC && ALPAKA_COMP_NVCC >= ALPAKA_VERSION_NUMBER(11, 3, 0) \ + && ALPAKA_COMP_NVCC < ALPAKA_VERSION_NUMBER(11, 4, 0) // This constructor tries to avoid SFINAE, which crashes nvcc 11.3. We also need to have a first // argument, so an unconstrained ctor with forwarding references does not hijack the compiler provided // copy-ctor. @@ -82,8 +82,8 @@ namespace alpaka //! Generator constructor. //! Initializes the vector with the values returned from generator(IC) in order, where IC::value runs from 0 to //! TDim - 1 (inclusive). -#if BOOST_COMP_NVCC && BOOST_COMP_NVCC >= BOOST_VERSION_NUMBER(11, 3, 0) \ - && BOOST_COMP_NVCC < BOOST_VERSION_NUMBER(11, 4, 0) +#if ALPAKA_COMP_NVCC && ALPAKA_COMP_NVCC >= ALPAKA_VERSION_NUMBER(11, 3, 0) \ + && ALPAKA_COMP_NVCC < ALPAKA_VERSION_NUMBER(11, 4, 0) template ALPAKA_FN_HOST_ACC constexpr explicit Vec( F&& generator, @@ -287,7 +287,7 @@ namespace alpaka } // suppress strange warning produced by nvcc+MSVC in release mode -#if BOOST_COMP_MSVC || defined(BOOST_COMP_MSVC_EMULATED) +#if ALPAKA_COMP_MSVC # pragma warning(push) # pragma warning(disable : 4702) // unreachable code #endif @@ -297,7 +297,7 @@ namespace alpaka { return foldrAll(std::multiplies{}, TVal{1}); } -#if BOOST_COMP_MSVC || defined(BOOST_COMP_MSVC_EMULATED) +#if ALPAKA_COMP_MSVC # pragma warning(pop) #endif //! \return The sum of all values. 
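Aside on the `Vec.hpp` hunks that follow: they repeat a single guard many times. On nvcc older than 11.3 the code uses a plain `if` instead of `if constexpr` and temporarily silences the `unsigned_compare_with_zero` diagnostic; newer compilers take the `constexpr` branch. A reduced sketch of that guard with the renamed macros is given below; `DimStandIn` and `addElementwise` are hypothetical stand-ins for the `alpaka::Vec` members the real operators work on, and the include of `alpaka/core/BoostPredef.hpp` assumes the renamed macros still come from that header, as the other hunks in this diff suggest.

```cpp
// Sketch of the nvcc < 11.3 guard repeated throughout Vec.hpp, using the
// renamed macros. DimStandIn and addElementwise are hypothetical stand-ins
// for the alpaka::Vec members the real operators work on.
#include "alpaka/core/BoostPredef.hpp" // assumed to provide ALPAKA_COMP_NVCC and ALPAKA_VERSION_NUMBER

#include <cstddef>

template<std::size_t N>
struct DimStandIn
{
    using value_type = std::size_t;
    static constexpr value_type value = N;
};

template<typename TDim, typename TVal, std::size_t N>
void addElementwise(TVal (&r)[N], TVal const (&p)[N], TVal const (&q)[N])
{
#if ALPAKA_COMP_NVCC && ALPAKA_COMP_NVCC < ALPAKA_VERSION_NUMBER(11, 3, 0)
    if(TDim::value > 0) // plain `if` on old nvcc, as in the hunks below
#else
    if constexpr(TDim::value > 0)
#endif
    {
#if ALPAKA_COMP_NVCC && ALPAKA_COMP_NVCC < ALPAKA_VERSION_NUMBER(11, 3, 0)
#    pragma diag_suppress = unsigned_compare_with_zero
#endif
        for(typename TDim::value_type i = 0; i < TDim::value; ++i)
#if ALPAKA_COMP_NVCC && ALPAKA_COMP_NVCC < ALPAKA_VERSION_NUMBER(11, 3, 0)
#    pragma diag_default = unsigned_compare_with_zero
#endif
            r[i] = p[i] + q[i];
    }
}

// Usage, e.g.: float r[3], p[3]{1, 2, 3}, q[3]{4, 5, 6}; addElementwise<DimStandIn<3>>(r, p, q);
```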
@@ -373,7 +373,7 @@ namespace alpaka ALPAKA_FN_HOST_ACC friend constexpr auto operator+(Vec const& p, Vec const& q) -> Vec { Vec r; -#if BOOST_COMP_NVCC && BOOST_COMP_NVCC < BOOST_VERSION_NUMBER(11, 3, 0) +#if ALPAKA_COMP_NVCC && ALPAKA_COMP_NVCC < ALPAKA_VERSION_NUMBER(11, 3, 0) if(TDim::value > 0) #else if constexpr(TDim::value > 0) @@ -390,17 +390,17 @@ namespace alpaka ALPAKA_FN_HOST_ACC friend constexpr auto operator-(Vec const& p, Vec const& q) -> Vec { Vec r; -#if BOOST_COMP_NVCC && BOOST_COMP_NVCC < BOOST_VERSION_NUMBER(11, 3, 0) +#if ALPAKA_COMP_NVCC && ALPAKA_COMP_NVCC < ALPAKA_VERSION_NUMBER(11, 3, 0) if(TDim::value > 0) #else if constexpr(TDim::value > 0) #endif { -#if BOOST_COMP_NVCC && BOOST_COMP_NVCC < BOOST_VERSION_NUMBER(11, 3, 0) +#if ALPAKA_COMP_NVCC && ALPAKA_COMP_NVCC < ALPAKA_VERSION_NUMBER(11, 3, 0) # pragma diag_suppress = unsigned_compare_with_zero #endif for(typename TDim::value_type i = 0; i < TDim::value; ++i) -#if BOOST_COMP_NVCC && BOOST_COMP_NVCC < BOOST_VERSION_NUMBER(11, 3, 0) +#if ALPAKA_COMP_NVCC && ALPAKA_COMP_NVCC < ALPAKA_VERSION_NUMBER(11, 3, 0) # pragma diag_default = unsigned_compare_with_zero #endif r[i] = p[i] - q[i]; @@ -413,7 +413,7 @@ namespace alpaka ALPAKA_FN_HOST_ACC friend constexpr auto operator*(Vec const& p, Vec const& q) -> Vec { Vec r; -#if BOOST_COMP_NVCC && BOOST_COMP_NVCC < BOOST_VERSION_NUMBER(11, 3, 0) +#if ALPAKA_COMP_NVCC && ALPAKA_COMP_NVCC < ALPAKA_VERSION_NUMBER(11, 3, 0) if(TDim::value > 0) #else if constexpr(TDim::value > 0) @@ -428,17 +428,17 @@ namespace alpaka ALPAKA_NO_HOST_ACC_WARNING ALPAKA_FN_HOST_ACC friend constexpr auto operator==(Vec const& a, Vec const& b) -> bool { -#if BOOST_COMP_NVCC && BOOST_COMP_NVCC < BOOST_VERSION_NUMBER(11, 3, 0) +#if ALPAKA_COMP_NVCC && ALPAKA_COMP_NVCC < ALPAKA_VERSION_NUMBER(11, 3, 0) if(TDim::value > 0) #else if constexpr(TDim::value > 0) #endif { -#if BOOST_COMP_NVCC && BOOST_COMP_NVCC < BOOST_VERSION_NUMBER(11, 3, 0) +#if ALPAKA_COMP_NVCC && ALPAKA_COMP_NVCC < ALPAKA_VERSION_NUMBER(11, 3, 0) # pragma diag_suppress = unsigned_compare_with_zero #endif for(typename TDim::value_type i(0); i < TDim::value; ++i) -#if BOOST_COMP_NVCC && BOOST_COMP_NVCC < BOOST_VERSION_NUMBER(11, 3, 0) +#if ALPAKA_COMP_NVCC && ALPAKA_COMP_NVCC < ALPAKA_VERSION_NUMBER(11, 3, 0) # pragma diag_default = unsigned_compare_with_zero #endif { @@ -460,7 +460,7 @@ namespace alpaka ALPAKA_FN_HOST_ACC friend constexpr auto operator<(Vec const& p, Vec const& q) -> Vec { Vec r; -#if BOOST_COMP_NVCC && BOOST_COMP_NVCC < BOOST_VERSION_NUMBER(11, 3, 0) +#if ALPAKA_COMP_NVCC && ALPAKA_COMP_NVCC < ALPAKA_VERSION_NUMBER(11, 3, 0) if(TDim::value > 0) #else if constexpr(TDim::value > 0) @@ -477,7 +477,7 @@ namespace alpaka ALPAKA_FN_HOST_ACC friend constexpr auto operator<=(Vec const& p, Vec const& q) -> Vec { Vec r; -#if BOOST_COMP_NVCC && BOOST_COMP_NVCC < BOOST_VERSION_NUMBER(11, 3, 0) +#if ALPAKA_COMP_NVCC && ALPAKA_COMP_NVCC < ALPAKA_VERSION_NUMBER(11, 3, 0) if(TDim::value > 0) #else if constexpr(TDim::value > 0) @@ -494,7 +494,7 @@ namespace alpaka ALPAKA_FN_HOST_ACC friend constexpr auto operator>(Vec const& p, Vec const& q) -> Vec { Vec r; -#if BOOST_COMP_NVCC && BOOST_COMP_NVCC < BOOST_VERSION_NUMBER(11, 3, 0) +#if ALPAKA_COMP_NVCC && ALPAKA_COMP_NVCC < ALPAKA_VERSION_NUMBER(11, 3, 0) if(TDim::value > 0) #else if constexpr(TDim::value > 0) @@ -511,7 +511,7 @@ namespace alpaka ALPAKA_FN_HOST_ACC friend constexpr auto operator>=(Vec const& p, Vec const& q) -> Vec { Vec r; -#if BOOST_COMP_NVCC && BOOST_COMP_NVCC < 
BOOST_VERSION_NUMBER(11, 3, 0) +#if ALPAKA_COMP_NVCC && ALPAKA_COMP_NVCC < ALPAKA_VERSION_NUMBER(11, 3, 0) if(TDim::value > 0) #else if constexpr(TDim::value > 0) @@ -528,7 +528,7 @@ namespace alpaka ALPAKA_FN_HOST_ACC friend constexpr auto operator&&(Vec const& p, Vec const& q) -> Vec { Vec r; -#if BOOST_COMP_NVCC && BOOST_COMP_NVCC < BOOST_VERSION_NUMBER(11, 3, 0) +#if ALPAKA_COMP_NVCC && ALPAKA_COMP_NVCC < ALPAKA_VERSION_NUMBER(11, 3, 0) if(TDim::value > 0) #else if constexpr(TDim::value > 0) @@ -545,7 +545,7 @@ namespace alpaka ALPAKA_FN_HOST_ACC friend constexpr auto operator||(Vec const& p, Vec const& q) -> Vec { Vec r; -#if BOOST_COMP_NVCC && BOOST_COMP_NVCC < BOOST_VERSION_NUMBER(11, 3, 0) +#if ALPAKA_COMP_NVCC && ALPAKA_COMP_NVCC < ALPAKA_VERSION_NUMBER(11, 3, 0) if(TDim::value > 0) #else if constexpr(TDim::value > 0) @@ -560,17 +560,17 @@ namespace alpaka ALPAKA_FN_HOST friend constexpr auto operator<<(std::ostream& os, Vec const& v) -> std::ostream& { os << "("; -#if BOOST_COMP_NVCC && BOOST_COMP_NVCC < BOOST_VERSION_NUMBER(11, 3, 0) +#if ALPAKA_COMP_NVCC && ALPAKA_COMP_NVCC < ALPAKA_VERSION_NUMBER(11, 3, 0) if(TDim::value > 0) #else if constexpr(TDim::value > 0) #endif { -#if BOOST_COMP_NVCC && BOOST_COMP_NVCC < BOOST_VERSION_NUMBER(11, 3, 0) +#if ALPAKA_COMP_NVCC && ALPAKA_COMP_NVCC < ALPAKA_VERSION_NUMBER(11, 3, 0) # pragma diag_suppress = unsigned_compare_with_zero #endif for(typename TDim::value_type i = 0; i < TDim::value; ++i) -#if BOOST_COMP_NVCC && BOOST_COMP_NVCC < BOOST_VERSION_NUMBER(11, 3, 0) +#if ALPAKA_COMP_NVCC && ALPAKA_COMP_NVCC < ALPAKA_VERSION_NUMBER(11, 3, 0) # pragma diag_default = unsigned_compare_with_zero #endif { @@ -606,7 +606,7 @@ namespace alpaka ALPAKA_FN_HOST_ACC constexpr auto toArray(Vec const& v) -> std::array { std::array a{}; -#if BOOST_COMP_NVCC && BOOST_COMP_NVCC < BOOST_VERSION_NUMBER(11, 3, 0) +#if ALPAKA_COMP_NVCC && ALPAKA_COMP_NVCC < ALPAKA_VERSION_NUMBER(11, 3, 0) if(TDim::value > 0) #else if constexpr(TDim::value > 0) @@ -628,7 +628,7 @@ namespace alpaka ALPAKA_FN_HOST_ACC constexpr auto elementwise_min(Vec const& p, Vecs const&... qs) -> Vec { Vec r; -#if BOOST_COMP_NVCC && BOOST_COMP_NVCC < BOOST_VERSION_NUMBER(11, 3, 0) +#if ALPAKA_COMP_NVCC && ALPAKA_COMP_NVCC < ALPAKA_VERSION_NUMBER(11, 3, 0) if(TDim::value > 0) #else if constexpr(TDim::value > 0) @@ -650,7 +650,7 @@ namespace alpaka ALPAKA_FN_HOST_ACC constexpr auto elementwise_max(Vec const& p, Vecs const&... qs) -> Vec { Vec r; -#if BOOST_COMP_NVCC && BOOST_COMP_NVCC < BOOST_VERSION_NUMBER(11, 3, 0) +#if ALPAKA_COMP_NVCC && ALPAKA_COMP_NVCC < ALPAKA_VERSION_NUMBER(11, 3, 0) if(TDim::value > 0) #else if constexpr(TDim::value > 0) @@ -714,7 +714,7 @@ namespace alpaka else { Vec r; -#if BOOST_COMP_NVCC && BOOST_COMP_NVCC < BOOST_VERSION_NUMBER(11, 3, 0) +#if ALPAKA_COMP_NVCC && ALPAKA_COMP_NVCC < ALPAKA_VERSION_NUMBER(11, 3, 0) if(TDim::value > 0) #else if constexpr(TDim::value > 0) diff --git a/include/alpaka/version.hpp b/include/alpaka/version.hpp index c81c86f8fef6..5595ceaca77a 100644 --- a/include/alpaka/version.hpp +++ b/include/alpaka/version.hpp @@ -4,11 +4,11 @@ #pragma once -#include +#include "alpaka/core/BoostPredef.hpp" #define ALPAKA_VERSION_MAJOR 2 #define ALPAKA_VERSION_MINOR 0 #define ALPAKA_VERSION_PATCH 0 //! 
The alpaka library version number -#define ALPAKA_VERSION BOOST_VERSION_NUMBER(ALPAKA_VERSION_MAJOR, ALPAKA_VERSION_MINOR, ALPAKA_VERSION_PATCH) +#define ALPAKA_VERSION ALPAKA_VERSION_NUMBER(ALPAKA_VERSION_MAJOR, ALPAKA_VERSION_MINOR, ALPAKA_VERSION_PATCH) diff --git a/include/alpaka/warp/WarpUniformCudaHipBuiltIn.hpp b/include/alpaka/warp/WarpUniformCudaHipBuiltIn.hpp index 3a6d495b07e4..692c8d7284ff 100644 --- a/include/alpaka/warp/WarpUniformCudaHipBuiltIn.hpp +++ b/include/alpaka/warp/WarpUniformCudaHipBuiltIn.hpp @@ -21,11 +21,11 @@ namespace alpaka::warp # if !defined(ALPAKA_HOST_ONLY) -# if defined(ALPAKA_ACC_GPU_CUDA_ENABLED) && !BOOST_LANG_CUDA +# if defined(ALPAKA_ACC_GPU_CUDA_ENABLED) && !ALPAKA_LANG_CUDA # error If ALPAKA_ACC_GPU_CUDA_ENABLED is set, the compiler has to support CUDA! # endif -# if defined(ALPAKA_ACC_GPU_HIP_ENABLED) && !BOOST_LANG_HIP +# if defined(ALPAKA_ACC_GPU_HIP_ENABLED) && !ALPAKA_LANG_HIP # error If ALPAKA_ACC_GPU_HIP_ENABLED is set, the compiler has to support HIP! # endif diff --git a/include/alpaka/workdiv/WorkDivHelpers.hpp b/include/alpaka/workdiv/WorkDivHelpers.hpp index c15319cf6f7a..d7bd760836b8 100644 --- a/include/alpaka/workdiv/WorkDivHelpers.hpp +++ b/include/alpaka/workdiv/WorkDivHelpers.hpp @@ -22,7 +22,7 @@ #include #include -#if BOOST_COMP_CLANG +#if ALPAKA_COMP_CLANG # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wswitch-default" #endif @@ -549,6 +549,6 @@ namespace alpaka } } // namespace alpaka -#if BOOST_COMP_CLANG +#if ALPAKA_COMP_CLANG # pragma clang diagnostic pop #endif diff --git a/include/alpaka/workdiv/WorkDivUniformCudaHipBuiltIn.hpp b/include/alpaka/workdiv/WorkDivUniformCudaHipBuiltIn.hpp index 891526784681..f70a4feed729 100644 --- a/include/alpaka/workdiv/WorkDivUniformCudaHipBuiltIn.hpp +++ b/include/alpaka/workdiv/WorkDivUniformCudaHipBuiltIn.hpp @@ -35,11 +35,11 @@ namespace alpaka # if !defined(ALPAKA_HOST_ONLY) -# if defined(ALPAKA_ACC_GPU_CUDA_ENABLED) && !BOOST_LANG_CUDA +# if defined(ALPAKA_ACC_GPU_CUDA_ENABLED) && !ALPAKA_LANG_CUDA # error If ALPAKA_ACC_GPU_CUDA_ENABLED is set, the compiler has to support CUDA! # endif -# if defined(ALPAKA_ACC_GPU_HIP_ENABLED) && !BOOST_LANG_HIP +# if defined(ALPAKA_ACC_GPU_HIP_ENABLED) && !ALPAKA_LANG_HIP # error If ALPAKA_ACC_GPU_HIP_ENABLED is set, the compiler has to support HIP!
# endif diff --git a/script/job_generator/alpaka_filter.py b/script/job_generator/alpaka_filter.py index 277baef9ddb0..dec8df9352a4 100644 --- a/script/job_generator/alpaka_filter.py +++ b/script/job_generator/alpaka_filter.py @@ -112,27 +112,18 @@ def alpaka_post_filter(row: List) -> bool: # several bugs will be fixed in alpaka 2.0.0 if ( row_check_name(row, DEVICE_COMPILER, "==", CLANG_CUDA) - and ( - row_check_version(row, DEVICE_COMPILER, "==", "18") - or row_check_version(row, DEVICE_COMPILER, "==", "19") - ) + and (row_check_version(row, DEVICE_COMPILER, "==", "18") or row_check_version(row, DEVICE_COMPILER, "==", "19")) and row_check_backend_version(row, ALPAKA_ACC_GPU_CUDA_ENABLE, "!=", OFF_VER) ): return False - if row_check_name(row, DEVICE_COMPILER, "==", NVCC) and row_check_name( - row, HOST_COMPILER, "==", CLANG - ): + if row_check_name(row, DEVICE_COMPILER, "==", NVCC) and row_check_name(row, HOST_COMPILER, "==", CLANG): # nvcc 12.5 is the minimum requirement for host compiler Clang 18 - if row_check_version(row, HOST_COMPILER, "==", "18") and row_check_version( - row, DEVICE_COMPILER, "<=", "12.5" - ): + if row_check_version(row, HOST_COMPILER, "==", "18") and row_check_version(row, DEVICE_COMPILER, "<=", "12.5"): return False # no released nvcc version supports Clang 19 yet (latest release was CUDA 12.6) - if row_check_version(row, HOST_COMPILER, "==", "19") and row_check_version( - row, DEVICE_COMPILER, "<=", "12.6" - ): + if row_check_version(row, HOST_COMPILER, "==", "19") and row_check_version(row, DEVICE_COMPILER, "<=", "12.6"): return False return True diff --git a/test/integ/cudaOnly/src/cudaNativeFunctions.cpp b/test/integ/cudaOnly/src/cudaNativeFunctions.cpp index b91ac9fa1f01..84154881c705 100644 --- a/test/integ/cudaOnly/src/cudaNativeFunctions.cpp +++ b/test/integ/cudaOnly/src/cudaNativeFunctions.cpp @@ -7,10 +7,10 @@ #include -#if defined(ALPAKA_ACC_GPU_CUDA_ONLY_MODE) && defined(ALPAKA_ACC_GPU_CUDA_ENABLED) && BOOST_LANG_CUDA +#if defined(ALPAKA_ACC_GPU_CUDA_ONLY_MODE) && defined(ALPAKA_ACC_GPU_CUDA_ENABLED) && ALPAKA_LANG_CUDA //! Native CUDA function. 
-# if BOOST_COMP_CLANG +# if ALPAKA_COMP_CLANG # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wmissing-prototypes" # endif @@ -18,7 +18,7 @@ __device__ auto userDefinedThreadFence() -> void { __threadfence(); } -# if BOOST_COMP_CLANG +# if ALPAKA_COMP_CLANG # pragma clang diagnostic pop # endif diff --git a/test/unit/atomic/src/AtomicTest.cpp b/test/unit/atomic/src/AtomicTest.cpp index 467dab651741..613b0d6d138e 100644 --- a/test/unit/atomic/src/AtomicTest.cpp +++ b/test/unit/atomic/src/AtomicTest.cpp @@ -342,7 +342,8 @@ class AtomicTestKernel>> } }; -#if(defined(ALPAKA_ACC_GPU_CUDA_ENABLED) && BOOST_LANG_CUDA) || (defined(ALPAKA_ACC_GPU_HIP_ENABLED) && BOOST_LANG_HIP) +#if(defined(ALPAKA_ACC_GPU_CUDA_ENABLED) && ALPAKA_LANG_CUDA) \ + || (defined(ALPAKA_ACC_GPU_HIP_ENABLED) && ALPAKA_LANG_HIP) template class AtomicTestKernel< diff --git a/test/unit/block/shared/src/BlockSharedMemSt.cpp b/test/unit/block/shared/src/BlockSharedMemSt.cpp index c2ec83216d57..297f24f51359 100644 --- a/test/unit/block/shared/src/BlockSharedMemSt.cpp +++ b/test/unit/block/shared/src/BlockSharedMemSt.cpp @@ -18,7 +18,7 @@ class BlockSharedMemStNonNullTestKernel template ALPAKA_FN_ACC auto operator()(TAcc const& acc, bool* success) const -> void { -#if BOOST_COMP_GNUC >= BOOST_VERSION_NUMBER(6, 0, 0) +#if ALPAKA_COMP_GNUC >= ALPAKA_VERSION_NUMBER(6, 0, 0) # pragma GCC diagnostic push # pragma GCC diagnostic ignored \ "-Waddress" // warning: the compiler can assume that the address of 'a' will never be NULL [-Waddress] @@ -51,7 +51,7 @@ class BlockSharedMemStNonNullTestKernel auto& h = alpaka::declareSharedVar, __COUNTER__>(acc); ALPAKA_CHECK(*success, static_cast(nullptr) != &h[0]); } -#if BOOST_COMP_GNUC >= BOOST_VERSION_NUMBER(6, 0, 0) +#if ALPAKA_COMP_GNUC >= ALPAKA_VERSION_NUMBER(6, 0, 0) # pragma GCC diagnostic pop #endif } diff --git a/test/unit/core/src/BoostPredefTest.cpp b/test/unit/core/src/BoostPredefTest.cpp index f143fedf5af7..fb08cf659d2e 100644 --- a/test/unit/core/src/BoostPredefTest.cpp +++ b/test/unit/core/src/BoostPredefTest.cpp @@ -10,49 +10,37 @@ TEST_CASE("printDefines", "[core]") { -#if BOOST_LANG_CUDA - std::cout << "BOOST_LANG_CUDA:" << BOOST_LANG_CUDA << std::endl; +#if ALPAKA_LANG_CUDA + std::cout << "ALPAKA_LANG_CUDA:" << ALPAKA_LANG_CUDA << std::endl; #endif -#if BOOST_LANG_HIP - std::cout << "BOOST_LANG_HIP:" << BOOST_LANG_HIP << std::endl; +#if ALPAKA_LANG_HIP + std::cout << "ALPAKA_LANG_HIP:" << ALPAKA_LANG_HIP << std::endl; #endif -#if BOOST_ARCH_PTX - std::cout << "BOOST_ARCH_PTX:" << BOOST_ARCH_PTX << std::endl; +#if ALPAKA_COMP_NVCC + std::cout << "ALPAKA_COMP_NVCC:" << ALPAKA_COMP_NVCC << std::endl; #endif -#if BOOST_ARCH_HSA - std::cout << "BOOST_ARCH_HSA:" << BOOST_ARCH_HSA << std::endl; +#if ALPAKA_COMP_HIP + std::cout << "ALPAKA_COMP_HIP:" << ALPAKA_COMP_HIP << std::endl; #endif -#if BOOST_COMP_NVCC - std::cout << "BOOST_COMP_NVCC:" << BOOST_COMP_NVCC << std::endl; +#if ALPAKA_COMP_CLANG + std::cout << "ALPAKA_COMP_CLANG:" << ALPAKA_COMP_CLANG << std::endl; #endif -#if BOOST_COMP_HIP - std::cout << "BOOST_COMP_HIP:" << BOOST_COMP_HIP << std::endl; +#if ALPAKA_COMP_GNUC + std::cout << "ALPAKA_COMP_GNUC:" << ALPAKA_COMP_GNUC << std::endl; #endif -#if BOOST_COMP_CLANG - std::cout << "BOOST_COMP_CLANG:" << BOOST_COMP_CLANG << std::endl; +#if ALPAKA_COMP_MSVC + std::cout << "ALPAKA_COMP_MSVC:" << ALPAKA_COMP_MSVC << std::endl; #endif -#if BOOST_COMP_GNUC - std::cout << "BOOST_COMP_GNUC:" << BOOST_COMP_GNUC << std::endl; +#if ALPAKA_COMP_CLANG_CUDA + std::cout 
<< "ALPAKA_COMP_CLANG_CUDA:" << ALPAKA_COMP_CLANG_CUDA << std::endl; #endif -#if BOOST_COMP_INTEL - std::cout << "BOOST_COMP_INTEL:" << BOOST_COMP_INTEL << std::endl; +#if ALPAKA_COMP_ICPX + std::cout << "ALPAKA_COMP_ICPX:" << ALPAKA_COMP_ICPX << std::endl; #endif -#if BOOST_COMP_MSVC - std::cout << "BOOST_COMP_MSVC:" << BOOST_COMP_MSVC << std::endl; +#if ALPAKA_COMP_PGI + std::cout << "ALPAKA_COMP_PGI:" << ALPAKA_COMP_PGI << std::endl; #endif -#if defined(BOOST_COMP_MSVC_EMULATED) - std::cout << "BOOST_COMP_MSVC_EMULATED:" << BOOST_COMP_MSVC_EMULATED << std::endl; -#endif -#if BOOST_COMP_CLANG_CUDA - std::cout << "BOOST_COMP_CLANG_CUDA:" << BOOST_COMP_CLANG_CUDA << std::endl; -#endif -#if BOOST_LIB_STD_GNU - std::cout << "BOOST_LIB_STD_GNU:" << BOOST_LIB_STD_GNU << std::endl; -#endif -#if BOOST_LIB_STD_CXX - std::cout << "BOOST_LIB_STD_CXX:" << BOOST_LIB_STD_CXX << std::endl; -#endif -#if BOOST_LIB_STD_DINKUMWARE - std::cout << "BOOST_LIB_STD_DINKUMWARE:" << BOOST_LIB_STD_DINKUMWARE << std::endl; +#if ALPAKA_COMP_IBM + std::cout << "ALPAKA_COMP_IBM:" << ALPAKA_COMP_IBM << std::endl; #endif } diff --git a/test/unit/exec/CMakeLists.txt b/test/unit/exec/CMakeLists.txt index 44a06a2be071..6653869cc1ee 100644 --- a/test/unit/exec/CMakeLists.txt +++ b/test/unit/exec/CMakeLists.txt @@ -11,10 +11,6 @@ alpaka_add_executable( ${_FILES_SOURCE} ${_FILES_HEADER}) -target_include_directories( - ${_TARGET_NAME} - PRIVATE ${Boost_INCLUDE_DIRS}) - target_link_libraries( ${_TARGET_NAME} PRIVATE common) diff --git a/test/unit/exec/src/IndependentElements.cpp b/test/unit/exec/src/IndependentElements.cpp index 18621f07b0d7..36036245604b 100644 --- a/test/unit/exec/src/IndependentElements.cpp +++ b/test/unit/exec/src/IndependentElements.cpp @@ -23,19 +23,19 @@ #include #include -#if BOOST_COMP_MSVC +#if ALPAKA_COMP_MSVC // MSVC uses __restrict instead of __restrict__. # define __restrict__ __restrict #endif -#if BOOST_COMP_CLANG +#if ALPAKA_COMP_CLANG # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wexit-time-destructors" #endif // Global Host object used by all tests. using Host = alpaka::DevCpu; static Host host = alpaka::getDevByIdx(alpaka::PlatformCpu{}, 0); -#if BOOST_COMP_CLANG +#if ALPAKA_COMP_CLANG # pragma clang diagnostic pop #endif diff --git a/test/unit/exec/src/UniformElements.cpp b/test/unit/exec/src/UniformElements.cpp index 0e080171be1d..17a2389fe872 100644 --- a/test/unit/exec/src/UniformElements.cpp +++ b/test/unit/exec/src/UniformElements.cpp @@ -25,19 +25,19 @@ #include #include -#if BOOST_COMP_MSVC +#if ALPAKA_COMP_MSVC // MSVC uses __restrict instead of __restrict__. # define __restrict__ __restrict #endif -#if BOOST_COMP_CLANG +#if ALPAKA_COMP_CLANG # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wexit-time-destructors" #endif // Global Host object used by all tests. 
using Host = alpaka::DevCpu; static Host host = alpaka::getDevByIdx(alpaka::PlatformCpu{}, 0); -#if BOOST_COMP_CLANG +#if ALPAKA_COMP_CLANG # pragma clang diagnostic pop #endif diff --git a/test/unit/kernel/src/KernelLambda.cpp b/test/unit/kernel/src/KernelLambda.cpp index d07ba66b74b8..b50c3306b9d7 100644 --- a/test/unit/kernel/src/KernelLambda.cpp +++ b/test/unit/kernel/src/KernelLambda.cpp @@ -22,7 +22,7 @@ struct TestTemplateLambda alpaka::test::KernelExecutionFixture fixture(alpaka::Vec::ones()); -# if BOOST_COMP_MSVC || defined(BOOST_COMP_MSVC_EMULATED) +# if ALPAKA_COMP_MSVC # pragma warning(push) # pragma warning(disable : 4702) // warning C4702: unreachable code # endif @@ -32,7 +32,7 @@ struct TestTemplateLambda *success, static_cast>(1) == (alpaka::getWorkDiv(acc)).prod()); }; -# if BOOST_COMP_MSVC || defined(BOOST_COMP_MSVC_EMULATED) +# if ALPAKA_COMP_MSVC # pragma warning(pop) # endif diff --git a/test/unit/kernel/src/KernelWithHostConstexpr.cpp b/test/unit/kernel/src/KernelWithHostConstexpr.cpp index 805891f720f2..b86c1f93c0a9 100644 --- a/test/unit/kernel/src/KernelWithHostConstexpr.cpp +++ b/test/unit/kernel/src/KernelWithHostConstexpr.cpp @@ -19,7 +19,7 @@ class KernelWithHostConstexpr template ALPAKA_FN_ACC auto operator()(TAcc const& /* acc */, bool* success) const -> void { -#if BOOST_COMP_MSVC || defined(BOOST_COMP_MSVC_EMULATED) +#if ALPAKA_COMP_MSVC # pragma warning(push) # pragma warning(disable : 4127) // warning C4127: conditional expression is constant #endif @@ -27,7 +27,7 @@ class KernelWithHostConstexpr constexpr auto max = std::numeric_limits::max(); ALPAKA_CHECK(*success, 0 != max); -#if BOOST_COMP_MSVC || defined(BOOST_COMP_MSVC_EMULATED) +#if ALPAKA_COMP_MSVC # pragma warning(pop) #endif } diff --git a/test/unit/kernel/src/KernelWithoutTemplatedAccParam.cpp b/test/unit/kernel/src/KernelWithoutTemplatedAccParam.cpp index dc7cfc072bb6..94afd5ea8cb3 100644 --- a/test/unit/kernel/src/KernelWithoutTemplatedAccParam.cpp +++ b/test/unit/kernel/src/KernelWithoutTemplatedAccParam.cpp @@ -12,9 +12,9 @@ using Idx = std::uint32_t; #if defined(ALPAKA_ACC_CPU_SERIAL_ENABLED) using AccCpu = alpaka::AccCpuSerial; #endif -#if defined(ALPAKA_ACC_GPU_HIP_ENABLED) && BOOST_LANG_HIP +#if defined(ALPAKA_ACC_GPU_HIP_ENABLED) && ALPAKA_LANG_HIP using AccGpu = alpaka::AccGpuHipRt; -#elif defined(ALPAKA_ACC_GPU_CUDA_ENABLED) && BOOST_LANG_CUDA +#elif defined(ALPAKA_ACC_GPU_CUDA_ENABLED) && ALPAKA_LANG_CUDA using AccGpu = alpaka::AccGpuCudaRt; #endif @@ -39,7 +39,8 @@ TEST_CASE("kernelNoTemplateCpu", "[kernel]") } #endif -#if(defined(ALPAKA_ACC_GPU_CUDA_ENABLED) && BOOST_LANG_CUDA) || (defined(ALPAKA_ACC_GPU_HIP_ENABLED) && BOOST_LANG_HIP) +#if(defined(ALPAKA_ACC_GPU_CUDA_ENABLED) && ALPAKA_LANG_CUDA) \ + || (defined(ALPAKA_ACC_GPU_HIP_ENABLED) && ALPAKA_LANG_HIP) struct KernelNoTemplateGpu { ALPAKA_FN_ACC @@ -83,7 +84,8 @@ TEST_CASE("kernelUnusedTemplateParamCpu", "[kernel]") } #endif -#if(defined(ALPAKA_ACC_GPU_CUDA_ENABLED) && BOOST_LANG_CUDA) || (defined(ALPAKA_ACC_GPU_HIP_ENABLED) && BOOST_LANG_HIP) +#if(defined(ALPAKA_ACC_GPU_CUDA_ENABLED) && ALPAKA_LANG_CUDA) \ + || (defined(ALPAKA_ACC_GPU_HIP_ENABLED) && ALPAKA_LANG_HIP) struct KernelUnusedTemplateParamGpu { template diff --git a/test/unit/math/CMakeLists.txt b/test/unit/math/CMakeLists.txt index 6019d13fd143..d12619fdb04f 100644 --- a/test/unit/math/CMakeLists.txt +++ b/test/unit/math/CMakeLists.txt @@ -17,9 +17,6 @@ alpaka_add_executable( ${_TARGET_NAME} ${_FILES_SOURCE} ${_FILES_HEADER}) -target_include_directories( - 
${_TARGET_NAME} - PRIVATE ${Boost_INCLUDE_DIRS}) target_link_libraries( ${_TARGET_NAME} PRIVATE common) diff --git a/test/unit/math/src/DataGen.hpp b/test/unit/math/src/DataGen.hpp index 8b35cbee5fc5..500a0d21daa3 100644 --- a/test/unit/math/src/DataGen.hpp +++ b/test/unit/math/src/DataGen.hpp @@ -11,7 +11,7 @@ #include #include -#if BOOST_COMP_CLANG +#if ALPAKA_COMP_CLANG # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wswitch-default" #endif @@ -205,6 +205,6 @@ namespace mathtest } } // namespace mathtest -#if BOOST_COMP_CLANG +#if ALPAKA_COMP_CLANG # pragma clang diagnostic pop #endif diff --git a/test/unit/mem/copy/src/BufSlicing.cpp b/test/unit/mem/copy/src/BufSlicing.cpp index 6169fdaf5ff2..551c4ef42424 100644 --- a/test/unit/mem/copy/src/BufSlicing.cpp +++ b/test/unit/mem/copy/src/BufSlicing.cpp @@ -12,7 +12,7 @@ #include #include -#if BOOST_COMP_MSVC || defined(BOOST_COMP_MSVC_EMULATED) +#if ALPAKA_COMP_MSVC # pragma warning(push) # pragma warning(disable : 4127) // suppress warning for c++17 conditional expression is constant #endif @@ -209,6 +209,6 @@ TEMPLATE_LIST_TEST_CASE("memBufSlicingMemsetTest", "[memBuf]", TestAccWithDataTy } } -#if BOOST_COMP_MSVC || defined(BOOST_COMP_MSVC_EMULATED) +#if ALPAKA_COMP_MSVC # pragma warning(pop) #endif diff --git a/test/unit/mem/view/src/MdSpan.cpp b/test/unit/mem/view/src/MdSpan.cpp index af64e58bbf30..8b7a305662a4 100644 --- a/test/unit/mem/view/src/MdSpan.cpp +++ b/test/unit/mem/view/src/MdSpan.cpp @@ -26,7 +26,7 @@ namespace using make_reverse_index_sequence = decltype(make_reverse_index_sequence_impl(std::make_index_sequence{})); } // namespace -# if BOOST_COMP_NVCC +# if ALPAKA_COMP_NVCC # define NOEXCEPT_UNLESS_NVCC # else # define NOEXCEPT_UNLESS_NVCC noexcept diff --git a/test/unit/mem/view/src/ViewPlainPtrTest.cpp b/test/unit/mem/view/src/ViewPlainPtrTest.cpp index dcc38b096bfd..a30d10da44f5 100644 --- a/test/unit/mem/view/src/ViewPlainPtrTest.cpp +++ b/test/unit/mem/view/src/ViewPlainPtrTest.cpp @@ -16,7 +16,7 @@ #include #include -#if BOOST_COMP_GNUC +#if ALPAKA_COMP_GNUC # pragma GCC diagnostic push // "cast from 'std::uint8_t*' to 'Elem*' increases required alignment of target type" # pragma GCC diagnostic ignored "-Wcast-align" @@ -91,7 +91,7 @@ namespace alpaka::test CHECK(alpaka::getPtrNative(viewMove) == nativePtr2); } } // namespace alpaka::test -#if BOOST_COMP_GNUC +#if ALPAKA_COMP_GNUC # pragma GCC diagnostic pop #endif diff --git a/test/unit/mem/view/src/ViewSubViewTest.cpp b/test/unit/mem/view/src/ViewSubViewTest.cpp index 6da699082dbd..df5804fe1411 100644 --- a/test/unit/mem/view/src/ViewSubViewTest.cpp +++ b/test/unit/mem/view/src/ViewSubViewTest.cpp @@ -16,7 +16,7 @@ #include #include -#if BOOST_COMP_GNUC +#if ALPAKA_COMP_GNUC # pragma GCC diagnostic push # pragma GCC diagnostic ignored \ "-Wcast-align" // "cast from 'std::uint8_t*' to 'Elem*' increases required alignment of target type" @@ -130,7 +130,7 @@ namespace alpaka::test alpaka::test::testViewSubViewImmutable(view, buf, dev, viewExtent, viewOffset); } } // namespace alpaka::test -#if BOOST_COMP_GNUC +#if ALPAKA_COMP_GNUC # pragma GCC diagnostic pop #endif diff --git a/test/unit/rand/src/RandTest.cpp b/test/unit/rand/src/RandTest.cpp index 05311d438965..9797560a75cf 100644 --- a/test/unit/rand/src/RandTest.cpp +++ b/test/unit/rand/src/RandTest.cpp @@ -21,14 +21,14 @@ class RandTestKernel { auto dist = alpaka::rand::distribution::createNormalReal(acc); [[maybe_unused]] auto const r = dist(gen); - if constexpr(!BOOST_ARCH_PTX) + if 
constexpr(!ALPAKA_ARCH_PTX) ALPAKA_CHECK(*success, std::isfinite(r)); } { auto dist = alpaka::rand::distribution::createNormalReal(acc); [[maybe_unused]] auto const r = dist(gen); - if constexpr(!BOOST_ARCH_PTX) + if constexpr(!ALPAKA_ARCH_PTX) ALPAKA_CHECK(*success, std::isfinite(r)); } { @@ -114,7 +114,7 @@ TEMPLATE_LIST_TEST_CASE("defaultRandomGeneratorIsTriviallyCopyable", "[rand]", a // This causes alpaka rand state for the HIP accelerator and those versions to also not be trivially copyable. // It was fixed on AMD side in https://github.com/ROCmSoftwarePlatform/rocRAND/pull/252. // Thus we guard the test to skip HIP accelerator and older HIP versions. -#if defined(ALPAKA_ACC_GPU_HIP_ENABLED) && (BOOST_LANG_HIP < BOOST_VERSION_NUMBER(5, 2, 0)) +#if defined(ALPAKA_ACC_GPU_HIP_ENABLED) && (ALPAKA_LANG_HIP < ALPAKA_VERSION_NUMBER(5, 2, 0)) if constexpr(!IsAccHIP::value) STATIC_REQUIRE(isEngineTriviallyCopyable); #else diff --git a/test/unit/warp/src/ShflDown.cpp b/test/unit/warp/src/ShflDown.cpp index 3663dc239829..ba173ae39634 100644 --- a/test/unit/warp/src/ShflDown.cpp +++ b/test/unit/warp/src/ShflDown.cpp @@ -19,7 +19,7 @@ #include #include -#if BOOST_COMP_GNUC +#if ALPAKA_COMP_GNUC # pragma GCC diagnostic push # pragma GCC diagnostic ignored "-Wstrict-overflow" #endif @@ -170,6 +170,6 @@ TEMPLATE_LIST_TEST_CASE("shfl_down", "[warp]", alpaka::test::TestAccs) } } -#if BOOST_COMP_GNUC +#if ALPAKA_COMP_GNUC # pragma GCC diagnostic pop #endif diff --git a/test/unit/workDiv/src/FooVec.hpp b/test/unit/workDiv/src/FooVec.hpp index f26376ede389..cb8e0db543a5 100644 --- a/test/unit/workDiv/src/FooVec.hpp +++ b/test/unit/workDiv/src/FooVec.hpp @@ -95,7 +95,7 @@ namespace alpaka::trait -> alpaka::Vec, TVal> { alpaka::Vec, TVal> v{}; -#if BOOST_COMP_NVCC && BOOST_COMP_NVCC < BOOST_VERSION_NUMBER(11, 3, 0) +#if ALPAKA_COMP_NVCC && ALPAKA_COMP_NVCC < ALPAKA_VERSION_NUMBER(11, 3, 0) if(DimInt::value > 0) #else if constexpr(DimInt::value > 0)
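To close the section: after this rename, downstream code checks compiler, language, and library versions through the `ALPAKA_`-prefixed macros only. The sketch below shows a plausible downstream version guard; it assumes `ALPAKA_VERSION_NUMBER(major, minor, patch)` keeps the monotonic encoding of Boost.Predef's `BOOST_VERSION_NUMBER`, which the one-to-one replacements in the `version.hpp` and `BufUniformCudaHipRt.hpp` hunks suggest.

```cpp
// Sketch: a downstream version guard after the rename. Assumes that
// ALPAKA_VERSION_NUMBER(major, minor, patch) keeps the monotonic encoding of
// Boost.Predef's BOOST_VERSION_NUMBER, as the one-to-one replacements above suggest.
#include <alpaka/version.hpp> // defines ALPAKA_VERSION via ALPAKA_VERSION_NUMBER per the version.hpp hunk

#include <iostream>

auto main() -> int
{
#if ALPAKA_VERSION >= ALPAKA_VERSION_NUMBER(2, 0, 0)
    std::cout << "alpaka " << ALPAKA_VERSION_MAJOR << '.' << ALPAKA_VERSION_MINOR << '.' << ALPAKA_VERSION_PATCH
              << ": the ALPAKA_-prefixed predef macros are available\n";
#else
    std::cout << "older alpaka: the BOOST_-prefixed predef macros are still in use\n";
#endif
    return 0;
}
```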