diff --git a/CMakeLists.txt b/CMakeLists.txt
index db08624..a3c2639 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -82,6 +82,7 @@ else()
     option(WHISPER_CUBLAS               "whisper: support for CUDA (deprecated)" OFF)
     option(WHISPER_HIPBLAS              "whisper: support for hipBLAS" OFF)
     option(WHISPER_CLBLAST              "whisper: use CLBlast" OFF)
+    option(WHISPER_MKL                  "whisper: use Intel Math Kernel Library (MKL)" OFF)
     option(WHISPER_SYCL                 "whisper: use SYCL" OFF)
     option(WHISPER_SYCL_F16             "whisper: use 16 bit floats for sycl calculations" OFF)
 endif()
@@ -296,6 +297,13 @@ if (WHISPER_BLAS)
     endif ()
 endif ()
 
+if (WHISPER_MKL)
+    find_package(MKL CONFIG REQUIRED PATHS $ENV{MKLROOT})
+    message(STATUS "Imported oneMKL targets: ${MKL_IMPORTED_TARGETS}")
+    set(WHISPER_EXTRA_FLAGS ${WHISPER_EXTRA_FLAGS} -DGGML_USE_OPENBLAS)
+    set(WHISPER_EXTRA_FLAGS ${WHISPER_EXTRA_FLAGS} -DGGML_BLAS_USE_MKL)
+endif()
+
 if (WHISPER_CUBLAS)
     message(WARNING "WHISPER_CUBLAS is deprecated and will be removed in the future.\nUse WHISPER_CUDA instead")
     set(WHISPER_CUDA ON)
@@ -630,6 +638,10 @@ if (WHISPER_OPENVINO)
     target_link_libraries(${TARGET} PRIVATE whisper.openvino)
 endif()
 
+if (WHISPER_MKL)
+    target_link_libraries(${TARGET} PUBLIC MKL::MKL)
+endif()
+
 if (MSVC)
     target_link_libraries(${TARGET} PRIVATE ${WHISPER_EXTRA_LIBS} ${CMAKE_THREAD_LIBS_INIT})
 
diff --git a/README.md b/README.md
index 96a8dd7..6b6ea67 100644
--- a/README.md
+++ b/README.md
@@ -455,6 +455,21 @@ make clean
 WHISPER_OPENBLAS=1 make -j
 ```
 
+## BLAS CPU support via Intel MKL
+
+Encoder processing can be accelerated on the CPU via the BLAS compatible interface of Intel's Math Kernel Library.
+First, make sure you have installed Intel's MKL runtime and development packages: https://www.intel.com/content/www/us/en/developer/tools/oneapi/onemkl-download.html
+
+Now build `whisper.cpp` with Intel MKL BLAS support:
+
+```
+source /opt/intel/oneapi/setvars.sh
+mkdir build
+cd build
+cmake -DWHISPER_MKL=ON ..
+WHISPER_MKL=1 make -j
+```
+
 ## Docker
 
 ### Prerequisites
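
For context, the CMake side of this change relies on oneMKL's config-mode packaging: `find_package(MKL CONFIG)` imports targets such as `MKL::MKL`, and linking that single target pulls in the appropriate include paths and libraries. The sketch below shows the same pattern in a minimal standalone project; the project name `mkl_demo`, the `demo` target, and `main.c` are placeholders for illustration and are not part of whisper.cpp or this patch.

```
# Minimal standalone sketch of the pattern used by this patch.
# Assumes oneMKL is installed and MKLROOT is set, e.g. via
# `source /opt/intel/oneapi/setvars.sh` as in the README instructions.
cmake_minimum_required(VERSION 3.13)
project(mkl_demo C)

# MKLConfig.cmake ships with oneMKL and defines the imported targets.
find_package(MKL CONFIG REQUIRED PATHS $ENV{MKLROOT})
message(STATUS "Imported oneMKL targets: ${MKL_IMPORTED_TARGETS}")

# "demo" and main.c are placeholders; whisper.cpp instead links its own
# target via target_link_libraries(${TARGET} PUBLIC MKL::MKL).
add_executable(demo main.c)
target_link_libraries(demo PUBLIC MKL::MKL)
```

With the patch applied, the equivalent path in whisper.cpp is enabled by configuring with `cmake -DWHISPER_MKL=ON ..` after sourcing `setvars.sh`.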