author     Android Build Coastguard Worker <android-build-coastguard-worker@google.com>  2021-07-15 00:02:12 +0000
committer  Android Build Coastguard Worker <android-build-coastguard-worker@google.com>  2021-07-15 00:02:12 +0000
commit     2bd9563614dbdfe3f63580bdf4a574ecb0d60096 (patch)
tree       0a031ffb45338947eace1b5a83c67c16c196f968
parent     7c44d52f24ec6160ed832c4b51139819f844ad0f (diff)
parent     de61e9eef348b6d84137b4648cac511e1b232027 (diff)
download   pthreadpool-android12-mainline-wifi-release.tar.gz
Change-Id: Ifa50ef3a35471e38ee9098e383fa607984435c62
-rw-r--r--  .gitignore                    |   12
-rw-r--r--  Android.bp                    |   39
-rw-r--r--  BUILD.bazel                   |  404
-rw-r--r--  CMakeLists.txt                |   61
-rw-r--r--  METADATA                      |   14
l---------  NOTICE                        |    1
-rw-r--r--  README.md                     |    8
-rw-r--r--  WORKSPACE                     |   38
-rw-r--r--  bench/latency.cc              |    5
-rw-r--r--  bench/throughput.cc           |  322
-rw-r--r--  cmake/DownloadCpuinfo.cmake   |   15
-rwxr-xr-x  configure.py                  |   10
-rw-r--r--  include/pthreadpool.h         |  972
-rw-r--r--  src/fastpath.c                | 1327
-rw-r--r--  src/gcd.c                     |  136
-rw-r--r--  src/legacy-api.c (renamed from src/threadpool-legacy.c) |   15
-rw-r--r--  src/memory.c                  |   66
-rw-r--r--  src/portable-api.c            | 2384
-rw-r--r--  src/pthreads.c                |  461
-rw-r--r--  src/shim.c                    |  472
-rw-r--r--  src/threadpool-atomics.h      |  832
-rw-r--r--  src/threadpool-common.h       |   75
-rw-r--r--  src/threadpool-object.h       |  812
-rw-r--r--  src/threadpool-pthreads.c     | 1209
-rw-r--r--  src/threadpool-shim.c         |  195
-rw-r--r--  src/threadpool-utils.h        |   98
-rw-r--r--  src/windows.c                 |  364
-rw-r--r--  test/pthreadpool.cc           | 4546
28 files changed, 13389 insertions(+), 1504 deletions(-)
diff --git a/.gitignore b/.gitignore
index 8a4bd32..0d8a9fd 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,12 +2,18 @@
build.ninja
# Build objects and artifacts
-deps/
-build/
+bazel-bin
+bazel-genfiles
+bazel-out
+bazel-testlogs
+bazel-pthreadpool
bin/
-obj/
+build/
+build-*/
+deps/
lib/
libs/
+obj/
*.pyc
*.pyo
diff --git a/Android.bp b/Android.bp
index d366438..a027498 100644
--- a/Android.bp
+++ b/Android.bp
@@ -12,24 +12,51 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+package {
+ default_applicable_licenses: ["external_pthreadpool_license"],
+}
+
+// Added automatically by a large-scale-change
+// See: http://go/android-license-faq
+license {
+ name: "external_pthreadpool_license",
+ visibility: [":__subpackages__"],
+ license_kinds: [
+ "SPDX-license-identifier-BSD",
+ ],
+ license_text: [
+ "LICENSE",
+ ],
+}
+
cc_library_static {
name: "libpthreadpool",
export_include_dirs: ["include"],
vendor_available: true,
sdk_version: "current",
srcs: [
- "src/threadpool-pthreads.c",
- "src/threadpool-legacy.c",
+ "src/memory.c",
+ "src/portable-api.c",
+ "src/pthreads.c",
],
cflags: [
- "-std=gnu11",
"-O2",
"-Wno-deprecated-declarations",
"-Wno-missing-field-initializers",
+ "-DPTHREADPOOL_USE_CPUINFO=1",
+ "-DPTHREADPOOL_USE_CONDVAR=1",
],
+ c_std: "gnu11",
header_libs: [
"fxdiv_headers",
],
+ shared_libs: [
+ "liblog",
+ ],
+ static_libs: [
+ "libcpuinfo",
+ "libclog",
+ ],
}
cc_test {
@@ -43,7 +70,12 @@ cc_test {
"-Wno-missing-field-initializers",
],
stl: "libc++_static",
+ shared_libs: [
+ "liblog",
+ ],
static_libs: [
+ "libclog",
+ "libcpuinfo",
"libgmock_ndk",
"libpthreadpool",
],
@@ -51,4 +83,3 @@ cc_test {
"general-tests",
],
}
-
diff --git a/BUILD.bazel b/BUILD.bazel
new file mode 100644
index 0000000..0b832cf
--- /dev/null
+++ b/BUILD.bazel
@@ -0,0 +1,404 @@
+load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_library", "cc_test")
+
+licenses(["notice"])
+
+############################## pthreadpool library #############################
+
+INTERNAL_HDRS = [
+ "src/threadpool-atomics.h",
+ "src/threadpool-common.h",
+ "src/threadpool-object.h",
+ "src/threadpool-utils.h",
+]
+
+PORTABLE_SRCS = [
+ "src/memory.c",
+ "src/portable-api.c",
+]
+
+ARCH_SPECIFIC_SRCS = [
+ "src/fastpath.c",
+]
+
+PTHREADS_IMPL_SRCS = PORTABLE_SRCS + ["src/pthreads.c"]
+
+GCD_IMPL_SRCS = PORTABLE_SRCS + ["src/gcd.c"]
+
+WINDOWS_IMPL_SRCS = PORTABLE_SRCS + ["src/windows.c"]
+
+SHIM_IMPL_SRCS = ["src/shim.c"]
+
+cc_library(
+ name = "pthreadpool",
+ srcs = select({
+ ":pthreadpool_sync_primitive_explicit_condvar": INTERNAL_HDRS + PTHREADS_IMPL_SRCS,
+ ":pthreadpool_sync_primitive_explicit_futex": INTERNAL_HDRS + PTHREADS_IMPL_SRCS,
+ ":pthreadpool_sync_primitive_explicit_gcd": INTERNAL_HDRS + GCD_IMPL_SRCS,
+ ":pthreadpool_sync_primitive_explicit_event": INTERNAL_HDRS + WINDOWS_IMPL_SRCS,
+ ":emscripten_with_threads": INTERNAL_HDRS + PTHREADS_IMPL_SRCS,
+ ":emscripten": INTERNAL_HDRS + SHIM_IMPL_SRCS,
+ ":macos_x86": INTERNAL_HDRS + GCD_IMPL_SRCS,
+ ":macos_x86_64": INTERNAL_HDRS + GCD_IMPL_SRCS,
+ ":ios": INTERNAL_HDRS + GCD_IMPL_SRCS,
+ ":watchos": INTERNAL_HDRS + GCD_IMPL_SRCS,
+ ":tvos": INTERNAL_HDRS + GCD_IMPL_SRCS,
+ ":windows_x86_64": INTERNAL_HDRS + WINDOWS_IMPL_SRCS,
+ "//conditions:default": INTERNAL_HDRS + PTHREADS_IMPL_SRCS,
+ }) + select({
+ ":linux_x86_64": ARCH_SPECIFIC_SRCS,
+ ":android_x86": ARCH_SPECIFIC_SRCS,
+ ":android_x86_64": ARCH_SPECIFIC_SRCS,
+ ":windows_x86_64": ARCH_SPECIFIC_SRCS,
+ ":macos_x86": ARCH_SPECIFIC_SRCS,
+ ":macos_x86_64": ARCH_SPECIFIC_SRCS,
+ ":ios_x86": ARCH_SPECIFIC_SRCS,
+ ":ios_x86_64": ARCH_SPECIFIC_SRCS,
+ ":watchos_x86": ARCH_SPECIFIC_SRCS,
+ ":watchos_x86_64": ARCH_SPECIFIC_SRCS,
+ ":tvos_x86_64": ARCH_SPECIFIC_SRCS,
+ "//conditions:default": [],
+ }),
+ copts = [
+ "-std=gnu11",
+ ] + select({
+ ":optimized_build": ["-O2"],
+ "//conditions:default": [],
+ }) + select({
+ ":linux_arm": ["-DPTHREADPOOL_USE_CPUINFO=1"],
+ ":linux_armeabi": ["-DPTHREADPOOL_USE_CPUINFO=1"],
+ ":linux_armhf": ["-DPTHREADPOOL_USE_CPUINFO=1"],
+ ":linux_armv7a": ["-DPTHREADPOOL_USE_CPUINFO=1"],
+ ":linux_aarch64": ["-DPTHREADPOOL_USE_CPUINFO=1"],
+ ":android_armv7": ["-DPTHREADPOOL_USE_CPUINFO=1"],
+ ":android_arm64": ["-DPTHREADPOOL_USE_CPUINFO=1"],
+ "//conditions:default": ["-DPTHREADPOOL_USE_CPUINFO=0"],
+ }) + select({
+ ":pthreadpool_sync_primitive_explicit_condvar": [
+ "-DPTHREADPOOL_USE_CONDVAR=1",
+ "-DPTHREADPOOL_USE_FUTEX=0",
+ "-DPTHREADPOOL_USE_GCD=0",
+ "-DPTHREADPOOL_USE_EVENT=0",
+ ],
+ ":pthreadpool_sync_primitive_explicit_futex": [
+ "-DPTHREADPOOL_USE_CONDVAR=0",
+ "-DPTHREADPOOL_USE_FUTEX=1",
+ "-DPTHREADPOOL_USE_GCD=0",
+ "-DPTHREADPOOL_USE_EVENT=0",
+ ],
+ ":pthreadpool_sync_primitive_explicit_gcd": [
+ "-DPTHREADPOOL_USE_CONDVAR=0",
+ "-DPTHREADPOOL_USE_FUTEX=0",
+ "-DPTHREADPOOL_USE_GCD=1",
+ "-DPTHREADPOOL_USE_EVENT=0",
+ ],
+ ":pthreadpool_sync_primitive_explicit_event": [
+ "-DPTHREADPOOL_USE_CONDVAR=0",
+ "-DPTHREADPOOL_USE_FUTEX=0",
+ "-DPTHREADPOOL_USE_GCD=0",
+ "-DPTHREADPOOL_USE_EVENT=1",
+ ],
+ "//conditions:default": [],
+ }) + select({
+ ":linux_x86_64": ["-DPTHREADPOOL_USE_FASTPATH=1"],
+ ":android_x86": ["-DPTHREADPOOL_USE_FASTPATH=1"],
+ ":android_x86_64": ["-DPTHREADPOOL_USE_FASTPATH=1"],
+ ":windows_x86_64": ["-DPTHREADPOOL_USE_FASTPATH=1"],
+ ":macos_x86": ["-DPTHREADPOOL_USE_FASTPATH=1"],
+ ":macos_x86_64": ["-DPTHREADPOOL_USE_FASTPATH=1"],
+ ":ios_x86": ["-DPTHREADPOOL_USE_FASTPATH=1"],
+ ":ios_x86_64": ["-DPTHREADPOOL_USE_FASTPATH=1"],
+ ":watchos_x86": ["-DPTHREADPOOL_USE_FASTPATH=1"],
+ ":watchos_x86_64": ["-DPTHREADPOOL_USE_FASTPATH=1"],
+ ":tvos_x86_64": ["-DPTHREADPOOL_USE_FASTPATH=1"],
+ "//conditions:default": ["-DPTHREADPOOL_USE_FASTPATH=0"],
+ }),
+ hdrs = [
+ "include/pthreadpool.h",
+ ],
+ defines = [
+ "PTHREADPOOL_NO_DEPRECATED_API",
+ ],
+ includes = [
+ "include",
+ ],
+ linkopts = select({
+ ":emscripten_with_threads": [
+ "-s ALLOW_BLOCKING_ON_MAIN_THREAD=1",
+ "-s PTHREAD_POOL_SIZE=8",
+ ],
+ "//conditions:default": [],
+ }),
+ strip_include_prefix = "include",
+ deps = [
+ "@FXdiv",
+ ] + select({
+ ":linux_arm": ["@cpuinfo"],
+ ":linux_armeabi": ["@cpuinfo"],
+ ":linux_armhf": ["@cpuinfo"],
+ ":linux_armv7a": ["@cpuinfo"],
+ ":linux_aarch64": ["@cpuinfo"],
+ ":android_armv7": ["@cpuinfo"],
+ ":android_arm64": ["@cpuinfo"],
+ "//conditions:default": [],
+ }),
+ visibility = ["//visibility:public"],
+)
+
+################################## Unit tests ##################################
+
+EMSCRIPTEN_TEST_LINKOPTS = [
+ "-s ASSERTIONS=2",
+ "-s ERROR_ON_UNDEFINED_SYMBOLS=1",
+ "-s DEMANGLE_SUPPORT=1",
+ "-s EXIT_RUNTIME=1",
+ "-s ALLOW_MEMORY_GROWTH=0",
+ "-s TOTAL_MEMORY=67108864", # 64M
+]
+
+cc_test(
+ name = "pthreadpool_test",
+ srcs = ["test/pthreadpool.cc"],
+ linkopts = select({
+ ":emscripten": EMSCRIPTEN_TEST_LINKOPTS,
+ "//conditions:default": [],
+ }),
+ deps = [
+ ":pthreadpool",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+################################## Benchmarks ##################################
+
+EMSCRIPTEN_BENCHMARK_LINKOPTS = [
+ "-s ASSERTIONS=1",
+ "-s ERROR_ON_UNDEFINED_SYMBOLS=1",
+ "-s EXIT_RUNTIME=1",
+ "-s ALLOW_MEMORY_GROWTH=0",
+]
+
+cc_binary(
+ name = "latency_bench",
+ srcs = ["bench/latency.cc"],
+ linkopts = select({
+ ":emscripten": EMSCRIPTEN_BENCHMARK_LINKOPTS,
+ "//conditions:default": [],
+ }),
+ deps = [
+ ":pthreadpool",
+ "@com_google_benchmark//:benchmark",
+ ],
+)
+
+cc_binary(
+ name = "throughput_bench",
+ srcs = ["bench/throughput.cc"],
+ linkopts = select({
+ ":emscripten": EMSCRIPTEN_BENCHMARK_LINKOPTS,
+ "//conditions:default": [],
+ }),
+ deps = [
+ ":pthreadpool",
+ "@com_google_benchmark//:benchmark",
+ ],
+)
+
+############################# Build configurations #############################
+
+# Synchronize workers using pthreads condition variable.
+config_setting(
+ name = "pthreadpool_sync_primitive_explicit_condvar",
+ define_values = {"pthreadpool_sync_primitive": "condvar"},
+)
+
+# Synchronize workers using futex.
+config_setting(
+ name = "pthreadpool_sync_primitive_explicit_futex",
+ define_values = {"pthreadpool_sync_primitive": "futex"},
+)
+
+# Synchronize workers using Grand Central Dispatch.
+config_setting(
+ name = "pthreadpool_sync_primitive_explicit_gcd",
+ define_values = {"pthreadpool_sync_primitive": "gcd"},
+)
+
+# Synchronize workers using WinAPI event.
+config_setting(
+ name = "pthreadpool_sync_primitive_explicit_event",
+ define_values = {"pthreadpool_sync_primitive": "event"},
+)
+
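+# These settings match --define values on the command line, so a build can
+# pick a primitive explicitly (assuming standard Bazel --define semantics):
+#
+#   bazel build --define pthreadpool_sync_primitive=futex //:pthreadpool
+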
+config_setting(
+ name = "optimized_build",
+ values = {
+ "compilation_mode": "opt",
+ },
+)
+
+config_setting(
+ name = "linux_x86_64",
+ values = {"cpu": "k8"},
+)
+
+config_setting(
+ name = "linux_arm",
+ values = {"cpu": "arm"},
+)
+
+config_setting(
+ name = "linux_armeabi",
+ values = {"cpu": "armeabi"},
+)
+
+config_setting(
+ name = "linux_armhf",
+ values = {"cpu": "armhf"},
+)
+
+config_setting(
+ name = "linux_armv7a",
+ values = {"cpu": "armv7a"},
+)
+
+config_setting(
+ name = "linux_aarch64",
+ values = {"cpu": "aarch64"},
+)
+
+config_setting(
+ name = "android_x86",
+ values = {
+ "crosstool_top": "//external:android/crosstool",
+ "cpu": "x86",
+ },
+)
+
+config_setting(
+ name = "android_x86_64",
+ values = {
+ "crosstool_top": "//external:android/crosstool",
+ "cpu": "x86_64",
+ },
+)
+
+config_setting(
+ name = "android_armv7",
+ values = {
+ "crosstool_top": "//external:android/crosstool",
+ "cpu": "armeabi-v7a",
+ },
+)
+
+config_setting(
+ name = "android_arm64",
+ values = {
+ "crosstool_top": "//external:android/crosstool",
+ "cpu": "arm64-v8a",
+ },
+)
+
+# Note: we need to individually match x86 and x86-64 macOS rather than use
+# catch-all "apple_platform_type": "macos" because that option defaults to
+# "macos" even when building on Linux!
+config_setting(
+ name = "macos_x86",
+ values = {
+ "apple_platform_type": "macos",
+ "cpu": "darwin",
+ },
+)
+
+config_setting(
+ name = "macos_x86_64",
+ values = {
+ "apple_platform_type": "macos",
+ "cpu": "darwin_x86_64",
+ },
+)
+
+config_setting(
+ name = "ios",
+ values = {
+ "crosstool_top": "@bazel_tools//tools/cpp:toolchain",
+ "apple_platform_type": "ios",
+ },
+)
+
+config_setting(
+ name = "ios_x86",
+ values = {
+ "apple_platform_type": "ios",
+ "cpu": "ios_i386",
+ },
+)
+
+config_setting(
+ name = "ios_x86_64",
+ values = {
+ "apple_platform_type": "ios",
+ "cpu": "ios_x86_64",
+ },
+)
+
+config_setting(
+ name = "watchos",
+ values = {
+ "crosstool_top": "@bazel_tools//tools/cpp:toolchain",
+ "apple_platform_type": "watchos",
+ },
+)
+
+config_setting(
+ name = "watchos_x86",
+ values = {
+ "apple_platform_type": "watchos",
+ "cpu": "watchos_i386",
+ },
+)
+
+config_setting(
+ name = "watchos_x86_64",
+ values = {
+ "apple_platform_type": "watchos",
+ "cpu": "watchos_x86_64",
+ },
+)
+
+config_setting(
+ name = "tvos",
+ values = {
+ "crosstool_top": "@bazel_tools//tools/cpp:toolchain",
+ "apple_platform_type": "tvos",
+ },
+)
+
+config_setting(
+ name = "tvos_x86_64",
+ values = {
+ "apple_platform_type": "tvos",
+ "cpu": "tvos_x86_64",
+ },
+)
+
+config_setting(
+ name = "windows_x86_64",
+ values = {
+ "cpu": "x64_windows",
+ },
+)
+
+config_setting(
+ name = "emscripten",
+ values = {
+ "crosstool_top": "//toolchain:emscripten",
+ }
+)
+
+config_setting(
+ name = "emscripten_with_threads",
+ values = {
+ "crosstool_top": "//toolchain:emscripten",
+ "copt": "-pthread",
+ }
+)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 714325a..0db3264 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,7 +1,5 @@
CMAKE_MINIMUM_REQUIRED(VERSION 3.5 FATAL_ERROR)
-INCLUDE(GNUInstallDirs)
-
# ---[ Project
PROJECT(pthreadpool C CXX)
@@ -9,6 +7,13 @@ PROJECT(pthreadpool C CXX)
SET(PTHREADPOOL_LIBRARY_TYPE "default" CACHE STRING "Type of library (shared, static, or default) to build")
SET_PROPERTY(CACHE PTHREADPOOL_LIBRARY_TYPE PROPERTY STRINGS default static shared)
OPTION(PTHREADPOOL_ALLOW_DEPRECATED_API "Enable deprecated API functions" ON)
+SET(PTHREADPOOL_SYNC_PRIMITIVE "default" CACHE STRING "Synchronization primitive (condvar, futex, gcd, event, or default) for worker threads")
+SET_PROPERTY(CACHE PTHREADPOOL_SYNC_PRIMITIVE PROPERTY STRINGS default condvar futex gcd event)
+IF(CMAKE_SYSTEM_PROCESSOR MATCHES "^(i[3-6]86|AMD64|x86(_64)?)$")
+ OPTION(PTHREADPOOL_ENABLE_FASTPATH "Enable fast path using atomic decrement instead of atomic compare-and-swap" ON)
+ELSE()
+ OPTION(PTHREADPOOL_ENABLE_FASTPATH "Enable fast path using atomic decrement instead of atomic compare-and-swap" OFF)
+ENDIF()
IF("${CMAKE_SOURCE_DIR}" STREQUAL "${PROJECT_SOURCE_DIR}")
OPTION(PTHREADPOOL_BUILD_TESTS "Build pthreadpool unit tests" ON)
OPTION(PTHREADPOOL_BUILD_BENCHMARKS "Build pthreadpool micro-benchmarks" ON)
@@ -18,6 +23,8 @@ ELSE()
ENDIF()
# ---[ CMake options
+INCLUDE(GNUInstallDirs)
+
IF(PTHREADPOOL_BUILD_TESTS)
ENABLE_TESTING()
ENDIF()
@@ -61,20 +68,25 @@ ENDIF()
# ---[ pthreadpool library
IF(PTHREADPOOL_ALLOW_DEPRECATED_API)
- SET(PTHREADPOOL_SRCS src/threadpool-legacy.c)
+ SET(PTHREADPOOL_SRCS src/legacy-api.c)
ENDIF()
-IF(CMAKE_SYSTEM_NAME STREQUAL "Emscripten")
- LIST(APPEND PTHREADPOOL_SRCS src/threadpool-shim.c)
+IF(EMSCRIPTEN)
+ LIST(APPEND PTHREADPOOL_SRCS src/shim.c)
ELSE()
- LIST(APPEND PTHREADPOOL_SRCS src/threadpool-pthreads.c)
+ LIST(APPEND PTHREADPOOL_SRCS src/portable-api.c src/memory.c)
+ IF(APPLE AND (PTHREADPOOL_SYNC_PRIMITIVE STREQUAL "default" OR PTHREADPOOL_SYNC_PRIMITIVE STREQUAL "gcd"))
+ LIST(APPEND PTHREADPOOL_SRCS src/gcd.c)
+ ELSEIF(CMAKE_SYSTEM_NAME MATCHES "^(Windows|CYGWIN|MSYS)$" AND (PTHREADPOOL_SYNC_PRIMITIVE STREQUAL "default" OR PTHREADPOOL_SYNC_PRIMITIVE STREQUAL "event"))
+ LIST(APPEND PTHREADPOOL_SRCS src/windows.c)
+ ELSE()
+ LIST(APPEND PTHREADPOOL_SRCS src/pthreads.c)
+ ENDIF()
+ IF(PTHREADPOOL_ENABLE_FASTPATH)
+ LIST(APPEND PTHREADPOOL_SRCS src/fastpath.c)
+ ENDIF()
ENDIF()
-IF(${CMAKE_VERSION} VERSION_LESS "3.0")
- ADD_LIBRARY(pthreadpool_interface STATIC include/pthreadpool.h)
- SET_TARGET_PROPERTIES(pthreadpool_interface PROPERTIES LINKER_LANGUAGE C)
-ELSE()
- ADD_LIBRARY(pthreadpool_interface INTERFACE)
-ENDIF()
+ADD_LIBRARY(pthreadpool_interface INTERFACE)
TARGET_INCLUDE_DIRECTORIES(pthreadpool_interface INTERFACE include)
IF(NOT PTHREADPOOL_ALLOW_DEPRECATED_API)
TARGET_COMPILE_DEFINITIONS(pthreadpool_interface INTERFACE PTHREADPOOL_NO_DEPRECATED_API=1)
@@ -91,6 +103,31 @@ ELSE()
MESSAGE(FATAL_ERROR "Unsupported library type ${PTHREADPOOL_LIBRARY_TYPE}")
ENDIF()
+IF(PTHREADPOOL_SYNC_PRIMITIVE STREQUAL "condvar")
+ TARGET_COMPILE_DEFINITIONS(pthreadpool PRIVATE PTHREADPOOL_USE_FUTEX=0)
+ TARGET_COMPILE_DEFINITIONS(pthreadpool PRIVATE PTHREADPOOL_USE_GCD=0)
+ TARGET_COMPILE_DEFINITIONS(pthreadpool PRIVATE PTHREADPOOL_USE_EVENT=0)
+ELSEIF(PTHREADPOOL_SYNC_PRIMITIVE STREQUAL "futex")
+ TARGET_COMPILE_DEFINITIONS(pthreadpool PRIVATE PTHREADPOOL_USE_FUTEX=1)
+ TARGET_COMPILE_DEFINITIONS(pthreadpool PRIVATE PTHREADPOOL_USE_GCD=0)
+ TARGET_COMPILE_DEFINITIONS(pthreadpool PRIVATE PTHREADPOOL_USE_EVENT=0)
+ELSEIF(PTHREADPOOL_SYNC_PRIMITIVE STREQUAL "gcd")
+ TARGET_COMPILE_DEFINITIONS(pthreadpool PRIVATE PTHREADPOOL_USE_FUTEX=0)
+ TARGET_COMPILE_DEFINITIONS(pthreadpool PRIVATE PTHREADPOOL_USE_GCD=1)
+ TARGET_COMPILE_DEFINITIONS(pthreadpool PRIVATE PTHREADPOOL_USE_EVENT=0)
+ELSEIF(PTHREADPOOL_SYNC_PRIMITIVE STREQUAL "event")
+ TARGET_COMPILE_DEFINITIONS(pthreadpool PRIVATE PTHREADPOOL_USE_FUTEX=0)
+ TARGET_COMPILE_DEFINITIONS(pthreadpool PRIVATE PTHREADPOOL_USE_GCD=0)
+ TARGET_COMPILE_DEFINITIONS(pthreadpool PRIVATE PTHREADPOOL_USE_EVENT=1)
+ELSEIF(NOT PTHREADPOOL_SYNC_PRIMITIVE STREQUAL "default")
+ MESSAGE(FATAL_ERROR "Unsupported synchronization primitive ${PTHREADPOOL_SYNC_PRIMITIVE}")
+ENDIF()
+IF(PTHREADPOOL_ENABLE_FASTPATH)
+ TARGET_COMPILE_DEFINITIONS(pthreadpool PRIVATE PTHREADPOOL_ENABLE_FASTPATH=1)
+ELSE()
+ TARGET_COMPILE_DEFINITIONS(pthreadpool PRIVATE PTHREADPOOL_ENABLE_FASTPATH=0)
+ENDIF()
+
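+# PTHREADPOOL_SYNC_PRIMITIVE and PTHREADPOOL_ENABLE_FASTPATH are cache
+# variables, so both can be chosen at configure time with standard -D flags,
+# e.g.:
+#
+#   cmake -DPTHREADPOOL_SYNC_PRIMITIVE=futex -DPTHREADPOOL_ENABLE_FASTPATH=OFF ..
+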
SET_TARGET_PROPERTIES(pthreadpool PROPERTIES
C_STANDARD 11
C_EXTENSIONS NO)
diff --git a/METADATA b/METADATA
index 4df42c6..b17869f 100644
--- a/METADATA
+++ b/METADATA
@@ -1,9 +1,5 @@
name: "pthreadpool"
-description:
- "pthreadpool is a portable and efficient thread pool implementation. It "
- "provides similar functionality to #pragma omp parallel for, but with "
- "additional features."
-
+description: "pthreadpool is a portable and efficient thread pool implementation. It provides similar functionality to #pragma omp parallel for, but with additional features."
third_party {
url {
type: HOMEPAGE
@@ -13,7 +9,11 @@ third_party {
type: GIT
value: "https://github.com/Maratyszcza/pthreadpool"
}
- version: "d465747660ecf9ebbaddf8c3db37e4a13d0c9103"
- last_upgrade_date { year: 2020 month: 2 day: 3 }
+ version: "344531b40881b1ee41508a9c70c8fbbef3bd6cad"
license_type: NOTICE
+ last_upgrade_date {
+ year: 2020
+ month: 12
+ day: 7
+ }
}
diff --git a/NOTICE b/NOTICE
deleted file mode 120000
index 7a694c9..0000000
--- a/NOTICE
+++ /dev/null
@@ -1 +0,0 @@
-LICENSE \ No newline at end of file
diff --git a/README.md b/README.md
index 3faafaa..57ed3d4 100644
--- a/README.md
+++ b/README.md
@@ -13,7 +13,7 @@ It provides similar functionality to `#pragma omp parallel for`, but with additi
* Run on user-specified or auto-detected number of threads.
* Work-stealing scheduling for efficient work balancing.
* Wait-free synchronization of work items.
-* Compatible with Linux (including Android), macOS, iOS, Emscripten, Native Client environments.
+* Compatible with Linux (including Android), macOS, iOS, Windows, Emscripten environments.
* 100% unit tests coverage.
* Throughput and latency microbenchmarks.
@@ -35,17 +35,17 @@ int main() {
pthreadpool_t threadpool = pthreadpool_create(0);
assert(threadpool != NULL);
-
+
const size_t threads_count = pthreadpool_get_threads_count(threadpool);
printf("Created thread pool with %zu threads\n", threads_count);
struct array_addition_context context = { augend, addend, sum };
pthreadpool_parallelize_1d(threadpool,
(pthreadpool_task_1d_t) add_arrays,
- (void**) &context,
+ (void*) &context,
ARRAY_SIZE,
PTHREADPOOL_FLAG_DISABLE_DENORMALS /* flags */);
-
+
pthreadpool_destroy(threadpool);
threadpool = NULL;
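
The README example above elides the definitions it relies on; a minimal sketch of them, reconstructed from the visible snippet (the array values here are hypothetical), would be:

    #include <stddef.h>
    #include <pthreadpool.h>

    #define ARRAY_SIZE 4

    struct array_addition_context {
        double* augend;
        double* addend;
        double* sum;
    };

    /* Task function: called once per index i in [0, ARRAY_SIZE). */
    static void add_arrays(struct array_addition_context* context, size_t i) {
        context->sum[i] = context->augend[i] + context->addend[i];
    }

    /* In main(), before the snippet above: */
    double augend[ARRAY_SIZE] = { 1, 2, 4, -5 };
    double addend[ARRAY_SIZE] = { 0.25, -1.75, 0, 0.5 };
    double sum[ARRAY_SIZE];

Note that the (pthreadpool_task_1d_t) cast in the snippet bridges the struct-pointer signature of add_arrays to the void* first parameter the API expects; the (void**) to (void*) change in this hunk fixes the context argument to match that signature.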
diff --git a/WORKSPACE b/WORKSPACE
new file mode 100644
index 0000000..4a44079
--- /dev/null
+++ b/WORKSPACE
@@ -0,0 +1,38 @@
+workspace(name = "pthreadpool")
+
+load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
+
+# Bazel rule definitions
+http_archive(
+ name = "rules_cc",
+ strip_prefix = "rules_cc-master",
+ urls = ["https://github.com/bazelbuild/rules_cc/archive/master.zip"],
+)
+
+# Google Test framework, used by most unit-tests.
+http_archive(
+ name = "com_google_googletest",
+ strip_prefix = "googletest-master",
+ urls = ["https://github.com/google/googletest/archive/master.zip"],
+)
+
+# Google Benchmark library, used in micro-benchmarks.
+http_archive(
+ name = "com_google_benchmark",
+ strip_prefix = "benchmark-master",
+ urls = ["https://github.com/google/benchmark/archive/master.zip"],
+)
+
+# FXdiv library, used for repeated integer division by the same factor
+http_archive(
+ name = "FXdiv",
+ strip_prefix = "FXdiv-f7dd0576a1c8289ef099d4fd8b136b1c4487a873",
+ sha256 = "6e4b6e3c58e67c3bb090e286c4f235902c89b98cf3e67442a18f9167963aa286",
+ urls = ["https://github.com/Maratyszcza/FXdiv/archive/f7dd0576a1c8289ef099d4fd8b136b1c4487a873.zip"],
+)
+
+# Android NDK location and version is auto-detected from $ANDROID_NDK_HOME environment variable
+android_ndk_repository(name = "androidndk")
+
+# Android SDK location and API is auto-detected from $ANDROID_HOME environment variable
+android_sdk_repository(name = "androidsdk")
diff --git a/bench/latency.cc b/bench/latency.cc
index f500cdf..4fb59ee 100644
--- a/bench/latency.cc
+++ b/bench/latency.cc
@@ -1,12 +1,11 @@
#include <benchmark/benchmark.h>
-#include <unistd.h>
-
#include <pthreadpool.h>
+#include <thread>
static void SetNumberOfThreads(benchmark::internal::Benchmark* benchmark) {
- const int max_threads = sysconf(_SC_NPROCESSORS_ONLN);
+ const int max_threads = std::thread::hardware_concurrency();
for (int t = 1; t <= max_threads; t++) {
benchmark->Arg(t);
}
diff --git a/bench/throughput.cc b/bench/throughput.cc
index 2242ccb..47c8da7 100644
--- a/bench/throughput.cc
+++ b/bench/throughput.cc
@@ -7,7 +7,7 @@ static void compute_1d(void*, size_t) {
}
static void pthreadpool_parallelize_1d(benchmark::State& state) {
- pthreadpool_t threadpool = pthreadpool_create(0);
+ pthreadpool_t threadpool = pthreadpool_create(2);
const size_t threads = pthreadpool_get_threads_count(threadpool);
const size_t items = static_cast<size_t>(state.range(0));
while (state.KeepRunning()) {
@@ -30,7 +30,7 @@ static void compute_1d_tile_1d(void*, size_t, size_t) {
}
static void pthreadpool_parallelize_1d_tile_1d(benchmark::State& state) {
- pthreadpool_t threadpool = pthreadpool_create(0);
+ pthreadpool_t threadpool = pthreadpool_create(2);
const size_t threads = pthreadpool_get_threads_count(threadpool);
const size_t items = static_cast<size_t>(state.range(0));
while (state.KeepRunning()) {
@@ -49,11 +49,11 @@ static void pthreadpool_parallelize_1d_tile_1d(benchmark::State& state) {
BENCHMARK(pthreadpool_parallelize_1d_tile_1d)->UseRealTime()->RangeMultiplier(10)->Range(10, 1000000);
-static void compute_2d(void* context, size_t x, size_t y) {
+static void compute_2d(void*, size_t, size_t) {
}
static void pthreadpool_parallelize_2d(benchmark::State& state) {
- pthreadpool_t threadpool = pthreadpool_create(0);
+ pthreadpool_t threadpool = pthreadpool_create(2);
const size_t threads = pthreadpool_get_threads_count(threadpool);
const size_t items = static_cast<size_t>(state.range(0));
while (state.KeepRunning()) {
@@ -72,17 +72,41 @@ static void pthreadpool_parallelize_2d(benchmark::State& state) {
BENCHMARK(pthreadpool_parallelize_2d)->UseRealTime()->RangeMultiplier(10)->Range(10, 1000000);
-static void compute_2d_tiled(void* context, size_t x0, size_t y0, size_t xn, size_t yn) {
+static void compute_2d_tile_1d(void*, size_t, size_t, size_t) {
+}
+
+static void pthreadpool_parallelize_2d_tile_1d(benchmark::State& state) {
+ pthreadpool_t threadpool = pthreadpool_create(2);
+ const size_t threads = pthreadpool_get_threads_count(threadpool);
+ const size_t items = static_cast<size_t>(state.range(0));
+ while (state.KeepRunning()) {
+ pthreadpool_parallelize_2d_tile_1d(
+ threadpool,
+ compute_2d_tile_1d,
+ nullptr /* context */,
+ threads, items,
+ 1,
+ 0 /* flags */);
+ }
+ pthreadpool_destroy(threadpool);
+
+ /* Do not normalize by thread */
+ state.SetItemsProcessed(int64_t(state.iterations()) * items);
+}
+BENCHMARK(pthreadpool_parallelize_2d_tile_1d)->UseRealTime()->RangeMultiplier(10)->Range(10, 1000000);
+
+
+static void compute_2d_tile_2d(void*, size_t, size_t, size_t, size_t) {
}
static void pthreadpool_parallelize_2d_tile_2d(benchmark::State& state) {
- pthreadpool_t threadpool = pthreadpool_create(0);
+ pthreadpool_t threadpool = pthreadpool_create(2);
const size_t threads = pthreadpool_get_threads_count(threadpool);
const size_t items = static_cast<size_t>(state.range(0));
while (state.KeepRunning()) {
pthreadpool_parallelize_2d_tile_2d(
threadpool,
- compute_2d_tiled,
+ compute_2d_tile_2d,
nullptr /* context */,
threads, items,
1, 1,
@@ -96,4 +120,288 @@ static void pthreadpool_parallelize_2d_tile_2d(benchmark::State& state) {
BENCHMARK(pthreadpool_parallelize_2d_tile_2d)->UseRealTime()->RangeMultiplier(10)->Range(10, 1000000);
+static void compute_3d(void*, size_t, size_t, size_t) {
+}
+
+static void pthreadpool_parallelize_3d(benchmark::State& state) {
+ pthreadpool_t threadpool = pthreadpool_create(2);
+ const size_t threads = pthreadpool_get_threads_count(threadpool);
+ const size_t items = static_cast<size_t>(state.range(0));
+ while (state.KeepRunning()) {
+ pthreadpool_parallelize_3d(
+ threadpool,
+ compute_3d,
+ nullptr /* context */,
+ 1, threads, items,
+ 0 /* flags */);
+ }
+ pthreadpool_destroy(threadpool);
+
+ /* Do not normalize by thread */
+ state.SetItemsProcessed(int64_t(state.iterations()) * items);
+}
+BENCHMARK(pthreadpool_parallelize_3d)->UseRealTime()->RangeMultiplier(10)->Range(10, 1000000);
+
+
+static void compute_3d_tile_1d(void*, size_t, size_t, size_t, size_t) {
+}
+
+static void pthreadpool_parallelize_3d_tile_1d(benchmark::State& state) {
+ pthreadpool_t threadpool = pthreadpool_create(2);
+ const size_t threads = pthreadpool_get_threads_count(threadpool);
+ const size_t items = static_cast<size_t>(state.range(0));
+ while (state.KeepRunning()) {
+ pthreadpool_parallelize_3d_tile_1d(
+ threadpool,
+ compute_3d_tile_1d,
+ nullptr /* context */,
+ 1, threads, items,
+ 1,
+ 0 /* flags */);
+ }
+ pthreadpool_destroy(threadpool);
+
+ /* Do not normalize by thread */
+ state.SetItemsProcessed(int64_t(state.iterations()) * items);
+}
+BENCHMARK(pthreadpool_parallelize_3d_tile_1d)->UseRealTime()->RangeMultiplier(10)->Range(10, 1000000);
+
+
+static void compute_3d_tile_2d(void*, size_t, size_t, size_t, size_t, size_t) {
+}
+
+static void pthreadpool_parallelize_3d_tile_2d(benchmark::State& state) {
+ pthreadpool_t threadpool = pthreadpool_create(2);
+ const size_t threads = pthreadpool_get_threads_count(threadpool);
+ const size_t items = static_cast<size_t>(state.range(0));
+ while (state.KeepRunning()) {
+ pthreadpool_parallelize_3d_tile_2d(
+ threadpool,
+ compute_3d_tile_2d,
+ nullptr /* context */,
+ 1, threads, items,
+ 1, 1,
+ 0 /* flags */);
+ }
+ pthreadpool_destroy(threadpool);
+
+ /* Do not normalize by thread */
+ state.SetItemsProcessed(int64_t(state.iterations()) * items);
+}
+BENCHMARK(pthreadpool_parallelize_3d_tile_2d)->UseRealTime()->RangeMultiplier(10)->Range(10, 1000000);
+
+
+static void compute_4d(void*, size_t, size_t, size_t, size_t) {
+}
+
+static void pthreadpool_parallelize_4d(benchmark::State& state) {
+ pthreadpool_t threadpool = pthreadpool_create(2);
+ const size_t threads = pthreadpool_get_threads_count(threadpool);
+ const size_t items = static_cast<size_t>(state.range(0));
+ while (state.KeepRunning()) {
+ pthreadpool_parallelize_4d(
+ threadpool,
+ compute_4d,
+ nullptr /* context */,
+ 1, 1, threads, items,
+ 0 /* flags */);
+ }
+ pthreadpool_destroy(threadpool);
+
+ /* Do not normalize by thread */
+ state.SetItemsProcessed(int64_t(state.iterations()) * items);
+}
+BENCHMARK(pthreadpool_parallelize_4d)->UseRealTime()->RangeMultiplier(10)->Range(10, 1000000);
+
+
+static void compute_4d_tile_1d(void*, size_t, size_t, size_t, size_t, size_t) {
+}
+
+static void pthreadpool_parallelize_4d_tile_1d(benchmark::State& state) {
+ pthreadpool_t threadpool = pthreadpool_create(2);
+ const size_t threads = pthreadpool_get_threads_count(threadpool);
+ const size_t items = static_cast<size_t>(state.range(0));
+ while (state.KeepRunning()) {
+ pthreadpool_parallelize_4d_tile_1d(
+ threadpool,
+ compute_4d_tile_1d,
+ nullptr /* context */,
+ 1, 1, threads, items,
+ 1,
+ 0 /* flags */);
+ }
+ pthreadpool_destroy(threadpool);
+
+ /* Do not normalize by thread */
+ state.SetItemsProcessed(int64_t(state.iterations()) * items);
+}
+BENCHMARK(pthreadpool_parallelize_4d_tile_1d)->UseRealTime()->RangeMultiplier(10)->Range(10, 1000000);
+
+
+static void compute_4d_tile_2d(void*, size_t, size_t, size_t, size_t, size_t, size_t) {
+}
+
+static void pthreadpool_parallelize_4d_tile_2d(benchmark::State& state) {
+ pthreadpool_t threadpool = pthreadpool_create(2);
+ const size_t threads = pthreadpool_get_threads_count(threadpool);
+ const size_t items = static_cast<size_t>(state.range(0));
+ while (state.KeepRunning()) {
+ pthreadpool_parallelize_4d_tile_2d(
+ threadpool,
+ compute_4d_tile_2d,
+ nullptr /* context */,
+ 1, 1, threads, items,
+ 1, 1,
+ 0 /* flags */);
+ }
+ pthreadpool_destroy(threadpool);
+
+ /* Do not normalize by thread */
+ state.SetItemsProcessed(int64_t(state.iterations()) * items);
+}
+BENCHMARK(pthreadpool_parallelize_4d_tile_2d)->UseRealTime()->RangeMultiplier(10)->Range(10, 1000000);
+
+
+static void compute_5d(void*, size_t, size_t, size_t, size_t, size_t) {
+}
+
+static void pthreadpool_parallelize_5d(benchmark::State& state) {
+ pthreadpool_t threadpool = pthreadpool_create(2);
+ const size_t threads = pthreadpool_get_threads_count(threadpool);
+ const size_t items = static_cast<size_t>(state.range(0));
+ while (state.KeepRunning()) {
+ pthreadpool_parallelize_5d(
+ threadpool,
+ compute_5d,
+ nullptr /* context */,
+ 1, 1, 1, threads, items,
+ 0 /* flags */);
+ }
+ pthreadpool_destroy(threadpool);
+
+ /* Do not normalize by thread */
+ state.SetItemsProcessed(int64_t(state.iterations()) * items);
+}
+BENCHMARK(pthreadpool_parallelize_5d)->UseRealTime()->RangeMultiplier(10)->Range(10, 1000000);
+
+
+static void compute_5d_tile_1d(void*, size_t, size_t, size_t, size_t, size_t, size_t) {
+}
+
+static void pthreadpool_parallelize_5d_tile_1d(benchmark::State& state) {
+ pthreadpool_t threadpool = pthreadpool_create(2);
+ const size_t threads = pthreadpool_get_threads_count(threadpool);
+ const size_t items = static_cast<size_t>(state.range(0));
+ while (state.KeepRunning()) {
+ pthreadpool_parallelize_5d_tile_1d(
+ threadpool,
+ compute_5d_tile_1d,
+ nullptr /* context */,
+ 1, 1, 1, threads, items,
+ 1,
+ 0 /* flags */);
+ }
+ pthreadpool_destroy(threadpool);
+
+ /* Do not normalize by thread */
+ state.SetItemsProcessed(int64_t(state.iterations()) * items);
+}
+BENCHMARK(pthreadpool_parallelize_5d_tile_1d)->UseRealTime()->RangeMultiplier(10)->Range(10, 1000000);
+
+
+static void compute_5d_tile_2d(void*, size_t, size_t, size_t, size_t, size_t, size_t, size_t) {
+}
+
+static void pthreadpool_parallelize_5d_tile_2d(benchmark::State& state) {
+ pthreadpool_t threadpool = pthreadpool_create(2);
+ const size_t threads = pthreadpool_get_threads_count(threadpool);
+ const size_t items = static_cast<size_t>(state.range(0));
+ while (state.KeepRunning()) {
+ pthreadpool_parallelize_5d_tile_2d(
+ threadpool,
+ compute_5d_tile_2d,
+ nullptr /* context */,
+ 1, 1, 1, threads, items,
+ 1, 1,
+ 0 /* flags */);
+ }
+ pthreadpool_destroy(threadpool);
+
+ /* Do not normalize by thread */
+ state.SetItemsProcessed(int64_t(state.iterations()) * items);
+}
+BENCHMARK(pthreadpool_parallelize_5d_tile_2d)->UseRealTime()->RangeMultiplier(10)->Range(10, 1000000);
+
+
+static void compute_6d(void*, size_t, size_t, size_t, size_t, size_t, size_t) {
+}
+
+static void pthreadpool_parallelize_6d(benchmark::State& state) {
+ pthreadpool_t threadpool = pthreadpool_create(2);
+ const size_t threads = pthreadpool_get_threads_count(threadpool);
+ const size_t items = static_cast<size_t>(state.range(0));
+ while (state.KeepRunning()) {
+ pthreadpool_parallelize_6d(
+ threadpool,
+ compute_6d,
+ nullptr /* context */,
+ 1, 1, 1, 1, threads, items,
+ 0 /* flags */);
+ }
+ pthreadpool_destroy(threadpool);
+
+ /* Do not normalize by thread */
+ state.SetItemsProcessed(int64_t(state.iterations()) * items);
+}
+BENCHMARK(pthreadpool_parallelize_6d)->UseRealTime()->RangeMultiplier(10)->Range(10, 1000000);
+
+
+static void compute_6d_tile_1d(void*, size_t, size_t, size_t, size_t, size_t, size_t, size_t) {
+}
+
+static void pthreadpool_parallelize_6d_tile_1d(benchmark::State& state) {
+ pthreadpool_t threadpool = pthreadpool_create(2);
+ const size_t threads = pthreadpool_get_threads_count(threadpool);
+ const size_t items = static_cast<size_t>(state.range(0));
+ while (state.KeepRunning()) {
+ pthreadpool_parallelize_6d_tile_1d(
+ threadpool,
+ compute_6d_tile_1d,
+ nullptr /* context */,
+ 1, 1, 1, 1, threads, items,
+ 1,
+ 0 /* flags */);
+ }
+ pthreadpool_destroy(threadpool);
+
+ /* Do not normalize by thread */
+ state.SetItemsProcessed(int64_t(state.iterations()) * items);
+}
+BENCHMARK(pthreadpool_parallelize_6d_tile_1d)->UseRealTime()->RangeMultiplier(10)->Range(10, 1000000);
+
+
+static void compute_6d_tile_2d(void*, size_t, size_t, size_t, size_t, size_t, size_t, size_t, size_t) {
+}
+
+static void pthreadpool_parallelize_6d_tile_2d(benchmark::State& state) {
+ pthreadpool_t threadpool = pthreadpool_create(2);
+ const size_t threads = pthreadpool_get_threads_count(threadpool);
+ const size_t items = static_cast<size_t>(state.range(0));
+ while (state.KeepRunning()) {
+ pthreadpool_parallelize_6d_tile_2d(
+ threadpool,
+ compute_6d_tile_2d,
+ nullptr /* context */,
+ 1, 1, 1, 1, threads, items,
+ 1, 1,
+ 0 /* flags */);
+ }
+ pthreadpool_destroy(threadpool);
+
+ /* Do not normalize by thread */
+ state.SetItemsProcessed(int64_t(state.iterations()) * items);
+}
+BENCHMARK(pthreadpool_parallelize_6d_tile_2d)->UseRealTime()->RangeMultiplier(10)->Range(10, 1000000);
+
+
BENCHMARK_MAIN();
diff --git a/cmake/DownloadCpuinfo.cmake b/cmake/DownloadCpuinfo.cmake
new file mode 100644
index 0000000..e6f2893
--- /dev/null
+++ b/cmake/DownloadCpuinfo.cmake
@@ -0,0 +1,15 @@
+CMAKE_MINIMUM_REQUIRED(VERSION 3.5 FATAL_ERROR)
+
+PROJECT(cpuinfo-download NONE)
+
+INCLUDE(ExternalProject)
+ExternalProject_Add(cpuinfo
+ URL https://github.com/pytorch/cpuinfo/archive/19b9316c71e4e45b170a664bf62ddefd7ac9feb5.zip
+ URL_HASH SHA256=e0a485c072de957668eb324c49d726dc0fd736cfb9436b334325f20d93085003
+ SOURCE_DIR "${CMAKE_BINARY_DIR}/cpuinfo-source"
+ BINARY_DIR "${CMAKE_BINARY_DIR}/cpuinfo"
+ CONFIGURE_COMMAND ""
+ BUILD_COMMAND ""
+ INSTALL_COMMAND ""
+ TEST_COMMAND ""
+)
diff --git a/configure.py b/configure.py
index fd4ce92..51b9b62 100755
--- a/configure.py
+++ b/configure.py
@@ -12,11 +12,15 @@ def main(args):
build.export_cpath("include", ["pthreadpool.h"])
with build.options(source_dir="src", extra_include_dirs="src", deps=build.deps.fxdiv):
- sources = ["threadpool-legacy.c"]
+ sources = ["legacy-api.c", "portable-api.c"]
if build.target.is_emscripten:
- sources.append("threadpool-shim.c")
+ sources.append("shim.c")
+ elif build.target.is_macos:
+ sources.append("gcd.c")
+ elif build.target.is_windows:
+ sources.append("windows.c")
else:
- sources.append("threadpool-pthreads.c")
+ sources.append("pthreads.c")
build.static_library("pthreadpool", [build.cc(src) for src in sources])
with build.options(source_dir="test", deps=[build, build.deps.googletest]):
diff --git a/include/pthreadpool.h b/include/pthreadpool.h
index 2443285..59c4abf 100644
--- a/include/pthreadpool.h
+++ b/include/pthreadpool.h
@@ -11,100 +11,481 @@ typedef void (*pthreadpool_task_1d_tile_1d_t)(void*, size_t, size_t);
typedef void (*pthreadpool_task_2d_t)(void*, size_t, size_t);
typedef void (*pthreadpool_task_2d_tile_1d_t)(void*, size_t, size_t, size_t);
typedef void (*pthreadpool_task_2d_tile_2d_t)(void*, size_t, size_t, size_t, size_t);
+typedef void (*pthreadpool_task_3d_t)(void*, size_t, size_t, size_t);
+typedef void (*pthreadpool_task_3d_tile_1d_t)(void*, size_t, size_t, size_t, size_t);
typedef void (*pthreadpool_task_3d_tile_2d_t)(void*, size_t, size_t, size_t, size_t, size_t);
+typedef void (*pthreadpool_task_4d_t)(void*, size_t, size_t, size_t, size_t);
+typedef void (*pthreadpool_task_4d_tile_1d_t)(void*, size_t, size_t, size_t, size_t, size_t);
typedef void (*pthreadpool_task_4d_tile_2d_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t);
+typedef void (*pthreadpool_task_5d_t)(void*, size_t, size_t, size_t, size_t, size_t);
+typedef void (*pthreadpool_task_5d_tile_1d_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t);
typedef void (*pthreadpool_task_5d_tile_2d_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t, size_t);
+typedef void (*pthreadpool_task_6d_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t);
+typedef void (*pthreadpool_task_6d_tile_1d_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t, size_t);
typedef void (*pthreadpool_task_6d_tile_2d_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t, size_t, size_t);
+typedef void (*pthreadpool_task_1d_with_id_t)(void*, uint32_t, size_t);
+typedef void (*pthreadpool_task_2d_tile_2d_with_id_t)(void*, uint32_t, size_t, size_t, size_t, size_t);
+typedef void (*pthreadpool_task_3d_tile_2d_with_id_t)(void*, uint32_t, size_t, size_t, size_t, size_t, size_t);
+typedef void (*pthreadpool_task_4d_tile_2d_with_id_t)(void*, uint32_t, size_t, size_t, size_t, size_t, size_t, size_t);
+
+/**
+ * Disable support for denormalized numbers to the maximum extent possible for
+ * the duration of the computation.
+ *
+ * Handling denormalized floating-point numbers is often implemented in
+ * microcode, and incurs significant performance degradation. This hint
+ * instructs the thread pool to disable support for denormalized numbers before
+ * running the computation by manipulating architecture-specific control
+ * registers, and restore the initial value of control registers after the
+ * computation is complete. The thread pool temporary disables denormalized
+ * numbers on all threads involved in the computation (i.e. the caller threads,
+ * and potentially worker threads).
+ *
+ * Disabling denormalized numbers may have a small negative effect on results'
+ * accuracy. As various architectures differ in capabilities to control
+ * processing of denormalized numbers, using this flag may also hurt results'
+ * reproducibility across different instruction set architectures.
+ */
#define PTHREADPOOL_FLAG_DISABLE_DENORMALS 0x00000001
+/**
+ * Yield worker threads to the system scheduler after the operation is finished.
+ *
+ * Force workers to use kernel wait (instead of the default active spin-wait) for
+ * new commands after this command is processed. This flag affects only the
+ * immediate next operation on this thread pool. To make the thread pool always
+ * use kernel wait, pass this flag to all parallelization functions.
+ */
+#define PTHREADPOOL_FLAG_YIELD_WORKERS 0x00000002
+
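/* A minimal usage sketch (with hypothetical `task`, `context`, and `range`):
 * the flags form a bitmask, so both can be passed in a single call:
 *
 *   pthreadpool_parallelize_1d(threadpool, task, context, range,
 *       PTHREADPOOL_FLAG_DISABLE_DENORMALS | PTHREADPOOL_FLAG_YIELD_WORKERS);
 */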
#ifdef __cplusplus
extern "C" {
#endif
/**
- * Creates a thread pool with the specified number of threads.
+ * Create a thread pool with the specified number of threads.
*
- * @param[in] threads_count The number of threads in the thread pool.
- * A value of 0 has special interpretation: it creates a thread for each
- * processor core available in the system.
+ * @param threads_count the number of threads in the thread pool.
+ * A value of 0 has special interpretation: it creates a thread pool with as
+ * many threads as there are logical processors in the system.
*
- * @returns A pointer to an opaque thread pool object.
- * On error the function returns NULL and sets errno accordingly.
+ * @returns A pointer to an opaque thread pool object if the call is
+ * successful, or a NULL pointer if the call failed.
*/
pthreadpool_t pthreadpool_create(size_t threads_count);
/**
- * Queries the number of threads in a thread pool.
+ * Query the number of threads in a thread pool.
*
- * @param[in] threadpool The thread pool to query.
+ * @param threadpool the thread pool to query.
*
* @returns The number of threads in the thread pool.
*/
size_t pthreadpool_get_threads_count(pthreadpool_t threadpool);
/**
- * Processes items in parallel using threads from a thread pool.
+ * Process items on a 1D grid.
*
- * When the call returns, all items have been processed and the thread pool is
- * ready for a new task.
+ * The function implements a parallel version of the following snippet:
+ *
+ * for (size_t i = 0; i < range; i++)
+ * function(context, i);
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
*
* @note If multiple threads call this function with the same thread pool, the
* calls are serialized.
*
- * @param[in] threadpool The thread pool to use for parallelisation.
- * @param[in] function The function to call for each item.
- * @param[in] argument The first argument passed to the @a function.
- * @param[in] items The number of items to process. The @a function
- * will be called once for each item.
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param function the function to call for each item.
+ * @param context the first argument passed to the specified function.
+ * @param range the number of items on the 1D grid to process. The
+ * specified function will be called once for each item.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
*/
void pthreadpool_parallelize_1d(
pthreadpool_t threadpool,
pthreadpool_task_1d_t function,
- void* argument,
+ void* context,
size_t range,
uint32_t flags);
+/**
+ * Process items on a 1D grid using a microarchitecture-aware task function.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * uint32_t uarch_index = cpuinfo_initialize() ?
+ * cpuinfo_get_current_uarch_index() : default_uarch_index;
+ * if (uarch_index > max_uarch_index) uarch_index = default_uarch_index;
+ * for (size_t i = 0; i < range; i++)
+ * function(context, uarch_index, i);
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If
+ * threadpool is NULL, all items are processed serially on the calling
+ * thread.
+ * @param function the function to call for each item.
+ * @param context the first argument passed to the specified
+ * function.
+ * @param default_uarch_index the microarchitecture index to use when
+ * pthreadpool is configured without cpuinfo, cpuinfo initialization failed,
+ * or index returned by cpuinfo_get_current_uarch_index() exceeds the
+ * max_uarch_index value.
+ * @param max_uarch_index the maximum microarchitecture index expected by
+ * the specified function. If the index returned by
+ * cpuinfo_get_current_uarch_index() exceeds this value, default_uarch_index
+ * will be used instead. default_uarch_index can exceed max_uarch_index.
+ * @param range the number of items on the 1D grid to process.
+ * The specified function will be called once for each item.
+ * @param flags a bitwise combination of zero or more optional
+ * flags (PTHREADPOOL_FLAG_DISABLE_DENORMALS or
+ * PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
+void pthreadpool_parallelize_1d_with_uarch(
+ pthreadpool_t threadpool,
+ pthreadpool_task_1d_with_id_t function,
+ void* context,
+ uint32_t default_uarch_index,
+ uint32_t max_uarch_index,
+ size_t range,
+ uint32_t flags);
+
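/* A sketch of a microarchitecture-aware task, assuming two hypothetical code
 * paths: the pool resolves the index via cpuinfo as shown above and clamps it
 * to max_uarch_index, so with max_uarch_index = 1 the task sees only 0 or 1.
 *
 *   static void scale_item(void* context, uint32_t uarch_index, size_t i) {
 *       float* data = (float*) context;
 *       if (uarch_index == 1) {
 *           data[i] *= 2.0f;  // path tuned for the newer microarchitecture
 *       } else {
 *           data[i] *= 2.0f;  // generic fallback path
 *       }
 *   }
 *
 *   pthreadpool_parallelize_1d_with_uarch(threadpool, scale_item, data,
 *       0 // default_uarch_index //, 1 // max_uarch_index //, range, 0);
 */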
+/**
+ * Process items on a 1D grid with specified maximum tile size.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * for (size_t i = 0; i < range; i += tile)
+ * function(context, i, min(range - i, tile));
+ *
+ * When the call returns, all items have been processed and the thread pool is
+ * ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool,
+ * the calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param function the function to call for each tile.
+ * @param context the first argument passed to the specified function.
+ * @param range the number of items on the 1D grid to process.
+ * @param tile the maximum number of items on the 1D grid to process in
+ * one function call.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
void pthreadpool_parallelize_1d_tile_1d(
pthreadpool_t threadpool,
pthreadpool_task_1d_tile_1d_t function,
- void* argument,
+ void* context,
size_t range,
size_t tile,
uint32_t flags);
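/* A sketch matching the loop above, with a hypothetical tile of 256: each
 * call receives a start index and a count of at most 256 items, so the last
 * call may receive a short remainder (range need not be a multiple of tile).
 *
 *   static void double_chunk(void* context, size_t start, size_t count) {
 *       float* data = (float*) context;
 *       for (size_t i = start; i < start + count; i++) {
 *           data[i] *= 2.0f;
 *       }
 *   }
 *
 *   pthreadpool_parallelize_1d_tile_1d(threadpool, double_chunk, data,
 *       range, 256, 0);
 */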
+/**
+ * Process items on a 2D grid.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * for (size_t i = 0; i < range_i; i++)
+ * for (size_t j = 0; j < range_j; j++)
+ * function(context, i, j);
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param function the function to call for each item.
+ * @param context the first argument passed to the specified function.
+ * @param range_i the number of items to process along the first dimension
+ * of the 2D grid.
+ * @param range_j the number of items to process along the second dimension
+ * of the 2D grid.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
void pthreadpool_parallelize_2d(
pthreadpool_t threadpool,
pthreadpool_task_2d_t function,
- void* argument,
+ void* context,
size_t range_i,
size_t range_j,
uint32_t flags);
+/**
+ * Process items on a 2D grid with the specified maximum tile size along the
+ * last grid dimension.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * for (size_t i = 0; i < range_i; i++)
+ * for (size_t j = 0; j < range_j; j += tile_j)
+ * function(context, i, j, min(range_j - j, tile_j));
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param function the function to call for each tile.
+ * @param context the first argument passed to the specified function.
+ * @param range_i the number of items to process along the first dimension
+ * of the 2D grid.
+ * @param range_j the number of items to process along the second dimension
+ * of the 2D grid.
+ * @param tile_j the maximum number of items along the second dimension of
+ * the 2D grid to process in one function call.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
void pthreadpool_parallelize_2d_tile_1d(
pthreadpool_t threadpool,
pthreadpool_task_2d_tile_1d_t function,
- void* argument,
+ void* context,
size_t range_i,
size_t range_j,
size_t tile_j,
uint32_t flags);
+/**
+ * Process items on a 2D grid with the specified maximum tile size along each
+ * grid dimension.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * for (size_t i = 0; i < range_i; i += tile_i)
+ * for (size_t j = 0; j < range_j; j += tile_j)
+ * function(context, i, j,
+ * min(range_i - i, tile_i), min(range_j - j, tile_j));
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param function the function to call for each tile.
+ * @param context the first argument passed to the specified function.
+ * @param range_i the number of items to process along the first dimension
+ * of the 2D grid.
+ * @param range_j the number of items to process along the second dimension
+ * of the 2D grid.
+ * @param tile_i the maximum number of items along the first dimension of
+ * the 2D grid to process in one function call.
+ * @param tile_j the maximum number of items along the second dimension of
+ * the 2D grid to process in one function call.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
void pthreadpool_parallelize_2d_tile_2d(
pthreadpool_t threadpool,
pthreadpool_task_2d_tile_2d_t function,
- void* argument,
+ void* context,
size_t range_i,
size_t range_j,
size_t tile_i,
size_t tile_j,
uint32_t flags);
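/* A sketch matching the loop above, tiling a hypothetical rows x cols matrix
 * into 32x32 blocks: the counts are clamped at the grid edges, so each call
 * receives the block origin plus the block's actual height and width.
 *
 *   static void fill_block(void* context, size_t i, size_t j,
 *       size_t block_height, size_t block_width) {
 *       // process rows [i, i + block_height) x cols [j, j + block_width)
 *   }
 *
 *   pthreadpool_parallelize_2d_tile_2d(threadpool, fill_block, matrix,
 *       rows, cols, 32, 32, 0);
 */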
+/**
+ * Process items on a 2D grid with the specified maximum tile size along each
+ * grid dimension using a microarchitecture-aware task function.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * uint32_t uarch_index = cpuinfo_initialize() ?
+ * cpuinfo_get_current_uarch_index() : default_uarch_index;
+ * if (uarch_index > max_uarch_index) uarch_index = default_uarch_index;
+ * for (size_t i = 0; i < range_i; i += tile_i)
+ * for (size_t j = 0; j < range_j; j += tile_j)
+ * function(context, uarch_index, i, j,
+ * min(range_i - i, tile_i), min(range_j - j, tile_j));
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If
+ * threadpool is NULL, all items are processed serially on the calling
+ * thread.
+ * @param function the function to call for each tile.
+ * @param context the first argument passed to the specified
+ * function.
+ * @param default_uarch_index the microarchitecture index to use when
+ * pthreadpool is configured without cpuinfo,
+ * cpuinfo initialization failed, or index returned
+ * by cpuinfo_get_current_uarch_index() exceeds
+ * the max_uarch_index value.
+ * @param max_uarch_index the maximum microarchitecture index expected
+ * by the specified function. If the index returned
+ * by cpuinfo_get_current_uarch_index() exceeds this
+ * value, default_uarch_index will be used instead.
+ * default_uarch_index can exceed max_uarch_index.
+ * @param range_i the number of items to process along the first
+ * dimension of the 2D grid.
+ * @param range_j the number of items to process along the second
+ * dimension of the 2D grid.
+ * @param tile_i                  the maximum number of items along the first
+ * dimension of the 2D grid to process in one function call.
+ * @param tile_j the maximum number of items along the second
+ * dimension of the 2D grid to process in one function call.
+ * @param flags a bitwise combination of zero or more optional
+ * flags (PTHREADPOOL_FLAG_DISABLE_DENORMALS or
+ * PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
+void pthreadpool_parallelize_2d_tile_2d_with_uarch(
+ pthreadpool_t threadpool,
+ pthreadpool_task_2d_tile_2d_with_id_t function,
+ void* context,
+ uint32_t default_uarch_index,
+ uint32_t max_uarch_index,
+ size_t range_i,
+ size_t range_j,
+ size_t tile_i,
+ size_t tile_j,
+ uint32_t flags);
+
+/**
+ * Process items on a 3D grid.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * for (size_t i = 0; i < range_i; i++)
+ * for (size_t j = 0; j < range_j; j++)
+ * for (size_t k = 0; k < range_k; k++)
+ * function(context, i, j, k);
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param function the function to call for each tile.
+ * @param context the first argument passed to the specified function.
+ * @param range_i the number of items to process along the first dimension
+ * of the 3D grid.
+ * @param range_j the number of items to process along the second dimension
+ * of the 3D grid.
+ * @param range_k the number of items to process along the third dimension
+ * of the 3D grid.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
+void pthreadpool_parallelize_3d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_3d_t function,
+ void* context,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ uint32_t flags);
+
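/* A sketch matching the loop above, assuming a hypothetical
 * batch x height x width volume where each call handles one element:
 *
 *   static void process_voxel(void* context, size_t i, size_t j, size_t k) {
 *       // element (i, j, k) of the volume
 *   }
 *
 *   pthreadpool_parallelize_3d(threadpool, process_voxel, volume,
 *       batch, height, width, 0);
 */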
+/**
+ * Process items on a 3D grid with the specified maximum tile size along the
+ * last grid dimension.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * for (size_t i = 0; i < range_i; i++)
+ * for (size_t j = 0; j < range_j; j++)
+ * for (size_t k = 0; k < range_k; k += tile_k)
+ * function(context, i, j, k, min(range_k - k, tile_k));
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param function the function to call for each tile.
+ * @param context the first argument passed to the specified function.
+ * @param range_i the number of items to process along the first dimension
+ * of the 3D grid.
+ * @param range_j the number of items to process along the second dimension
+ * of the 3D grid.
+ * @param range_k the number of items to process along the third dimension
+ * of the 3D grid.
+ * @param tile_k the maximum number of items along the third dimension of
+ * the 3D grid to process in one function call.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
+void pthreadpool_parallelize_3d_tile_1d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_3d_tile_1d_t function,
+ void* context,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t tile_k,
+ uint32_t flags);
+
+/**
+ * Process items on a 3D grid with the specified maximum tile size along the
+ * last two grid dimensions.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * for (size_t i = 0; i < range_i; i++)
+ * for (size_t j = 0; j < range_j; j += tile_j)
+ * for (size_t k = 0; k < range_k; k += tile_k)
+ * function(context, i, j, k,
+ * min(range_j - j, tile_j), min(range_k - k, tile_k));
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param function the function to call for each tile.
+ * @param context the first argument passed to the specified function.
+ * @param range_i the number of items to process along the first dimension
+ * of the 3D grid.
+ * @param range_j the number of items to process along the second dimension
+ * of the 3D grid.
+ * @param range_k the number of items to process along the third dimension
+ * of the 3D grid.
+ * @param tile_j the maximum number of items along the second dimension of
+ * the 3D grid to process in one function call.
+ * @param tile_k the maximum number of items along the third dimension of
+ * the 3D grid to process in one function call.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
void pthreadpool_parallelize_3d_tile_2d(
pthreadpool_t threadpool,
pthreadpool_task_3d_tile_2d_t function,
- void* argument,
+ void* context,
size_t range_i,
size_t range_j,
size_t range_k,
@@ -112,10 +493,264 @@ void pthreadpool_parallelize_3d_tile_2d(
size_t tile_k,
uint32_t flags);
+/**
+ * Process items on a 3D grid with the specified maximum tile size along the
+ * last two grid dimensions using a microarchitecture-aware task function.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * uint32_t uarch_index = cpuinfo_initialize() ?
+ * cpuinfo_get_current_uarch_index() : default_uarch_index;
+ * if (uarch_index > max_uarch_index) uarch_index = default_uarch_index;
+ * for (size_t i = 0; i < range_i; i++)
+ * for (size_t j = 0; j < range_j; j += tile_j)
+ * for (size_t k = 0; k < range_k; k += tile_k)
+ * function(context, uarch_index, i, j, k,
+ * min(range_j - j, tile_j), min(range_k - k, tile_k));
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If
+ * threadpool is NULL, all items are processed serially on the calling
+ * thread.
+ * @param function the function to call for each tile.
+ * @param context the first argument passed to the specified
+ * function.
+ * @param default_uarch_index the microarchitecture index to use when
+ * pthreadpool is configured without cpuinfo, cpuinfo initialization failed,
+ * or the index returned by cpuinfo_get_current_uarch_index() exceeds the
+ * max_uarch_index value.
+ * @param max_uarch_index the maximum microarchitecture index expected by
+ * the specified function. If the index returned by
+ * cpuinfo_get_current_uarch_index() exceeds this value, default_uarch_index
+ * will be used instead. default_uarch_index can exceed max_uarch_index.
+ * @param range_i the number of items to process along the first
+ * dimension of the 3D grid.
+ * @param range_j the number of items to process along the second
+ * dimension of the 3D grid.
+ * @param range_k the number of items to process along the third
+ * dimension of the 3D grid.
+ * @param tile_j the maximum number of items along the second
+ * dimension of the 3D grid to process in one function call.
+ * @param tile_k the maximum number of items along the third
+ * dimension of the 3D grid to process in one function call.
+ * @param flags a bitwise combination of zero or more optional
+ * flags (PTHREADPOOL_FLAG_DISABLE_DENORMALS or
+ * PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
+void pthreadpool_parallelize_3d_tile_2d_with_uarch(
+ pthreadpool_t threadpool,
+ pthreadpool_task_3d_tile_2d_with_id_t function,
+ void* context,
+ uint32_t default_uarch_index,
+ uint32_t max_uarch_index,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t tile_j,
+ size_t tile_k,
+ uint32_t flags);
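+
+/*
+ * Editor's note: an illustrative sketch of the uarch-index contract, not
+ * part of the upstream header. A task with two specialized code paths would
+ * pass max_uarch_index = 1; any cpuinfo index above that falls back to
+ * default_uarch_index = 0, as does running without cpuinfo support:
+ *
+ *   static void gemm_tile(void* ctx, uint32_t uarch, size_t i, size_t j,
+ *                         size_t k, size_t tile_j, size_t tile_k) {
+ *     // uarch is either 0 (generic path) or 1 (big-core path) here
+ *   }
+ *
+ *   pthreadpool_parallelize_3d_tile_2d_with_uarch(
+ *     pool, gemm_tile, &ctx,
+ *     0,  // default_uarch_index
+ *     1,  // max_uarch_index
+ *     batch, rows, cols, 8, 8, 0);  // ranges, tiles, flags (hypothetical)
+ */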
+
+/**
+ * Process items on a 4D grid.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * for (size_t i = 0; i < range_i; i++)
+ * for (size_t j = 0; j < range_j; j++)
+ * for (size_t k = 0; k < range_k; k++)
+ * for (size_t l = 0; l < range_l; l++)
+ * function(context, i, j, k, l);
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param function the function to call for each item.
+ * @param context the first argument passed to the specified function.
+ * @param range_i the number of items to process along the first dimension
+ * of the 4D grid.
+ * @param range_j the number of items to process along the second dimension
+ * of the 4D grid.
+ * @param range_k the number of items to process along the third dimension
+ * of the 4D grid.
+ * @param range_l the number of items to process along the fourth dimension
+ * of the 4D grid.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
+void pthreadpool_parallelize_4d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_4d_t function,
+ void* context,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ uint32_t flags);
+
+/**
+ * Process items on a 4D grid with the specified maximum tile size along the
+ * last grid dimension.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * for (size_t i = 0; i < range_i; i++)
+ * for (size_t j = 0; j < range_j; j++)
+ * for (size_t k = 0; k < range_k; k++)
+ * for (size_t l = 0; l < range_l; l += tile_l)
+ * function(context, i, j, k, l, min(range_l - l, tile_l));
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param function the function to call for each tile.
+ * @param context the first argument passed to the specified function.
+ * @param range_i the number of items to process along the first dimension
+ * of the 4D grid.
+ * @param range_j the number of items to process along the second dimension
+ * of the 4D grid.
+ * @param range_k the number of items to process along the third dimension
+ * of the 4D grid.
+ * @param range_l the number of items to process along the fourth dimension
+ * of the 4D grid.
+ * @param tile_l the maximum number of items along the fourth dimension of
+ * the 4D grid to process in one function call.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
+void pthreadpool_parallelize_4d_tile_1d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_4d_tile_1d_t function,
+ void* context,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ size_t tile_l,
+ uint32_t flags);
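+
+/*
+ * Editor's note, illustrative only: because a NULL threadpool degrades to
+ * the serial loop shown above, callers can keep a single code path and
+ * decide at runtime whether to thread the work:
+ *
+ *   pthreadpool_t pool = use_threads ? pthreadpool_create(0) : NULL;
+ *   pthreadpool_parallelize_4d_tile_1d(pool, task, &ctx,
+ *                                      batch, heads, rows, cols, 8, 0);
+ *   pthreadpool_destroy(pool);  // destroying a NULL pool is a no-op
+ */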
+
+/**
+ * Process items on a 4D grid with the specified maximum tile size along the
+ * last two grid dimensions.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * for (size_t i = 0; i < range_i; i++)
+ * for (size_t j = 0; j < range_j; j++)
+ * for (size_t k = 0; k < range_k; k += tile_k)
+ * for (size_t l = 0; l < range_l; l += tile_l)
+ * function(context, i, j, k, l,
+ * min(range_k - k, tile_k), min(range_l - l, tile_l));
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param function the function to call for each tile.
+ * @param context the first argument passed to the specified function.
+ * @param range_i the number of items to process along the first dimension
+ * of the 4D grid.
+ * @param range_j the number of items to process along the second dimension
+ * of the 4D grid.
+ * @param range_k the number of items to process along the third dimension
+ * of the 4D grid.
+ * @param range_l the number of items to process along the fourth dimension
+ * of the 4D grid.
+ * @param tile_k the maximum number of items along the third dimension of
+ * the 4D grid to process in one function call.
+ * @param tile_l the maximum number of items along the fourth dimension of
+ * the 4D grid to process in one function call.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
void pthreadpool_parallelize_4d_tile_2d(
pthreadpool_t threadpool,
pthreadpool_task_4d_tile_2d_t function,
- void* argument,
+ void* context,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ size_t tile_k,
+ size_t tile_l,
+ uint32_t flags);
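+
+/*
+ * Editor's note: an illustrative accounting of the snippet above, not part
+ * of the upstream header. The task is invoked
+ *
+ *   range_i * range_j * ceil(range_k / tile_k) * ceil(range_l / tile_l)
+ *
+ * times; e.g. range_k = 7, tile_k = 3, range_l = 5, tile_l = 5 gives
+ * ceil(7/3) * ceil(5/5) = 3 * 1 = 3 calls per (i, j) pair, with the last
+ * k-tile trimmed to min(7 - 6, 3) = 1 item.
+ */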
+
+/**
+ * Process items on a 4D grid with the specified maximum tile size along the
+ * last two grid dimensions using a microarchitecture-aware task function.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * uint32_t uarch_index = cpuinfo_initialize() ?
+ * cpuinfo_get_current_uarch_index() : default_uarch_index;
+ * if (uarch_index > max_uarch_index) uarch_index = default_uarch_index;
+ * for (size_t i = 0; i < range_i; i++)
+ * for (size_t j = 0; j < range_j; j++)
+ * for (size_t k = 0; k < range_k; k += tile_k)
+ * for (size_t l = 0; l < range_l; l += tile_l)
+ * function(context, uarch_index, i, j, k, l,
+ * min(range_k - k, tile_k), min(range_l - l, tile_l));
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If
+ * threadpool is NULL, all items are processed serially on the calling
+ * thread.
+ * @param function the function to call for each tile.
+ * @param context the first argument passed to the specified
+ * function.
+ * @param default_uarch_index the microarchitecture index to use when
+ * pthreadpool is configured without cpuinfo, cpuinfo initialization failed,
+ * or the index returned by cpuinfo_get_current_uarch_index() exceeds the
+ * max_uarch_index value.
+ * @param max_uarch_index the maximum microarchitecture index expected by
+ * the specified function. If the index returned by
+ * cpuinfo_get_current_uarch_index() exceeds this value, default_uarch_index
+ * will be used instead. default_uarch_index can exceed max_uarch_index.
+ * @param range_i the number of items to process along the first
+ * dimension of the 4D grid.
+ * @param range_j the number of items to process along the second
+ * dimension of the 4D grid.
+ * @param range_k the number of items to process along the third
+ * dimension of the 4D grid.
+ * @param range_l the number of items to process along the fourth
+ * dimension of the 4D grid.
+ * @param tile_k the maximum number of items along the third
+ * dimension of the 4D grid to process in one function call.
+ * @param tile_l the maximum number of items along the fourth
+ * dimension of the 4D grid to process in one function call.
+ * @param flags a bitwise combination of zero or more optional
+ * flags (PTHREADPOOL_FLAG_DISABLE_DENORMALS or
+ * PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
+void pthreadpool_parallelize_4d_tile_2d_with_uarch(
+ pthreadpool_t threadpool,
+ pthreadpool_task_4d_tile_2d_with_id_t function,
+ void* context,
+ uint32_t default_uarch_index,
+ uint32_t max_uarch_index,
size_t range_i,
size_t range_j,
size_t range_k,
@@ -124,10 +759,147 @@ void pthreadpool_parallelize_4d_tile_2d(
size_t tile_l,
uint32_t flags);
+/**
+ * Process items on a 5D grid.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * for (size_t i = 0; i < range_i; i++)
+ * for (size_t j = 0; j < range_j; j++)
+ * for (size_t k = 0; k < range_k; k++)
+ * for (size_t l = 0; l < range_l; l++)
+ * for (size_t m = 0; m < range_m; m++)
+ * function(context, i, j, k, l, m);
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param function the function to call for each item.
+ * @param context the first argument passed to the specified function.
+ * @param range_i the number of items to process along the first dimension
+ * of the 5D grid.
+ * @param range_j the number of items to process along the second dimension
+ * of the 5D grid.
+ * @param range_k the number of items to process along the third dimension
+ * of the 5D grid.
+ * @param range_l the number of items to process along the fourth dimension
+ * of the 5D grid.
+ * @param range_m the number of items to process along the fifth dimension
+ * of the 5D grid.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
+void pthreadpool_parallelize_5d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_5d_t function,
+ void* context,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ size_t range_m,
+ uint32_t flags);
+
+/**
+ * Process items on a 5D grid with the specified maximum tile size along the
+ * last grid dimension.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * for (size_t i = 0; i < range_i; i++)
+ * for (size_t j = 0; j < range_j; j++)
+ * for (size_t k = 0; k < range_k; k++)
+ * for (size_t l = 0; l < range_l; l++)
+ * for (size_t m = 0; m < range_m; m += tile_m)
+ * function(context, i, j, k, l, m, min(range_m - m, tile_m));
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param function the function to call for each tile.
+ * @param context the first argument passed to the specified function.
+ * @param range_i the number of items to process along the first dimension
+ * of the 5D grid.
+ * @param range_j the number of items to process along the second dimension
+ * of the 5D grid.
+ * @param range_k the number of items to process along the third dimension
+ * of the 5D grid.
+ * @param range_l the number of items to process along the fourth dimension
+ * of the 5D grid.
+ * @param range_m the number of items to process along the fifth dimension
+ * of the 5D grid.
+ * @param tile_m the maximum number of items along the fifth dimension of
+ * the 5D grid to process in one function call.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
+void pthreadpool_parallelize_5d_tile_1d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_5d_tile_1d_t function,
+ void* context,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ size_t range_m,
+ size_t tile_m,
+ uint32_t flags);
+
+/**
+ * Process items on a 5D grid with the specified maximum tile size along the
+ * last two grid dimensions.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * for (size_t i = 0; i < range_i; i++)
+ * for (size_t j = 0; j < range_j; j++)
+ * for (size_t k = 0; k < range_k; k++)
+ * for (size_t l = 0; l < range_l; l += tile_l)
+ * for (size_t m = 0; m < range_m; m += tile_m)
+ * function(context, i, j, k, l, m,
+ * min(range_l - l, tile_l), min(range_m - m, tile_m));
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param function the function to call for each tile.
+ * @param context the first argument passed to the specified function.
+ * @param range_i the number of items to process along the first dimension
+ * of the 5D grid.
+ * @param range_j the number of items to process along the second dimension
+ * of the 5D grid.
+ * @param range_k the number of items to process along the third dimension
+ * of the 5D grid.
+ * @param range_l the number of items to process along the fourth dimension
+ * of the 5D grid.
+ * @param range_m the number of items to process along the fifth dimension
+ * of the 5D grid.
+ * @param tile_l the maximum number of items along the fourth dimension of
+ * the 5D grid to process in one function call.
+ * @param tile_m the maximum number of items along the fifth dimension of
+ * the 5D grid to process in one function call.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
void pthreadpool_parallelize_5d_tile_2d(
pthreadpool_t threadpool,
pthreadpool_task_5d_tile_2d_t function,
- void* argument,
+ void* context,
size_t range_i,
size_t range_j,
size_t range_k,
@@ -137,10 +909,160 @@ void pthreadpool_parallelize_5d_tile_2d(
size_t tile_m,
uint32_t flags);
+/**
+ * Process items on a 6D grid.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * for (size_t i = 0; i < range_i; i++)
+ * for (size_t j = 0; j < range_j; j++)
+ * for (size_t k = 0; k < range_k; k++)
+ * for (size_t l = 0; l < range_l; l++)
+ * for (size_t m = 0; m < range_m; m++)
+ * for (size_t n = 0; n < range_n; n++)
+ * function(context, i, j, k, l, m, n);
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param function the function to call for each item.
+ * @param context the first argument passed to the specified function.
+ * @param range_i the number of items to process along the first dimension
+ * of the 6D grid.
+ * @param range_j the number of items to process along the second dimension
+ * of the 6D grid.
+ * @param range_k the number of items to process along the third dimension
+ * of the 6D grid.
+ * @param range_l the number of items to process along the fourth dimension
+ * of the 6D grid.
+ * @param range_m the number of items to process along the fifth dimension
+ * of the 6D grid.
+ * @param range_n the number of items to process along the sixth dimension
+ * of the 6D grid.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
+void pthreadpool_parallelize_6d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_6d_t function,
+ void* context,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ size_t range_m,
+ size_t range_n,
+ uint32_t flags);
+
+/**
+ * Process items on a 6D grid with the specified maximum tile size along the
+ * last grid dimension.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * for (size_t i = 0; i < range_i; i++)
+ * for (size_t j = 0; j < range_j; j++)
+ * for (size_t k = 0; k < range_k; k++)
+ * for (size_t l = 0; l < range_l; l++)
+ * for (size_t m = 0; m < range_m; m++)
+ * for (size_t n = 0; n < range_n; n += tile_n)
+ * function(context, i, j, k, l, m, n, min(range_n - n, tile_n));
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param function the function to call for each tile.
+ * @param context the first argument passed to the specified function.
+ * @param range_i the number of items to process along the first dimension
+ * of the 6D grid.
+ * @param range_j the number of items to process along the second dimension
+ * of the 6D grid.
+ * @param range_k the number of items to process along the third dimension
+ * of the 6D grid.
+ * @param range_l the number of items to process along the fourth dimension
+ * of the 6D grid.
+ * @param range_m the number of items to process along the fifth dimension
+ * of the 6D grid.
+ * @param range_n the number of items to process along the sixth dimension
+ * of the 6D grid.
+ * @param tile_n the maximum number of items along the sixth dimension of
+ * the 6D grid to process in one function call.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
+void pthreadpool_parallelize_6d_tile_1d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_6d_tile_1d_t function,
+ void* context,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ size_t range_m,
+ size_t range_n,
+ size_t tile_n,
+ uint32_t flags);
+
+/**
+ * Process items on a 6D grid with the specified maximum tile size along the
+ * last two grid dimensions.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * for (size_t i = 0; i < range_i; i++)
+ * for (size_t j = 0; j < range_j; j++)
+ * for (size_t k = 0; k < range_k; k++)
+ * for (size_t l = 0; l < range_l; l++)
+ * for (size_t m = 0; m < range_m; m += tile_m)
+ * for (size_t n = 0; n < range_n; n += tile_n)
+ * function(context, i, j, k, l, m, n,
+ * min(range_m - m, tile_m), min(range_n - n, tile_n));
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param function the function to call for each tile.
+ * @param context the first argument passed to the specified function.
+ * @param range_i the number of items to process along the first dimension
+ * of the 6D grid.
+ * @param range_j the number of items to process along the second dimension
+ * of the 6D grid.
+ * @param range_k the number of items to process along the third dimension
+ * of the 6D grid.
+ * @param range_l the number of items to process along the fourth dimension
+ * of the 6D grid.
+ * @param range_m the number of items to process along the fifth dimension
+ * of the 6D grid.
+ * @param range_n the number of items to process along the sixth dimension
+ * of the 6D grid.
+ * @param tile_m the maximum number of items along the fifth dimension of
+ * the 6D grid to process in one function call.
+ * @param tile_n the maximum number of items along the sixth dimension of
+ * the 6D grid to process in one function call.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
void pthreadpool_parallelize_6d_tile_2d(
pthreadpool_t threadpool,
pthreadpool_task_6d_tile_2d_t function,
- void* argument,
+ void* context,
size_t range_i,
size_t range_j,
size_t range_k,
diff --git a/src/fastpath.c b/src/fastpath.c
new file mode 100644
index 0000000..b914ff0
--- /dev/null
+++ b/src/fastpath.c
@@ -0,0 +1,1327 @@
+/* Standard C headers */
+#include <assert.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#if PTHREADPOOL_USE_CPUINFO
+ #include <cpuinfo.h>
+#endif
+
+/* Dependencies */
+#include <fxdiv.h>
+
+/* Public library header */
+#include <pthreadpool.h>
+
+/* Internal library headers */
+#include "threadpool-atomics.h"
+#include "threadpool-common.h"
+#include "threadpool-object.h"
+#include "threadpool-utils.h"
+
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_1d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread)
+{
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_1d_t task = (pthreadpool_task_1d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ const size_t threads_count = threadpool->threads_count.value;
+ const size_t range_threshold = -threads_count;
+
+ /* Process thread's own range of items */
+ size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&thread->range_length) < range_threshold) {
+ task(argument, range_start++);
+ }
+
+ /* There still may be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_length) < range_threshold) {
+ const size_t index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ task(argument, index);
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
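+
+/*
+ * Editor's note on the idiom above (it recurs in every fastpath below):
+ * range_threshold = -threads_count is the unsigned wrap-around value
+ * SIZE_MAX - threads_count + 1. A worker claims an item only while the value
+ * returned by the atomic decrement of a range_length counter is still below
+ * this threshold. After the counter underflows past zero, each of the
+ * threads_count threads that can reach it performs at most one further
+ * decrement before observing a value >= range_threshold and stopping, so the
+ * counter never wraps far enough to look like remaining work. The owner
+ * consumes its range from the front (range_start++) while stealers take
+ * from the back (decrementing range_end), so no index is handed out twice.
+ * A small worked trace, assuming threads_count = 8 and 64-bit size_t:
+ *
+ *   range_threshold = 0xFFFFFFFFFFFFFFF8;  // (size_t) -8
+ *   // range_length = 3: decrements return 2, 1, 0 (three items claimed),
+ *   // then 0xFF...FF, 0xFF...FE, ... which are >= range_threshold: stop.
+ */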
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_1d_with_uarch_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread)
+{
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_1d_with_id_t task = (pthreadpool_task_1d_with_id_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ const uint32_t default_uarch_index = threadpool->params.parallelize_1d_with_uarch.default_uarch_index;
+ uint32_t uarch_index = default_uarch_index;
+ #if PTHREADPOOL_USE_CPUINFO
+ uarch_index = cpuinfo_get_current_uarch_index_with_default(default_uarch_index);
+ if (uarch_index > threadpool->params.parallelize_1d_with_uarch.max_uarch_index) {
+ uarch_index = default_uarch_index;
+ }
+ #endif
+
+ const size_t threads_count = threadpool->threads_count.value;
+ const size_t range_threshold = -threads_count;
+
+ /* Process thread's own range of items */
+ size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&thread->range_length) < range_threshold) {
+ task(argument, uarch_index, range_start++);
+ }
+
+ /* There still may be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_length) < range_threshold) {
+ const size_t index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ task(argument, uarch_index, index);
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_1d_tile_1d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread)
+{
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_1d_tile_1d_t task = (pthreadpool_task_1d_tile_1d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ const size_t threads_count = threadpool->threads_count.value;
+ const size_t range_threshold = -threads_count;
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const size_t tile = threadpool->params.parallelize_1d_tile_1d.tile;
+ size_t tile_start = range_start * tile;
+
+ const size_t range = threadpool->params.parallelize_1d_tile_1d.range;
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&thread->range_length) < range_threshold) {
+ task(argument, tile_start, min(range - tile_start, tile));
+ tile_start += tile;
+ }
+
+ /* There still may be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_length) < range_threshold) {
+ const size_t tile_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const size_t tile_start = tile_index * tile;
+ task(argument, tile_start, min(range - tile_start, tile));
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_2d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread)
+{
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_2d_t task = (pthreadpool_task_2d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ const size_t threads_count = threadpool->threads_count.value;
+ const size_t range_threshold = -threads_count;
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_2d.range_j;
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(range_start, range_j);
+ size_t i = index_i_j.quotient;
+ size_t j = index_i_j.remainder;
+
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&thread->range_length) < range_threshold) {
+ task(argument, i, j);
+ if (++j == range_j.value) {
+ j = 0;
+ i += 1;
+ }
+ }
+
+ /* There still may be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_length) < range_threshold) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(linear_index, range_j);
+ task(argument, index_i_j.quotient, index_i_j.remainder);
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
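+
+/*
+ * Editor's note: the fxdiv calls above are a strength-reduced form of the
+ * usual row-major index decomposition; the divisor is precomputed once per
+ * task so the hot loop avoids hardware division. A plain-division reference
+ * of the same mapping (illustrative, not the library's code):
+ *
+ *   size_t linear_index = 7;
+ *   size_t range_j = 3;
+ *   size_t i = linear_index / range_j;  // 2, the fxdiv quotient
+ *   size_t j = linear_index % range_j;  // 1, the fxdiv remainder
+ *
+ * fxdiv_init_size_t(range_j) builds the precomputed divisor, and
+ * fxdiv_divide_size_t(index, divisor) returns the same quotient/remainder
+ * pair as the division and modulo above.
+ */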
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_2d_tile_1d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread)
+{
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_2d_tile_1d_t task = (pthreadpool_task_2d_tile_1d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ const size_t threads_count = threadpool->threads_count.value;
+ const size_t range_threshold = -threads_count;
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t tile_range_j = threadpool->params.parallelize_2d_tile_1d.tile_range_j;
+ const struct fxdiv_result_size_t tile_index_i_j = fxdiv_divide_size_t(range_start, tile_range_j);
+ const size_t tile_j = threadpool->params.parallelize_2d_tile_1d.tile_j;
+ size_t i = tile_index_i_j.quotient;
+ size_t start_j = tile_index_i_j.remainder * tile_j;
+
+ const size_t range_j = threadpool->params.parallelize_2d_tile_1d.range_j;
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&thread->range_length) < range_threshold) {
+ task(argument, i, start_j, min(range_j - start_j, tile_j));
+ start_j += tile_j;
+ if (start_j >= range_j) {
+ start_j = 0;
+ i += 1;
+ }
+ }
+
+ /* There still may be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_length) < range_threshold) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t tile_index_i_j = fxdiv_divide_size_t(linear_index, tile_range_j);
+ const size_t start_j = tile_index_i_j.remainder * tile_j;
+ task(argument, tile_index_i_j.quotient, start_j, min(range_j - start_j, tile_j));
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_2d_tile_2d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread)
+{
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_2d_tile_2d_t task = (pthreadpool_task_2d_tile_2d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ const size_t threads_count = threadpool->threads_count.value;
+ const size_t range_threshold = -threads_count;
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t tile_range_j = threadpool->params.parallelize_2d_tile_2d.tile_range_j;
+ const struct fxdiv_result_size_t tile_index_i_j = fxdiv_divide_size_t(range_start, tile_range_j);
+ const size_t tile_i = threadpool->params.parallelize_2d_tile_2d.tile_i;
+ const size_t tile_j = threadpool->params.parallelize_2d_tile_2d.tile_j;
+ size_t start_i = tile_index_i_j.quotient * tile_i;
+ size_t start_j = tile_index_i_j.remainder * tile_j;
+
+ const size_t range_i = threadpool->params.parallelize_2d_tile_2d.range_i;
+ const size_t range_j = threadpool->params.parallelize_2d_tile_2d.range_j;
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&thread->range_length) < range_threshold) {
+ task(argument, start_i, start_j, min(range_i - start_i, tile_i), min(range_j - start_j, tile_j));
+ start_j += tile_j;
+ if (start_j >= range_j) {
+ start_j = 0;
+ start_i += tile_i;
+ }
+ }
+
+ /* There still may be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_length) < range_threshold) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t tile_index_i_j = fxdiv_divide_size_t(linear_index, tile_range_j);
+ const size_t start_i = tile_index_i_j.quotient * tile_i;
+ const size_t start_j = tile_index_i_j.remainder * tile_j;
+ task(argument, start_i, start_j, min(range_i - start_i, tile_i), min(range_j - start_j, tile_j));
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_2d_tile_2d_with_uarch_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread)
+{
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_2d_tile_2d_with_id_t task = (pthreadpool_task_2d_tile_2d_with_id_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ const uint32_t default_uarch_index = threadpool->params.parallelize_2d_tile_2d_with_uarch.default_uarch_index;
+ uint32_t uarch_index = default_uarch_index;
+ #if PTHREADPOOL_USE_CPUINFO
+ uarch_index = cpuinfo_get_current_uarch_index_with_default(default_uarch_index);
+ if (uarch_index > threadpool->params.parallelize_2d_tile_2d_with_uarch.max_uarch_index) {
+ uarch_index = default_uarch_index;
+ }
+ #endif
+
+ const size_t threads_count = threadpool->threads_count.value;
+ const size_t range_threshold = -threads_count;
+
+ /* Process thread's own range of items */
+ const struct fxdiv_divisor_size_t tile_range_j = threadpool->params.parallelize_2d_tile_2d_with_uarch.tile_range_j;
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_result_size_t index = fxdiv_divide_size_t(range_start, tile_range_j);
+ const size_t range_i = threadpool->params.parallelize_2d_tile_2d_with_uarch.range_i;
+ const size_t tile_i = threadpool->params.parallelize_2d_tile_2d_with_uarch.tile_i;
+ const size_t range_j = threadpool->params.parallelize_2d_tile_2d_with_uarch.range_j;
+ const size_t tile_j = threadpool->params.parallelize_2d_tile_2d_with_uarch.tile_j;
+ size_t start_i = index.quotient * tile_i;
+ size_t start_j = index.remainder * tile_j;
+
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&thread->range_length) < range_threshold) {
+ task(argument, uarch_index, start_i, start_j, min(range_i - start_i, tile_i), min(range_j - start_j, tile_j));
+ start_j += tile_j;
+ if (start_j >= range_j) {
+ start_j = 0;
+ start_i += tile_i;
+ }
+ }
+
+ /* There still may be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_length) < range_threshold) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t tile_index_i_j = fxdiv_divide_size_t(linear_index, tile_range_j);
+ const size_t start_i = tile_index_i_j.quotient * tile_i;
+ const size_t start_j = tile_index_i_j.remainder * tile_j;
+ task(argument, uarch_index, start_i, start_j, min(range_i - start_i, tile_i), min(range_j - start_j, tile_j));
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_3d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread)
+{
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_3d_t task = (pthreadpool_task_3d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ const size_t threads_count = threadpool->threads_count.value;
+ const size_t range_threshold = -threads_count;
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t range_k = threadpool->params.parallelize_3d.range_k;
+ const struct fxdiv_result_size_t index_ij_k = fxdiv_divide_size_t(range_start, range_k);
+ const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_3d.range_j;
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_k.quotient, range_j);
+ size_t i = index_i_j.quotient;
+ size_t j = index_i_j.remainder;
+ size_t k = index_ij_k.remainder;
+
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&thread->range_length) < range_threshold) {
+ task(argument, i, j, k);
+ if (++k == range_k.value) {
+ k = 0;
+ if (++j == range_j.value) {
+ j = 0;
+ i += 1;
+ }
+ }
+ }
+
+ /* There still may be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_length) < range_threshold) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t index_ij_k = fxdiv_divide_size_t(linear_index, range_k);
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_k.quotient, range_j);
+ task(argument, index_i_j.quotient, index_i_j.remainder, index_ij_k.remainder);
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_3d_tile_1d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread)
+{
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_3d_tile_1d_t task = (pthreadpool_task_3d_tile_1d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ const size_t threads_count = threadpool->threads_count.value;
+ const size_t range_threshold = -threads_count;
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t tile_range_k = threadpool->params.parallelize_3d_tile_1d.tile_range_k;
+ const struct fxdiv_result_size_t tile_index_ij_k = fxdiv_divide_size_t(range_start, tile_range_k);
+ const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_3d_tile_1d.range_j;
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(tile_index_ij_k.quotient, range_j);
+ const size_t tile_k = threadpool->params.parallelize_3d_tile_1d.tile_k;
+ size_t i = index_i_j.quotient;
+ size_t j = index_i_j.remainder;
+ size_t start_k = tile_index_ij_k.remainder * tile_k;
+
+ const size_t range_k = threadpool->params.parallelize_3d_tile_1d.range_k;
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&thread->range_length) < range_threshold) {
+ task(argument, i, j, start_k, min(range_k - start_k, tile_k));
+ start_k += tile_k;
+ if (start_k >= range_k) {
+ start_k = 0;
+ if (++j == range_j.value) {
+ j = 0;
+ i += 1;
+ }
+ }
+ }
+
+ /* There still may be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_length) < range_threshold) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t tile_index_ij_k = fxdiv_divide_size_t(linear_index, tile_range_k);
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(tile_index_ij_k.quotient, range_j);
+ const size_t start_k = tile_index_ij_k.remainder * tile_k;
+ task(argument, index_i_j.quotient, index_i_j.remainder, start_k, min(range_k - start_k, tile_k));
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_3d_tile_2d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread)
+{
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_3d_tile_2d_t task = (pthreadpool_task_3d_tile_2d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ const size_t threads_count = threadpool->threads_count.value;
+ const size_t range_threshold = -threads_count;
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t tile_range_k = threadpool->params.parallelize_3d_tile_2d.tile_range_k;
+ const struct fxdiv_result_size_t tile_index_ij_k = fxdiv_divide_size_t(range_start, tile_range_k);
+ const struct fxdiv_divisor_size_t tile_range_j = threadpool->params.parallelize_3d_tile_2d.tile_range_j;
+ const struct fxdiv_result_size_t tile_index_i_j = fxdiv_divide_size_t(tile_index_ij_k.quotient, tile_range_j);
+ const size_t tile_j = threadpool->params.parallelize_3d_tile_2d.tile_j;
+ const size_t tile_k = threadpool->params.parallelize_3d_tile_2d.tile_k;
+ size_t i = tile_index_i_j.quotient;
+ size_t start_j = tile_index_i_j.remainder * tile_j;
+ size_t start_k = tile_index_ij_k.remainder * tile_k;
+
+ const size_t range_k = threadpool->params.parallelize_3d_tile_2d.range_k;
+ const size_t range_j = threadpool->params.parallelize_3d_tile_2d.range_j;
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&thread->range_length) < range_threshold) {
+ task(argument, i, start_j, start_k, min(range_j - start_j, tile_j), min(range_k - start_k, tile_k));
+ start_k += tile_k;
+ if (start_k >= range_k) {
+ start_k = 0;
+ start_j += tile_j;
+ if (start_j >= range_j) {
+ start_j = 0;
+ i += 1;
+ }
+ }
+ }
+
+ /* There still may be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_length) < range_threshold) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t tile_index_ij_k = fxdiv_divide_size_t(linear_index, tile_range_k);
+ const struct fxdiv_result_size_t tile_index_i_j = fxdiv_divide_size_t(tile_index_ij_k.quotient, tile_range_j);
+ const size_t start_j = tile_index_i_j.remainder * tile_j;
+ const size_t start_k = tile_index_ij_k.remainder * tile_k;
+ task(argument, tile_index_i_j.quotient, start_j, start_k, min(range_j - start_j, tile_j), min(range_k - start_k, tile_k));
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_3d_tile_2d_with_uarch_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread)
+{
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_3d_tile_2d_with_id_t task = (pthreadpool_task_3d_tile_2d_with_id_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ const uint32_t default_uarch_index = threadpool->params.parallelize_3d_tile_2d_with_uarch.default_uarch_index;
+ uint32_t uarch_index = default_uarch_index;
+ #if PTHREADPOOL_USE_CPUINFO
+ uarch_index = cpuinfo_get_current_uarch_index_with_default(default_uarch_index);
+ if (uarch_index > threadpool->params.parallelize_3d_tile_2d_with_uarch.max_uarch_index) {
+ uarch_index = default_uarch_index;
+ }
+ #endif
+
+ const size_t threads_count = threadpool->threads_count.value;
+ const size_t range_threshold = -threads_count;
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t tile_range_k = threadpool->params.parallelize_3d_tile_2d_with_uarch.tile_range_k;
+ const struct fxdiv_result_size_t tile_index_ij_k = fxdiv_divide_size_t(range_start, tile_range_k);
+ const struct fxdiv_divisor_size_t tile_range_j = threadpool->params.parallelize_3d_tile_2d_with_uarch.tile_range_j;
+ const struct fxdiv_result_size_t tile_index_i_j = fxdiv_divide_size_t(tile_index_ij_k.quotient, tile_range_j);
+ const size_t tile_j = threadpool->params.parallelize_3d_tile_2d_with_uarch.tile_j;
+ const size_t tile_k = threadpool->params.parallelize_3d_tile_2d_with_uarch.tile_k;
+ size_t i = tile_index_i_j.quotient;
+ size_t start_j = tile_index_i_j.remainder * tile_j;
+ size_t start_k = tile_index_ij_k.remainder * tile_k;
+
+ const size_t range_k = threadpool->params.parallelize_3d_tile_2d_with_uarch.range_k;
+ const size_t range_j = threadpool->params.parallelize_3d_tile_2d_with_uarch.range_j;
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&thread->range_length) < range_threshold) {
+ task(argument, uarch_index, i, start_j, start_k, min(range_j - start_j, tile_j), min(range_k - start_k, tile_k));
+ start_k += tile_k;
+ if (start_k >= range_k) {
+ start_k = 0;
+ start_j += tile_j;
+ if (start_j >= range_j) {
+ start_j = 0;
+ i += 1;
+ }
+ }
+ }
+
+ /* There still may be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_length) < range_threshold) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t tile_index_ij_k = fxdiv_divide_size_t(linear_index, tile_range_k);
+ const struct fxdiv_result_size_t tile_index_i_j = fxdiv_divide_size_t(tile_index_ij_k.quotient, tile_range_j);
+ const size_t start_j = tile_index_i_j.remainder * tile_j;
+ const size_t start_k = tile_index_ij_k.remainder * tile_k;
+ task(argument, uarch_index, tile_index_i_j.quotient, start_j, start_k, min(range_j - start_j, tile_j), min(range_k - start_k, tile_k));
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_4d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread)
+{
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_4d_t task = (pthreadpool_task_4d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ const size_t threads_count = threadpool->threads_count.value;
+ const size_t range_threshold = -threads_count;
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t range_kl = threadpool->params.parallelize_4d.range_kl;
+ const struct fxdiv_result_size_t index_ij_kl = fxdiv_divide_size_t(range_start, range_kl);
+ const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_4d.range_j;
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_kl.quotient, range_j);
+ const struct fxdiv_divisor_size_t range_l = threadpool->params.parallelize_4d.range_l;
+ const struct fxdiv_result_size_t index_k_l = fxdiv_divide_size_t(index_ij_kl.remainder, range_l);
+ size_t i = index_i_j.quotient;
+ size_t j = index_i_j.remainder;
+ size_t k = index_k_l.quotient;
+ size_t l = index_k_l.remainder;
+
+ const size_t range_k = threadpool->params.parallelize_4d.range_k;
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&thread->range_length) < range_threshold) {
+ task(argument, i, j, k, l);
+ if (++l == range_l.value) {
+ l = 0;
+ if (++k == range_k) {
+ k = 0;
+ if (++j == range_j.value) {
+ j = 0;
+ i += 1;
+ }
+ }
+ }
+ }
+
+ /* There still may be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_length) < range_threshold) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t index_ij_kl = fxdiv_divide_size_t(linear_index, range_kl);
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_kl.quotient, range_j);
+ const struct fxdiv_result_size_t index_k_l = fxdiv_divide_size_t(index_ij_kl.remainder, range_l);
+ task(argument, index_i_j.quotient, index_i_j.remainder, index_k_l.quotient, index_k_l.remainder);
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_4d_tile_1d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread)
+{
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_4d_tile_1d_t task = (pthreadpool_task_4d_tile_1d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ const size_t threads_count = threadpool->threads_count.value;
+ const size_t range_threshold = -threads_count;
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t tile_range_kl = threadpool->params.parallelize_4d_tile_1d.tile_range_kl;
+ const struct fxdiv_result_size_t tile_index_ij_kl = fxdiv_divide_size_t(range_start, tile_range_kl);
+ const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_4d_tile_1d.range_j;
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(tile_index_ij_kl.quotient, range_j);
+ const struct fxdiv_divisor_size_t tile_range_l = threadpool->params.parallelize_4d_tile_1d.tile_range_l;
+ const struct fxdiv_result_size_t tile_index_k_l = fxdiv_divide_size_t(tile_index_ij_kl.remainder, tile_range_l);
+ const size_t tile_l = threadpool->params.parallelize_4d_tile_1d.tile_l;
+ size_t i = index_i_j.quotient;
+ size_t j = index_i_j.remainder;
+ size_t k = tile_index_k_l.quotient;
+ size_t start_l = tile_index_k_l.remainder * tile_l;
+
+ const size_t range_l = threadpool->params.parallelize_4d_tile_1d.range_l;
+ const size_t range_k = threadpool->params.parallelize_4d_tile_1d.range_k;
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&thread->range_length) < range_threshold) {
+ task(argument, i, j, k, start_l, min(range_l - start_l, tile_l));
+ start_l += tile_l;
+ if (start_l >= range_l) {
+ start_l = 0;
+ if (++k == range_k) {
+ k = 0;
+ if (++j == range_j.value) {
+ j = 0;
+ i += 1;
+ }
+ }
+ }
+ }
+
+ /* There still may be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_length) < range_threshold) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t tile_index_ij_kl = fxdiv_divide_size_t(linear_index, tile_range_kl);
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(tile_index_ij_kl.quotient, range_j);
+ const struct fxdiv_result_size_t tile_index_k_l = fxdiv_divide_size_t(tile_index_ij_kl.remainder, tile_range_l);
+ const size_t start_l = tile_index_k_l.remainder * tile_l;
+ task(argument, index_i_j.quotient, index_i_j.remainder, tile_index_k_l.quotient, start_l, min(range_l - start_l, tile_l));
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_4d_tile_2d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread)
+{
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_4d_tile_2d_t task = (pthreadpool_task_4d_tile_2d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ const size_t threads_count = threadpool->threads_count.value;
+ const size_t range_threshold = -threads_count;
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t tile_range_kl = threadpool->params.parallelize_4d_tile_2d.tile_range_kl;
+ const struct fxdiv_result_size_t tile_index_ij_kl = fxdiv_divide_size_t(range_start, tile_range_kl);
+ const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_4d_tile_2d.range_j;
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(tile_index_ij_kl.quotient, range_j);
+ const struct fxdiv_divisor_size_t tile_range_l = threadpool->params.parallelize_4d_tile_2d.tile_range_l;
+ const struct fxdiv_result_size_t tile_index_k_l = fxdiv_divide_size_t(tile_index_ij_kl.remainder, tile_range_l);
+ const size_t tile_k = threadpool->params.parallelize_4d_tile_2d.tile_k;
+ const size_t tile_l = threadpool->params.parallelize_4d_tile_2d.tile_l;
+ size_t i = index_i_j.quotient;
+ size_t j = index_i_j.remainder;
+ size_t start_k = tile_index_k_l.quotient * tile_k;
+ size_t start_l = tile_index_k_l.remainder * tile_l;
+
+ const size_t range_l = threadpool->params.parallelize_4d_tile_2d.range_l;
+ const size_t range_k = threadpool->params.parallelize_4d_tile_2d.range_k;
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&thread->range_length) < range_threshold) {
+ task(argument, i, j, start_k, start_l, min(range_k - start_k, tile_k), min(range_l - start_l, tile_l));
+ start_l += tile_l;
+ if (start_l >= range_l) {
+ start_l = 0;
+ start_k += tile_k;
+ if (start_k >= range_k) {
+ start_k = 0;
+ if (++j == range_j.value) {
+ j = 0;
+ i += 1;
+ }
+ }
+ }
+ }
+
+ /* There may still be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_length) < range_threshold) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t tile_index_ij_kl = fxdiv_divide_size_t(linear_index, tile_range_kl);
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(tile_index_ij_kl.quotient, range_j);
+ const struct fxdiv_result_size_t tile_index_k_l = fxdiv_divide_size_t(tile_index_ij_kl.remainder, tile_range_l);
+ const size_t start_k = tile_index_k_l.quotient * tile_k;
+ const size_t start_l = tile_index_k_l.remainder * tile_l;
+ task(argument, index_i_j.quotient, index_i_j.remainder, start_k, start_l, min(range_k - start_k, tile_k), min(range_l - start_l, tile_l));
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
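+
+/*
+ * Caller-side sketch (hypothetical task and variable names; entry point
+ * as declared in pthreadpool.h): tiling the two innermost dimensions of
+ * a 4D loop nest.
+ *
+ *   static void process_block(void* ctx, size_t i, size_t j,
+ *       size_t start_k, size_t start_l, size_t tile_k, size_t tile_l) {
+ *     // handle the [start_k, start_k + tile_k) x [start_l, start_l + tile_l) block
+ *   }
+ *
+ *   pthreadpool_parallelize_4d_tile_2d(pool, process_block, ctx,
+ *       range_i, range_j, range_k, range_l, tile_k, tile_l, 0);
+ */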
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_4d_tile_2d_with_uarch_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread)
+{
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_4d_tile_2d_with_id_t task = (pthreadpool_task_4d_tile_2d_with_id_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ const uint32_t default_uarch_index = threadpool->params.parallelize_4d_tile_2d_with_uarch.default_uarch_index;
+ uint32_t uarch_index = default_uarch_index;
+ #if PTHREADPOOL_USE_CPUINFO
+ uarch_index = cpuinfo_get_current_uarch_index_with_default(default_uarch_index);
+ if (uarch_index > threadpool->params.parallelize_4d_tile_2d_with_uarch.max_uarch_index) {
+ uarch_index = default_uarch_index;
+ }
+ #endif
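+ /*
+ * The uarch index identifies the microarchitecture of the core running
+ * the task, so the task can select a code path tuned for that core
+ * (e.g. big vs. little cores on heterogeneous ARM systems). Without
+ * cpuinfo support the caller-provided default index is used as-is.
+ */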
+
+ const size_t threads_count = threadpool->threads_count.value;
+ const size_t range_threshold = -threads_count;
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t tile_range_kl = threadpool->params.parallelize_4d_tile_2d_with_uarch.tile_range_kl;
+ const struct fxdiv_result_size_t tile_index_ij_kl = fxdiv_divide_size_t(range_start, tile_range_kl);
+ const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_4d_tile_2d_with_uarch.range_j;
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(tile_index_ij_kl.quotient, range_j);
+ const struct fxdiv_divisor_size_t tile_range_l = threadpool->params.parallelize_4d_tile_2d_with_uarch.tile_range_l;
+ const struct fxdiv_result_size_t tile_index_k_l = fxdiv_divide_size_t(tile_index_ij_kl.remainder, tile_range_l);
+ const size_t tile_k = threadpool->params.parallelize_4d_tile_2d_with_uarch.tile_k;
+ const size_t tile_l = threadpool->params.parallelize_4d_tile_2d_with_uarch.tile_l;
+ size_t i = index_i_j.quotient;
+ size_t j = index_i_j.remainder;
+ size_t start_k = tile_index_k_l.quotient * tile_k;
+ size_t start_l = tile_index_k_l.remainder * tile_l;
+
+ const size_t range_l = threadpool->params.parallelize_4d_tile_2d_with_uarch.range_l;
+ const size_t range_k = threadpool->params.parallelize_4d_tile_2d_with_uarch.range_k;
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&thread->range_length) < range_threshold) {
+ task(argument, uarch_index, i, j, start_k, start_l, min(range_k - start_k, tile_k), min(range_l - start_l, tile_l));
+ start_l += tile_l;
+ if (start_l >= range_l) {
+ start_l = 0;
+ start_k += tile_k;
+ if (start_k >= range_k) {
+ start_k = 0;
+ if (++j == range_j.value) {
+ j = 0;
+ i += 1;
+ }
+ }
+ }
+ }
+
+ /* There may still be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_length) < range_threshold) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t tile_index_ij_kl = fxdiv_divide_size_t(linear_index, tile_range_kl);
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(tile_index_ij_kl.quotient, range_j);
+ const struct fxdiv_result_size_t tile_index_k_l = fxdiv_divide_size_t(tile_index_ij_kl.remainder, tile_range_l);
+ const size_t start_k = tile_index_k_l.quotient * tile_k;
+ const size_t start_l = tile_index_k_l.remainder * tile_l;
+ task(argument, uarch_index, index_i_j.quotient, index_i_j.remainder, start_k, start_l, min(range_k - start_k, tile_k), min(range_l - start_l, tile_l));
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_5d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread)
+{
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_5d_t task = (pthreadpool_task_5d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ const size_t threads_count = threadpool->threads_count.value;
+ const size_t range_threshold = -threads_count;
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t range_lm = threadpool->params.parallelize_5d.range_lm;
+ const struct fxdiv_result_size_t index_ijk_lm = fxdiv_divide_size_t(range_start, range_lm);
+ const struct fxdiv_divisor_size_t range_k = threadpool->params.parallelize_5d.range_k;
+ const struct fxdiv_result_size_t index_ij_k = fxdiv_divide_size_t(index_ijk_lm.quotient, range_k);
+ const struct fxdiv_divisor_size_t range_m = threadpool->params.parallelize_5d.range_m;
+ const struct fxdiv_result_size_t index_l_m = fxdiv_divide_size_t(index_ijk_lm.remainder, range_m);
+ const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_5d.range_j;
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_k.quotient, range_j);
+ size_t i = index_i_j.quotient;
+ size_t j = index_i_j.remainder;
+ size_t k = index_ij_k.remainder;
+ size_t l = index_l_m.quotient;
+ size_t m = index_l_m.remainder;
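+ /*
+ * Worked example of the fxdiv decomposition above: with ranges
+ * (i, j, k, l, m) = (2, 3, 4, 5, 6), range_lm is 30, so linear index
+ * 137 = 4 * 30 + 17 decomposes into (i, j, k) = (0, 1, 0) from the
+ * quotient 4 and (l, m) = (2, 5) from the remainder 17.
+ */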
+
+ const size_t range_l = threadpool->params.parallelize_5d.range_l;
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&thread->range_length) < range_threshold) {
+ task(argument, i, j, k, l, m);
+ if (++m == range_m.value) {
+ m = 0;
+ if (++l == range_l) {
+ l = 0;
+ if (++k == range_k.value) {
+ k = 0;
+ if (++j == range_j.value) {
+ j = 0;
+ i += 1;
+ }
+ }
+ }
+ }
+ }
+
+ /* There may still be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_length) < range_threshold) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t index_ijk_lm = fxdiv_divide_size_t(linear_index, range_lm);
+ const struct fxdiv_result_size_t index_ij_k = fxdiv_divide_size_t(index_ijk_lm.quotient, range_k);
+ const struct fxdiv_result_size_t index_l_m = fxdiv_divide_size_t(index_ijk_lm.remainder, range_m);
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_k.quotient, range_j);
+ task(argument, index_i_j.quotient, index_i_j.remainder, index_ij_k.remainder, index_l_m.quotient, index_l_m.remainder);
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_5d_tile_1d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread)
+{
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_5d_tile_1d_t task = (pthreadpool_task_5d_tile_1d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ const size_t threads_count = threadpool->threads_count.value;
+ const size_t range_threshold = -threads_count;
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t tile_range_m = threadpool->params.parallelize_5d_tile_1d.tile_range_m;
+ const struct fxdiv_result_size_t tile_index_ijkl_m = fxdiv_divide_size_t(range_start, tile_range_m);
+ const struct fxdiv_divisor_size_t range_kl = threadpool->params.parallelize_5d_tile_1d.range_kl;
+ const struct fxdiv_result_size_t index_ij_kl = fxdiv_divide_size_t(tile_index_ijkl_m.quotient, range_kl);
+ const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_5d_tile_1d.range_j;
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_kl.quotient, range_j);
+ const struct fxdiv_divisor_size_t range_l = threadpool->params.parallelize_5d_tile_1d.range_l;
+ const struct fxdiv_result_size_t index_k_l = fxdiv_divide_size_t(index_ij_kl.remainder, range_l);
+ const size_t tile_m = threadpool->params.parallelize_5d_tile_1d.tile_m;
+ size_t i = index_i_j.quotient;
+ size_t j = index_i_j.remainder;
+ size_t k = index_k_l.quotient;
+ size_t l = index_k_l.remainder;
+ size_t start_m = tile_index_ijkl_m.remainder * tile_m;
+
+ const size_t range_m = threadpool->params.parallelize_5d_tile_1d.range_m;
+ const size_t range_k = threadpool->params.parallelize_5d_tile_1d.range_k;
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&thread->range_length) < range_threshold) {
+ task(argument, i, j, k, l, start_m, min(range_m - start_m, tile_m));
+ start_m += tile_m;
+ if (start_m >= range_m) {
+ start_m = 0;
+ if (++l == range_l.value) {
+ l = 0;
+ if (++k == range_k) {
+ k = 0;
+ if (++j == range_j.value) {
+ j = 0;
+ i += 1;
+ }
+ }
+ }
+ }
+ }
+
+ /* There may still be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_length) < range_threshold) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t tile_index_ijkl_m = fxdiv_divide_size_t(linear_index, tile_range_m);
+ const struct fxdiv_result_size_t index_ij_kl = fxdiv_divide_size_t(tile_index_ijkl_m.quotient, range_kl);
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_kl.quotient, range_j);
+ const struct fxdiv_result_size_t index_k_l = fxdiv_divide_size_t(index_ij_kl.remainder, range_l);
+ size_t start_m = tile_index_ijkl_m.remainder * tile_m;
+ task(argument, index_i_j.quotient, index_i_j.remainder, index_k_l.quotient, index_k_l.remainder, start_m,
+ min(range_m - start_m, tile_m));
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_5d_tile_2d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread)
+{
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_5d_tile_2d_t task = (pthreadpool_task_5d_tile_2d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ const size_t threads_count = threadpool->threads_count.value;
+ const size_t range_threshold = -threads_count;
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t tile_range_lm = threadpool->params.parallelize_5d_tile_2d.tile_range_lm;
+ const struct fxdiv_result_size_t tile_index_ijk_lm = fxdiv_divide_size_t(range_start, tile_range_lm);
+ const struct fxdiv_divisor_size_t range_k = threadpool->params.parallelize_5d_tile_2d.range_k;
+ const struct fxdiv_result_size_t index_ij_k = fxdiv_divide_size_t(tile_index_ijk_lm.quotient, range_k);
+ const struct fxdiv_divisor_size_t tile_range_m = threadpool->params.parallelize_5d_tile_2d.tile_range_m;
+ const struct fxdiv_result_size_t tile_index_l_m = fxdiv_divide_size_t(tile_index_ijk_lm.remainder, tile_range_m);
+ const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_5d_tile_2d.range_j;
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_k.quotient, range_j);
+ const size_t tile_l = threadpool->params.parallelize_5d_tile_2d.tile_l;
+ const size_t tile_m = threadpool->params.parallelize_5d_tile_2d.tile_m;
+ size_t i = index_i_j.quotient;
+ size_t j = index_i_j.remainder;
+ size_t k = index_ij_k.remainder;
+ size_t start_l = tile_index_l_m.quotient * tile_l;
+ size_t start_m = tile_index_l_m.remainder * tile_m;
+
+ const size_t range_m = threadpool->params.parallelize_5d_tile_2d.range_m;
+ const size_t range_l = threadpool->params.parallelize_5d_tile_2d.range_l;
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&thread->range_length) < range_threshold) {
+ task(argument, i, j, k, start_l, start_m, min(range_l - start_l, tile_l), min(range_m - start_m, tile_m));
+ start_m += tile_m;
+ if (start_m >= range_m) {
+ start_m = 0;
+ start_l += tile_l;
+ if (start_l >= range_l) {
+ start_l = 0;
+ if (++k == range_k.value) {
+ k = 0;
+ if (++j == range_j.value) {
+ j = 0;
+ i += 1;
+ }
+ }
+ }
+ }
+ }
+
+ /* There may still be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_length) < range_threshold) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t tile_index_ijk_lm = fxdiv_divide_size_t(linear_index, tile_range_lm);
+ const struct fxdiv_result_size_t index_ij_k = fxdiv_divide_size_t(tile_index_ijk_lm.quotient, range_k);
+ const struct fxdiv_result_size_t tile_index_l_m = fxdiv_divide_size_t(tile_index_ijk_lm.remainder, tile_range_m);
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_k.quotient, range_j);
+ const size_t start_l = tile_index_l_m.quotient * tile_l;
+ const size_t start_m = tile_index_l_m.remainder * tile_m;
+ task(argument, index_i_j.quotient, index_i_j.remainder, index_ij_k.remainder,
+ start_l, start_m, min(range_l - start_l, tile_l), min(range_m - start_m, tile_m));
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_6d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread)
+{
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_6d_t task = (pthreadpool_task_6d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ const size_t threads_count = threadpool->threads_count.value;
+ const size_t range_threshold = -threads_count;
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t range_lmn = threadpool->params.parallelize_6d.range_lmn;
+ const struct fxdiv_result_size_t index_ijk_lmn = fxdiv_divide_size_t(range_start, range_lmn);
+ const struct fxdiv_divisor_size_t range_k = threadpool->params.parallelize_6d.range_k;
+ const struct fxdiv_result_size_t index_ij_k = fxdiv_divide_size_t(index_ijk_lmn.quotient, range_k);
+ const struct fxdiv_divisor_size_t range_n = threadpool->params.parallelize_6d.range_n;
+ const struct fxdiv_result_size_t index_lm_n = fxdiv_divide_size_t(index_ijk_lmn.remainder, range_n);
+ const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_6d.range_j;
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_k.quotient, range_j);
+ const struct fxdiv_divisor_size_t range_m = threadpool->params.parallelize_6d.range_m;
+ const struct fxdiv_result_size_t index_l_m = fxdiv_divide_size_t(index_lm_n.quotient, range_m);
+ size_t i = index_i_j.quotient;
+ size_t j = index_i_j.remainder;
+ size_t k = index_ij_k.remainder;
+ size_t l = index_l_m.quotient;
+ size_t m = index_l_m.remainder;
+ size_t n = index_lm_n.remainder;
+
+ const size_t range_l = threadpool->params.parallelize_6d.range_l;
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&thread->range_length) < range_threshold) {
+ task(argument, i, j, k, l, m, n);
+ if (++n == range_n.value) {
+ n = 0;
+ if (++m == range_m.value) {
+ m = 0;
+ if (++l == range_l) {
+ l = 0;
+ if (++k == range_k.value) {
+ k = 0;
+ if (++j == range_j.value) {
+ j = 0;
+ i += 1;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ /* There may still be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_length) < range_threshold) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t index_ijk_lmn = fxdiv_divide_size_t(linear_index, range_lmn);
+ const struct fxdiv_result_size_t index_ij_k = fxdiv_divide_size_t(index_ijk_lmn.quotient, range_k);
+ const struct fxdiv_result_size_t index_lm_n = fxdiv_divide_size_t(index_ijk_lmn.remainder, range_n);
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_k.quotient, range_j);
+ const struct fxdiv_result_size_t index_l_m = fxdiv_divide_size_t(index_lm_n.quotient, range_m);
+ task(argument, index_i_j.quotient, index_i_j.remainder, index_ij_k.remainder, index_l_m.quotient, index_l_m.remainder, index_lm_n.remainder);
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_6d_tile_1d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread)
+{
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_6d_tile_1d_t task = (pthreadpool_task_6d_tile_1d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ const size_t threads_count = threadpool->threads_count.value;
+ const size_t range_threshold = -threads_count;
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t tile_range_lmn = threadpool->params.parallelize_6d_tile_1d.tile_range_lmn;
+ const struct fxdiv_result_size_t tile_index_ijk_lmn = fxdiv_divide_size_t(range_start, tile_range_lmn);
+ const struct fxdiv_divisor_size_t range_k = threadpool->params.parallelize_6d_tile_1d.range_k;
+ const struct fxdiv_result_size_t index_ij_k = fxdiv_divide_size_t(tile_index_ijk_lmn.quotient, range_k);
+ const struct fxdiv_divisor_size_t tile_range_n = threadpool->params.parallelize_6d_tile_1d.tile_range_n;
+ const struct fxdiv_result_size_t tile_index_lm_n = fxdiv_divide_size_t(tile_index_ijk_lmn.remainder, tile_range_n);
+ const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_6d_tile_1d.range_j;
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_k.quotient, range_j);
+ const struct fxdiv_divisor_size_t range_m = threadpool->params.parallelize_6d_tile_1d.range_m;
+ const struct fxdiv_result_size_t index_l_m = fxdiv_divide_size_t(tile_index_lm_n.quotient, range_m);
+ const size_t tile_n = threadpool->params.parallelize_6d_tile_1d.tile_n;
+ size_t i = index_i_j.quotient;
+ size_t j = index_i_j.remainder;
+ size_t k = index_ij_k.remainder;
+ size_t l = index_l_m.quotient;
+ size_t m = index_l_m.remainder;
+ size_t start_n = tile_index_lm_n.remainder * tile_n;
+
+ const size_t range_n = threadpool->params.parallelize_6d_tile_1d.range_n;
+ const size_t range_l = threadpool->params.parallelize_6d_tile_1d.range_l;
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&thread->range_length) < range_threshold) {
+ task(argument, i, j, k, l, m, start_n, min(range_n - start_n, tile_n));
+ start_n += tile_n;
+ if (start_n >= range_n) {
+ start_n = 0;
+ if (++m == range_m.value) {
+ m = 0;
+ if (++l == range_l) {
+ l = 0;
+ if (++k == range_k.value) {
+ k = 0;
+ if (++j == range_j.value) {
+ j = 0;
+ i += 1;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ /* There may still be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_length) < range_threshold) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t tile_index_ijk_lmn = fxdiv_divide_size_t(linear_index, tile_range_lmn);
+ const struct fxdiv_result_size_t index_ij_k = fxdiv_divide_size_t(tile_index_ijk_lmn.quotient, range_k);
+ const struct fxdiv_result_size_t tile_index_lm_n = fxdiv_divide_size_t(tile_index_ijk_lmn.remainder, tile_range_n);
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_k.quotient, range_j);
+ const struct fxdiv_result_size_t index_l_m = fxdiv_divide_size_t(tile_index_lm_n.quotient, range_m);
+ const size_t start_n = tile_index_lm_n.remainder * tile_n;
+ task(argument, index_i_j.quotient, index_i_j.remainder, index_ij_k.remainder, index_l_m.quotient, index_l_m.remainder,
+ start_n, min(range_n - start_n, tile_n));
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_6d_tile_2d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread)
+{
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_6d_tile_2d_t task = (pthreadpool_task_6d_tile_2d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ const size_t threads_count = threadpool->threads_count.value;
+ const size_t range_threshold = -threads_count;
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t tile_range_mn = threadpool->params.parallelize_6d_tile_2d.tile_range_mn;
+ const struct fxdiv_result_size_t tile_index_ijkl_mn = fxdiv_divide_size_t(range_start, tile_range_mn);
+ const struct fxdiv_divisor_size_t range_kl = threadpool->params.parallelize_6d_tile_2d.range_kl;
+ const struct fxdiv_result_size_t index_ij_kl = fxdiv_divide_size_t(tile_index_ijkl_mn.quotient, range_kl);
+ const struct fxdiv_divisor_size_t tile_range_n = threadpool->params.parallelize_6d_tile_2d.tile_range_n;
+ const struct fxdiv_result_size_t tile_index_m_n = fxdiv_divide_size_t(tile_index_ijkl_mn.remainder, tile_range_n);
+ const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_6d_tile_2d.range_j;
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_kl.quotient, range_j);
+ const struct fxdiv_divisor_size_t range_l = threadpool->params.parallelize_6d_tile_2d.range_l;
+ const struct fxdiv_result_size_t index_k_l = fxdiv_divide_size_t(index_ij_kl.remainder, range_l);
+ const size_t tile_m = threadpool->params.parallelize_6d_tile_2d.tile_m;
+ const size_t tile_n = threadpool->params.parallelize_6d_tile_2d.tile_n;
+ size_t i = index_i_j.quotient;
+ size_t j = index_i_j.remainder;
+ size_t k = index_k_l.quotient;
+ size_t l = index_k_l.remainder;
+ size_t start_m = tile_index_m_n.quotient * tile_m;
+ size_t start_n = tile_index_m_n.remainder * tile_n;
+
+ const size_t range_n = threadpool->params.parallelize_6d_tile_2d.range_n;
+ const size_t range_m = threadpool->params.parallelize_6d_tile_2d.range_m;
+ const size_t range_k = threadpool->params.parallelize_6d_tile_2d.range_k;
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&thread->range_length) < range_threshold) {
+ task(argument, i, j, k, l, start_m, start_n, min(range_m - start_m, tile_m), min(range_n - start_n, tile_n));
+ start_n += tile_n;
+ if (start_n >= range_n) {
+ start_n = 0;
+ start_m += tile_m;
+ if (start_m >= range_m) {
+ start_m = 0;
+ if (++l == range_l.value) {
+ l = 0;
+ if (++k == range_k) {
+ k = 0;
+ if (++j == range_j.value) {
+ j = 0;
+ i += 1;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ /* There may still be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_length) < range_threshold) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t tile_index_ijkl_mn = fxdiv_divide_size_t(linear_index, tile_range_mn);
+ const struct fxdiv_result_size_t index_ij_kl = fxdiv_divide_size_t(tile_index_ijkl_mn.quotient, range_kl);
+ const struct fxdiv_result_size_t tile_index_m_n = fxdiv_divide_size_t(tile_index_ijkl_mn.remainder, tile_range_n);
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_kl.quotient, range_j);
+ const struct fxdiv_result_size_t index_k_l = fxdiv_divide_size_t(index_ij_kl.remainder, range_l);
+ const size_t start_m = tile_index_m_n.quotient * tile_m;
+ const size_t start_n = tile_index_m_n.remainder * tile_n;
+ task(argument, index_i_j.quotient, index_i_j.remainder, index_k_l.quotient, index_k_l.remainder,
+ start_m, start_n, min(range_m - start_m, tile_m), min(range_n - start_n, tile_n));
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
diff --git a/src/gcd.c b/src/gcd.c
new file mode 100644
index 0000000..ddd9af4
--- /dev/null
+++ b/src/gcd.c
@@ -0,0 +1,136 @@
+/* Standard C headers */
+#include <assert.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+/* Configuration header */
+#include "threadpool-common.h"
+
+/* Apple system headers */
+#include <dispatch/dispatch.h>
+#include <sys/types.h>
+#include <sys/sysctl.h>
+
+/* Public library header */
+#include <pthreadpool.h>
+
+/* Internal library headers */
+#include "threadpool-atomics.h"
+#include "threadpool-object.h"
+#include "threadpool-utils.h"
+
+static void thread_main(void* arg, size_t thread_index) {
+ struct pthreadpool* threadpool = (struct pthreadpool*) arg;
+ struct thread_info* thread = &threadpool->threads[thread_index];
+
+ const uint32_t flags = pthreadpool_load_relaxed_uint32_t(&threadpool->flags);
+ const thread_function_t thread_function =
+ (thread_function_t) pthreadpool_load_relaxed_void_p(&threadpool->thread_function);
+
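+ /*
+ * PTHREADPOOL_FLAG_DISABLE_DENORMALS flushes subnormal floating-point
+ * values to zero while the task runs; the prior FPU state is saved here
+ * and restored once the task returns.
+ */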
+ struct fpu_state saved_fpu_state = { 0 };
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ saved_fpu_state = get_fpu_state();
+ disable_fpu_denormals();
+ }
+
+ thread_function(threadpool, thread);
+
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ set_fpu_state(saved_fpu_state);
+ }
+}
+
+struct pthreadpool* pthreadpool_create(size_t threads_count) {
+ if (threads_count == 0) {
+ int threads = 1;
+ size_t sizeof_threads = sizeof(threads);
+ if (sysctlbyname("hw.logicalcpu_max", &threads, &sizeof_threads, NULL, 0) != 0) {
+ return NULL;
+ }
+
+ if (threads <= 0) {
+ return NULL;
+ }
+
+ threads_count = (size_t) threads;
+ }
+
+ struct pthreadpool* threadpool = pthreadpool_allocate(threads_count);
+ if (threadpool == NULL) {
+ return NULL;
+ }
+ threadpool->threads_count = fxdiv_init_size_t(threads_count);
+ for (size_t tid = 0; tid < threads_count; tid++) {
+ threadpool->threads[tid].thread_number = tid;
+ }
+
+ /* A thread pool with a single thread computes everything on the caller thread. */
+ if (threads_count > 1) {
+ threadpool->execution_semaphore = dispatch_semaphore_create(1);
+ }
+ return threadpool;
+}
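+
+/*
+ * Minimal caller-side sketch (public API from pthreadpool.h; error
+ * handling elided):
+ *
+ *   pthreadpool_t pool = pthreadpool_create(0);  // 0: one thread per logical CPU
+ *   // ... pthreadpool_parallelize_*() calls ...
+ *   pthreadpool_destroy(pool);
+ */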
+
+PTHREADPOOL_INTERNAL void pthreadpool_parallelize(
+ struct pthreadpool* threadpool,
+ thread_function_t thread_function,
+ const void* params,
+ size_t params_size,
+ void* task,
+ void* context,
+ size_t linear_range,
+ uint32_t flags)
+{
+ assert(threadpool != NULL);
+ assert(thread_function != NULL);
+ assert(task != NULL);
+ assert(linear_range > 1);
+
+ /* Protect the global threadpool structures */
+ dispatch_semaphore_wait(threadpool->execution_semaphore, DISPATCH_TIME_FOREVER);
+
+ /* Set up global arguments */
+ pthreadpool_store_relaxed_void_p(&threadpool->thread_function, (void*) thread_function);
+ pthreadpool_store_relaxed_void_p(&threadpool->task, task);
+ pthreadpool_store_relaxed_void_p(&threadpool->argument, context);
+ pthreadpool_store_relaxed_uint32_t(&threadpool->flags, flags);
+
+ /* No locking needed: threads_count is fixed at pool creation and never changes */
+ const struct fxdiv_divisor_size_t threads_count = threadpool->threads_count;
+
+ if (params_size != 0) {
+ memcpy(&threadpool->params, params, params_size);
+ }
+
+ /* Spread the work between threads */
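+ /*
+ * For example, linear_range = 10 on 4 threads gives quotient 2 and
+ * remainder 2, so the per-thread subranges hold 3, 3, 2, and 2 items.
+ */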
+ const struct fxdiv_result_size_t range_params = fxdiv_divide_size_t(linear_range, threads_count);
+ size_t range_start = 0;
+ for (size_t tid = 0; tid < threads_count.value; tid++) {
+ struct thread_info* thread = &threadpool->threads[tid];
+ const size_t range_length = range_params.quotient + (size_t) (tid < range_params.remainder);
+ const size_t range_end = range_start + range_length;
+ pthreadpool_store_relaxed_size_t(&thread->range_start, range_start);
+ pthreadpool_store_relaxed_size_t(&thread->range_end, range_end);
+ pthreadpool_store_relaxed_size_t(&thread->range_length, range_length);
+
+ /* The next subrange starts where the previous ended */
+ range_start = range_end;
+ }
+
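+ /*
+ * dispatch_apply_f is synchronous: it returns only after every
+ * thread_main invocation has completed, so no separate completion
+ * signal is needed. DISPATCH_APPLY_AUTO lets the system choose an
+ * appropriate concurrent queue.
+ */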
+ dispatch_apply_f(threads_count.value, DISPATCH_APPLY_AUTO, threadpool, thread_main);
+
+ /* Unprotect the global threadpool structures */
+ dispatch_semaphore_signal(threadpool->execution_semaphore);
+}
+
+void pthreadpool_destroy(struct pthreadpool* threadpool) {
+ if (threadpool != NULL) {
+ if (threadpool->execution_semaphore != NULL) {
+ /* Release resources */
+ dispatch_release(threadpool->execution_semaphore);
+ }
+ pthreadpool_deallocate(threadpool);
+ }
+}
diff --git a/src/threadpool-legacy.c b/src/legacy-api.c
index 43fb798..8d5a6fd 100644
--- a/src/threadpool-legacy.c
+++ b/src/legacy-api.c
@@ -4,21 +4,12 @@
/* Dependencies */
#include <fxdiv.h>
-/* Library header */
+/* Public library header */
#include <pthreadpool.h>
+/* Internal library headers */
+#include "threadpool-utils.h"
-static inline size_t divide_round_up(size_t dividend, size_t divisor) {
- if (dividend % divisor == 0) {
- return dividend / divisor;
- } else {
- return dividend / divisor + 1;
- }
-}
-
-static inline size_t min(size_t a, size_t b) {
- return a < b ? a : b;
-}
void pthreadpool_compute_1d(
pthreadpool_t threadpool,
diff --git a/src/memory.c b/src/memory.c
new file mode 100644
index 0000000..fc0d83e
--- /dev/null
+++ b/src/memory.c
@@ -0,0 +1,66 @@
+/* Standard C headers */
+#include <assert.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+
+/* POSIX headers */
+#ifdef __ANDROID__
+ #include <malloc.h>
+#endif
+
+/* Windows headers */
+#ifdef _WIN32
+ #include <malloc.h>
+#endif
+
+/* Internal library headers */
+#include "threadpool-common.h"
+#include "threadpool-object.h"
+
+
+PTHREADPOOL_INTERNAL struct pthreadpool* pthreadpool_allocate(
+ size_t threads_count)
+{
+ assert(threads_count >= 1);
+
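+ /*
+ * The pool is a single cacheline-aligned allocation: the pthreadpool
+ * header followed by one thread_info record per thread. Alignment keeps
+ * the per-thread atomic counters from sharing cache lines across
+ * threads (false sharing).
+ */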
+ const size_t threadpool_size = sizeof(struct pthreadpool) + threads_count * sizeof(struct thread_info);
+ struct pthreadpool* threadpool = NULL;
+ #if defined(__ANDROID__)
+ /*
+ * Android didn't get posix_memalign until API level 17 (Android 4.2).
+ * Use the (otherwise obsolete) memalign function on Android instead.
+ */
+ threadpool = memalign(PTHREADPOOL_CACHELINE_SIZE, threadpool_size);
+ if (threadpool == NULL) {
+ return NULL;
+ }
+ #elif defined(_WIN32)
+ threadpool = _aligned_malloc(threadpool_size, PTHREADPOOL_CACHELINE_SIZE);
+ if (threadpool == NULL) {
+ return NULL;
+ }
+ #else
+ if (posix_memalign((void**) &threadpool, PTHREADPOOL_CACHELINE_SIZE, threadpool_size) != 0) {
+ return NULL;
+ }
+ #endif
+ memset(threadpool, 0, threadpool_size);
+ return threadpool;
+}
+
+
+PTHREADPOOL_INTERNAL void pthreadpool_deallocate(
+ struct pthreadpool* threadpool)
+{
+ assert(threadpool != NULL);
+
+ const size_t threadpool_size = sizeof(struct pthreadpool) + threadpool->threads_count.value * sizeof(struct thread_info);
+ memset(threadpool, 0, threadpool_size);
+
+ #ifdef _WIN32
+ _aligned_free(threadpool);
+ #else
+ free(threadpool);
+ #endif
+}
diff --git a/src/portable-api.c b/src/portable-api.c
new file mode 100644
index 0000000..42d0369
--- /dev/null
+++ b/src/portable-api.c
@@ -0,0 +1,2384 @@
+/* Standard C headers */
+#include <assert.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#if PTHREADPOOL_USE_CPUINFO
+ #include <cpuinfo.h>
+#endif
+
+/* Dependencies */
+#include <fxdiv.h>
+
+/* Public library header */
+#include <pthreadpool.h>
+
+/* Internal library headers */
+#include "threadpool-atomics.h"
+#include "threadpool-object.h"
+#include "threadpool-utils.h"
+
+
+size_t pthreadpool_get_threads_count(struct pthreadpool* threadpool) {
+ if (threadpool == NULL) {
+ return 1;
+ }
+
+ return threadpool->threads_count.value;
+}
+
+static void thread_parallelize_1d(struct pthreadpool* threadpool, struct thread_info* thread) {
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_1d_t task = (pthreadpool_task_1d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ /* Process thread's own range of items */
+ size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ while (pthreadpool_try_decrement_relaxed_size_t(&thread->range_length)) {
+ task(argument, range_start++);
+ }
+
+ /* There may still be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ const size_t threads_count = threadpool->threads_count.value;
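+ /*
+ * Work stealing: scan the other threads in reverse circular order and
+ * take items from the tail (range_end) of their ranges, while owners
+ * consume from the head, keeping owner and thieves off the same end.
+ * Every parallelize variant below repeats this same pattern.
+ */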
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_try_decrement_relaxed_size_t(&other_thread->range_length)) {
+ const size_t index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ task(argument, index);
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
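+
+/*
+ * Caller-side sketch (hypothetical task; entry point as declared in
+ * pthreadpool.h): square each element of an array in parallel.
+ *
+ *   static void square_one(void* ctx, size_t i) {
+ *     double* data = (double*) ctx;
+ *     data[i] *= data[i];
+ *   }
+ *
+ *   pthreadpool_parallelize_1d(pool, square_one, data, count, 0);
+ */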
+
+static void thread_parallelize_1d_with_uarch(struct pthreadpool* threadpool, struct thread_info* thread) {
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_1d_with_id_t task = (pthreadpool_task_1d_with_id_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ const uint32_t default_uarch_index = threadpool->params.parallelize_1d_with_uarch.default_uarch_index;
+ uint32_t uarch_index = default_uarch_index;
+ #if PTHREADPOOL_USE_CPUINFO
+ uarch_index = cpuinfo_get_current_uarch_index_with_default(default_uarch_index);
+ if (uarch_index > threadpool->params.parallelize_1d_with_uarch.max_uarch_index) {
+ uarch_index = default_uarch_index;
+ }
+ #endif
+
+ /* Process thread's own range of items */
+ size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ while (pthreadpool_try_decrement_relaxed_size_t(&thread->range_length)) {
+ task(argument, uarch_index, range_start++);
+ }
+
+ /* There may still be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ const size_t threads_count = threadpool->threads_count.value;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_try_decrement_relaxed_size_t(&other_thread->range_length)) {
+ const size_t index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ task(argument, uarch_index, index);
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+static void thread_parallelize_1d_tile_1d(struct pthreadpool* threadpool, struct thread_info* thread) {
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_1d_tile_1d_t task = (pthreadpool_task_1d_tile_1d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const size_t tile = threadpool->params.parallelize_1d_tile_1d.tile;
+ size_t tile_start = range_start * tile;
+
+ const size_t range = threadpool->params.parallelize_1d_tile_1d.range;
+ while (pthreadpool_try_decrement_relaxed_size_t(&thread->range_length)) {
+ task(argument, tile_start, min(range - tile_start, tile));
+ tile_start += tile;
+ }
+
+ /* There may still be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ const size_t threads_count = threadpool->threads_count.value;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_try_decrement_relaxed_size_t(&other_thread->range_length)) {
+ const size_t tile_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const size_t tile_start = tile_index * tile;
+ task(argument, tile_start, min(range - tile_start, tile));
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+static void thread_parallelize_2d(struct pthreadpool* threadpool, struct thread_info* thread) {
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_2d_t task = (pthreadpool_task_2d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_2d.range_j;
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(range_start, range_j);
+ size_t i = index_i_j.quotient;
+ size_t j = index_i_j.remainder;
+
+ while (pthreadpool_try_decrement_relaxed_size_t(&thread->range_length)) {
+ task(argument, i, j);
+ if (++j == range_j.value) {
+ j = 0;
+ i += 1;
+ }
+ }
+
+ /* There may still be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ const size_t threads_count = threadpool->threads_count.value;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_try_decrement_relaxed_size_t(&other_thread->range_length)) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(linear_index, range_j);
+ task(argument, index_i_j.quotient, index_i_j.remainder);
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+static void thread_parallelize_2d_tile_1d(struct pthreadpool* threadpool, struct thread_info* thread) {
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_2d_tile_1d_t task = (pthreadpool_task_2d_tile_1d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t tile_range_j = threadpool->params.parallelize_2d_tile_1d.tile_range_j;
+ const struct fxdiv_result_size_t tile_index_i_j = fxdiv_divide_size_t(range_start, tile_range_j);
+ const size_t tile_j = threadpool->params.parallelize_2d_tile_1d.tile_j;
+ size_t i = tile_index_i_j.quotient;
+ size_t start_j = tile_index_i_j.remainder * tile_j;
+
+ const size_t range_j = threadpool->params.parallelize_2d_tile_1d.range_j;
+ while (pthreadpool_try_decrement_relaxed_size_t(&thread->range_length)) {
+ task(argument, i, start_j, min(range_j - start_j, tile_j));
+ start_j += tile_j;
+ if (start_j >= range_j) {
+ start_j = 0;
+ i += 1;
+ }
+ }
+
+ /* There may still be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ const size_t threads_count = threadpool->threads_count.value;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_try_decrement_relaxed_size_t(&other_thread->range_length)) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t tile_index_i_j = fxdiv_divide_size_t(linear_index, tile_range_j);
+ const size_t start_j = tile_index_i_j.remainder * tile_j;
+ task(argument, tile_index_i_j.quotient, start_j, min(range_j - start_j, tile_j));
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+static void thread_parallelize_2d_tile_2d(struct pthreadpool* threadpool, struct thread_info* thread) {
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_2d_tile_2d_t task = (pthreadpool_task_2d_tile_2d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t tile_range_j = threadpool->params.parallelize_2d_tile_2d.tile_range_j;
+ const struct fxdiv_result_size_t tile_index_i_j = fxdiv_divide_size_t(range_start, tile_range_j);
+ const size_t tile_i = threadpool->params.parallelize_2d_tile_2d.tile_i;
+ const size_t tile_j = threadpool->params.parallelize_2d_tile_2d.tile_j;
+ size_t start_i = tile_index_i_j.quotient * tile_i;
+ size_t start_j = tile_index_i_j.remainder * tile_j;
+
+ const size_t range_i = threadpool->params.parallelize_2d_tile_2d.range_i;
+ const size_t range_j = threadpool->params.parallelize_2d_tile_2d.range_j;
+ while (pthreadpool_try_decrement_relaxed_size_t(&thread->range_length)) {
+ task(argument, start_i, start_j, min(range_i - start_i, tile_i), min(range_j - start_j, tile_j));
+ start_j += tile_j;
+ if (start_j >= range_j) {
+ start_j = 0;
+ start_i += tile_i;
+ }
+ }
+
+ /* There may still be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ const size_t threads_count = threadpool->threads_count.value;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_try_decrement_relaxed_size_t(&other_thread->range_length)) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t tile_index_i_j = fxdiv_divide_size_t(linear_index, tile_range_j);
+ const size_t start_i = tile_index_i_j.quotient * tile_i;
+ const size_t start_j = tile_index_i_j.remainder * tile_j;
+ task(argument, start_i, start_j, min(range_i - start_i, tile_i), min(range_j - start_j, tile_j));
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+static void thread_parallelize_2d_tile_2d_with_uarch(struct pthreadpool* threadpool, struct thread_info* thread) {
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_2d_tile_2d_with_id_t task = (pthreadpool_task_2d_tile_2d_with_id_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ const uint32_t default_uarch_index = threadpool->params.parallelize_2d_tile_2d_with_uarch.default_uarch_index;
+ uint32_t uarch_index = default_uarch_index;
+ #if PTHREADPOOL_USE_CPUINFO
+ uarch_index = cpuinfo_get_current_uarch_index_with_default(default_uarch_index);
+ if (uarch_index > threadpool->params.parallelize_2d_tile_2d_with_uarch.max_uarch_index) {
+ uarch_index = default_uarch_index;
+ }
+ #endif
+
+ /* Process thread's own range of items */
+ const struct fxdiv_divisor_size_t tile_range_j = threadpool->params.parallelize_2d_tile_2d_with_uarch.tile_range_j;
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_result_size_t index = fxdiv_divide_size_t(range_start, tile_range_j);
+ const size_t range_i = threadpool->params.parallelize_2d_tile_2d_with_uarch.range_i;
+ const size_t tile_i = threadpool->params.parallelize_2d_tile_2d_with_uarch.tile_i;
+ const size_t range_j = threadpool->params.parallelize_2d_tile_2d_with_uarch.range_j;
+ const size_t tile_j = threadpool->params.parallelize_2d_tile_2d_with_uarch.tile_j;
+ size_t start_i = index.quotient * tile_i;
+ size_t start_j = index.remainder * tile_j;
+
+ while (pthreadpool_try_decrement_relaxed_size_t(&thread->range_length)) {
+ task(argument, uarch_index, start_i, start_j, min(range_i - start_i, tile_i), min(range_j - start_j, tile_j));
+ start_j += tile_j;
+ if (start_j >= range_j) {
+ start_j = 0;
+ start_i += tile_i;
+ }
+ }
+
+ /* There may still be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ const size_t threads_count = threadpool->threads_count.value;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_try_decrement_relaxed_size_t(&other_thread->range_length)) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t tile_index_i_j = fxdiv_divide_size_t(linear_index, tile_range_j);
+ const size_t start_i = tile_index_i_j.quotient * tile_i;
+ const size_t start_j = tile_index_i_j.remainder * tile_j;
+ task(argument, uarch_index, start_i, start_j, min(range_i - start_i, tile_i), min(range_j - start_j, tile_j));
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+static void thread_parallelize_3d(struct pthreadpool* threadpool, struct thread_info* thread) {
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_3d_t task = (pthreadpool_task_3d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t range_k = threadpool->params.parallelize_3d.range_k;
+ const struct fxdiv_result_size_t index_ij_k = fxdiv_divide_size_t(range_start, range_k);
+ const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_3d.range_j;
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_k.quotient, range_j);
+ size_t i = index_i_j.quotient;
+ size_t j = index_i_j.remainder;
+ size_t k = index_ij_k.remainder;
+
+ while (pthreadpool_try_decrement_relaxed_size_t(&thread->range_length)) {
+ task(argument, i, j, k);
+ if (++k == range_k.value) {
+ k = 0;
+ if (++j == range_j.value) {
+ j = 0;
+ i += 1;
+ }
+ }
+ }
+
+ /* There may still be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ const size_t threads_count = threadpool->threads_count.value;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_try_decrement_relaxed_size_t(&other_thread->range_length)) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t index_ij_k = fxdiv_divide_size_t(linear_index, range_k);
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_k.quotient, range_j);
+ task(argument, index_i_j.quotient, index_i_j.remainder, index_ij_k.remainder);
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+static void thread_parallelize_3d_tile_1d(struct pthreadpool* threadpool, struct thread_info* thread) {
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_3d_tile_1d_t task = (pthreadpool_task_3d_tile_1d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t tile_range_k = threadpool->params.parallelize_3d_tile_1d.tile_range_k;
+ const struct fxdiv_result_size_t tile_index_ij_k = fxdiv_divide_size_t(range_start, tile_range_k);
+ const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_3d_tile_1d.range_j;
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(tile_index_ij_k.quotient, range_j);
+ const size_t tile_k = threadpool->params.parallelize_3d_tile_1d.tile_k;
+ size_t i = index_i_j.quotient;
+ size_t j = index_i_j.remainder;
+ size_t start_k = tile_index_ij_k.remainder * tile_k;
+
+ const size_t range_k = threadpool->params.parallelize_3d_tile_1d.range_k;
+ while (pthreadpool_try_decrement_relaxed_size_t(&thread->range_length)) {
+ task(argument, i, j, start_k, min(range_k - start_k, tile_k));
+ start_k += tile_k;
+ if (start_k >= range_k) {
+ start_k = 0;
+ if (++j == range_j.value) {
+ j = 0;
+ i += 1;
+ }
+ }
+ }
+
+ /* There may still be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ const size_t threads_count = threadpool->threads_count.value;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_try_decrement_relaxed_size_t(&other_thread->range_length)) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t tile_index_ij_k = fxdiv_divide_size_t(linear_index, tile_range_k);
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(tile_index_ij_k.quotient, range_j);
+ const size_t start_k = tile_index_ij_k.remainder * tile_k;
+ task(argument, index_i_j.quotient, index_i_j.remainder, start_k, min(range_k - start_k, tile_k));
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+static void thread_parallelize_3d_tile_2d(struct pthreadpool* threadpool, struct thread_info* thread) {
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_3d_tile_2d_t task = (pthreadpool_task_3d_tile_2d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t tile_range_k = threadpool->params.parallelize_3d_tile_2d.tile_range_k;
+ const struct fxdiv_result_size_t tile_index_ij_k = fxdiv_divide_size_t(range_start, tile_range_k);
+ const struct fxdiv_divisor_size_t tile_range_j = threadpool->params.parallelize_3d_tile_2d.tile_range_j;
+ const struct fxdiv_result_size_t tile_index_i_j = fxdiv_divide_size_t(tile_index_ij_k.quotient, tile_range_j);
+ const size_t tile_j = threadpool->params.parallelize_3d_tile_2d.tile_j;
+ const size_t tile_k = threadpool->params.parallelize_3d_tile_2d.tile_k;
+ size_t i = tile_index_i_j.quotient;
+ size_t start_j = tile_index_i_j.remainder * tile_j;
+ size_t start_k = tile_index_ij_k.remainder * tile_k;
+
+ const size_t range_k = threadpool->params.parallelize_3d_tile_2d.range_k;
+ const size_t range_j = threadpool->params.parallelize_3d_tile_2d.range_j;
+ while (pthreadpool_try_decrement_relaxed_size_t(&thread->range_length)) {
+ task(argument, i, start_j, start_k, min(range_j - start_j, tile_j), min(range_k - start_k, tile_k));
+ start_k += tile_k;
+ if (start_k >= range_k) {
+ start_k = 0;
+ start_j += tile_j;
+ if (start_j >= range_j) {
+ start_j = 0;
+ i += 1;
+ }
+ }
+ }
+
+ /* There may still be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ const size_t threads_count = threadpool->threads_count.value;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_try_decrement_relaxed_size_t(&other_thread->range_length)) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t tile_index_ij_k = fxdiv_divide_size_t(linear_index, tile_range_k);
+ const struct fxdiv_result_size_t tile_index_i_j = fxdiv_divide_size_t(tile_index_ij_k.quotient, tile_range_j);
+ const size_t start_j = tile_index_i_j.remainder * tile_j;
+ const size_t start_k = tile_index_ij_k.remainder * tile_k;
+ task(argument, tile_index_i_j.quotient, start_j, start_k, min(range_j - start_j, tile_j), min(range_k - start_k, tile_k));
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+static void thread_parallelize_3d_tile_2d_with_uarch(struct pthreadpool* threadpool, struct thread_info* thread) {
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_3d_tile_2d_with_id_t task = (pthreadpool_task_3d_tile_2d_with_id_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
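+	/*
+	 * Resolve the microarchitecture index for this worker: when built with
+	 * cpuinfo, query the uarch of the core the thread currently runs on, and
+	 * fall back to the caller-supplied default if the reported index exceeds
+	 * max_uarch_index.
+	 */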
+ const uint32_t default_uarch_index = threadpool->params.parallelize_3d_tile_2d_with_uarch.default_uarch_index;
+ uint32_t uarch_index = default_uarch_index;
+ #if PTHREADPOOL_USE_CPUINFO
+ uarch_index = cpuinfo_get_current_uarch_index_with_default(default_uarch_index);
+ if (uarch_index > threadpool->params.parallelize_3d_tile_2d_with_uarch.max_uarch_index) {
+ uarch_index = default_uarch_index;
+ }
+ #endif
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t tile_range_k = threadpool->params.parallelize_3d_tile_2d_with_uarch.tile_range_k;
+ const struct fxdiv_result_size_t tile_index_ij_k = fxdiv_divide_size_t(range_start, tile_range_k);
+ const struct fxdiv_divisor_size_t tile_range_j = threadpool->params.parallelize_3d_tile_2d_with_uarch.tile_range_j;
+ const struct fxdiv_result_size_t tile_index_i_j = fxdiv_divide_size_t(tile_index_ij_k.quotient, tile_range_j);
+ const size_t tile_j = threadpool->params.parallelize_3d_tile_2d_with_uarch.tile_j;
+ const size_t tile_k = threadpool->params.parallelize_3d_tile_2d_with_uarch.tile_k;
+ size_t i = tile_index_i_j.quotient;
+ size_t start_j = tile_index_i_j.remainder * tile_j;
+ size_t start_k = tile_index_ij_k.remainder * tile_k;
+
+ const size_t range_k = threadpool->params.parallelize_3d_tile_2d_with_uarch.range_k;
+ const size_t range_j = threadpool->params.parallelize_3d_tile_2d_with_uarch.range_j;
+ while (pthreadpool_try_decrement_relaxed_size_t(&thread->range_length)) {
+ task(argument, uarch_index, i, start_j, start_k, min(range_j - start_j, tile_j), min(range_k - start_k, tile_k));
+ start_k += tile_k;
+ if (start_k >= range_k) {
+ start_k = 0;
+ start_j += tile_j;
+ if (start_j >= range_j) {
+ start_j = 0;
+ i += 1;
+ }
+ }
+ }
+
+ /* There still may be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ const size_t threads_count = threadpool->threads_count.value;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_try_decrement_relaxed_size_t(&other_thread->range_length)) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t tile_index_ij_k = fxdiv_divide_size_t(linear_index, tile_range_k);
+ const struct fxdiv_result_size_t tile_index_i_j = fxdiv_divide_size_t(tile_index_ij_k.quotient, tile_range_j);
+ const size_t start_j = tile_index_i_j.remainder * tile_j;
+ const size_t start_k = tile_index_ij_k.remainder * tile_k;
+ task(argument, uarch_index, tile_index_i_j.quotient, start_j, start_k, min(range_j - start_j, tile_j), min(range_k - start_k, tile_k));
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+static void thread_parallelize_4d(struct pthreadpool* threadpool, struct thread_info* thread) {
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_4d_t task = (pthreadpool_task_4d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t range_kl = threadpool->params.parallelize_4d.range_kl;
+ const struct fxdiv_result_size_t index_ij_kl = fxdiv_divide_size_t(range_start, range_kl);
+ const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_4d.range_j;
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_kl.quotient, range_j);
+ const struct fxdiv_divisor_size_t range_l = threadpool->params.parallelize_4d.range_l;
+ const struct fxdiv_result_size_t index_k_l = fxdiv_divide_size_t(index_ij_kl.remainder, range_l);
+ size_t i = index_i_j.quotient;
+ size_t j = index_i_j.remainder;
+ size_t k = index_k_l.quotient;
+ size_t l = index_k_l.remainder;
+
+ const size_t range_k = threadpool->params.parallelize_4d.range_k;
+ while (pthreadpool_try_decrement_relaxed_size_t(&thread->range_length)) {
+ task(argument, i, j, k, l);
+ if (++l == range_l.value) {
+ l = 0;
+ if (++k == range_k) {
+ k = 0;
+ if (++j == range_j.value) {
+ j = 0;
+ i += 1;
+ }
+ }
+ }
+ }
+
+ /* There still may be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ const size_t threads_count = threadpool->threads_count.value;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_try_decrement_relaxed_size_t(&other_thread->range_length)) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t index_ij_kl = fxdiv_divide_size_t(linear_index, range_kl);
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_kl.quotient, range_j);
+ const struct fxdiv_result_size_t index_k_l = fxdiv_divide_size_t(index_ij_kl.remainder, range_l);
+ task(argument, index_i_j.quotient, index_i_j.remainder, index_k_l.quotient, index_k_l.remainder);
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
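+
+/*
+ * Worked example of the decomposition above (illustrative numbers): with
+ * ranges (i, j, k, l) = (2, 3, 4, 5), range_kl = 4 * 5 = 20, linear index
+ * 37 splits as 37 = 1 * 20 + 17, then 1 = 0 * 3 + 1 and 17 = 3 * 5 + 2,
+ * recovering (i, j, k, l) = (0, 1, 3, 2); indeed
+ * ((0 * 3 + 1) * 4 + 3) * 5 + 2 = 37.
+ */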
+
+static void thread_parallelize_4d_tile_1d(struct pthreadpool* threadpool, struct thread_info* thread) {
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_4d_tile_1d_t task = (pthreadpool_task_4d_tile_1d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t tile_range_kl = threadpool->params.parallelize_4d_tile_1d.tile_range_kl;
+ const struct fxdiv_result_size_t tile_index_ij_kl = fxdiv_divide_size_t(range_start, tile_range_kl);
+ const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_4d_tile_1d.range_j;
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(tile_index_ij_kl.quotient, range_j);
+ const struct fxdiv_divisor_size_t tile_range_l = threadpool->params.parallelize_4d_tile_1d.tile_range_l;
+ const struct fxdiv_result_size_t tile_index_k_l = fxdiv_divide_size_t(tile_index_ij_kl.remainder, tile_range_l);
+ const size_t tile_l = threadpool->params.parallelize_4d_tile_1d.tile_l;
+ size_t i = index_i_j.quotient;
+ size_t j = index_i_j.remainder;
+ size_t k = tile_index_k_l.quotient;
+ size_t start_l = tile_index_k_l.remainder * tile_l;
+
+ const size_t range_k = threadpool->params.parallelize_4d_tile_1d.range_k;
+ const size_t range_l = threadpool->params.parallelize_4d_tile_1d.range_l;
+ while (pthreadpool_try_decrement_relaxed_size_t(&thread->range_length)) {
+ task(argument, i, j, k, start_l, min(range_l - start_l, tile_l));
+ start_l += tile_l;
+ if (start_l >= range_l) {
+ start_l = 0;
+ if (++k == range_k) {
+ k = 0;
+ if (++j == range_j.value) {
+ j = 0;
+ i += 1;
+ }
+ }
+ }
+ }
+
+ /* There still may be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ const size_t threads_count = threadpool->threads_count.value;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_try_decrement_relaxed_size_t(&other_thread->range_length)) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t tile_index_ij_kl = fxdiv_divide_size_t(linear_index, tile_range_kl);
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(tile_index_ij_kl.quotient, range_j);
+ const struct fxdiv_result_size_t tile_index_k_l = fxdiv_divide_size_t(tile_index_ij_kl.remainder, tile_range_l);
+ const size_t start_l = tile_index_k_l.remainder * tile_l;
+ task(argument, index_i_j.quotient, index_i_j.remainder, tile_index_k_l.quotient, start_l, min(range_l - start_l, tile_l));
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+static void thread_parallelize_4d_tile_2d(struct pthreadpool* threadpool, struct thread_info* thread) {
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_4d_tile_2d_t task = (pthreadpool_task_4d_tile_2d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t tile_range_kl = threadpool->params.parallelize_4d_tile_2d.tile_range_kl;
+ const struct fxdiv_result_size_t tile_index_ij_kl = fxdiv_divide_size_t(range_start, tile_range_kl);
+ const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_4d_tile_2d.range_j;
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(tile_index_ij_kl.quotient, range_j);
+ const struct fxdiv_divisor_size_t tile_range_l = threadpool->params.parallelize_4d_tile_2d.tile_range_l;
+ const struct fxdiv_result_size_t tile_index_k_l = fxdiv_divide_size_t(tile_index_ij_kl.remainder, tile_range_l);
+ const size_t tile_k = threadpool->params.parallelize_4d_tile_2d.tile_k;
+ const size_t tile_l = threadpool->params.parallelize_4d_tile_2d.tile_l;
+ size_t i = index_i_j.quotient;
+ size_t j = index_i_j.remainder;
+ size_t start_k = tile_index_k_l.quotient * tile_k;
+ size_t start_l = tile_index_k_l.remainder * tile_l;
+
+ const size_t range_l = threadpool->params.parallelize_4d_tile_2d.range_l;
+ const size_t range_k = threadpool->params.parallelize_4d_tile_2d.range_k;
+ while (pthreadpool_try_decrement_relaxed_size_t(&thread->range_length)) {
+ task(argument, i, j, start_k, start_l, min(range_k - start_k, tile_k), min(range_l - start_l, tile_l));
+ start_l += tile_l;
+ if (start_l >= range_l) {
+ start_l = 0;
+ start_k += tile_k;
+ if (start_k >= range_k) {
+ start_k = 0;
+ if (++j == range_j.value) {
+ j = 0;
+ i += 1;
+ }
+ }
+ }
+ }
+
+ /* There still may be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ const size_t threads_count = threadpool->threads_count.value;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_try_decrement_relaxed_size_t(&other_thread->range_length)) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t tile_index_ij_kl = fxdiv_divide_size_t(linear_index, tile_range_kl);
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(tile_index_ij_kl.quotient, range_j);
+ const struct fxdiv_result_size_t tile_index_k_l = fxdiv_divide_size_t(tile_index_ij_kl.remainder, tile_range_l);
+ const size_t start_k = tile_index_k_l.quotient * tile_k;
+ const size_t start_l = tile_index_k_l.remainder * tile_l;
+ task(argument, index_i_j.quotient, index_i_j.remainder, start_k, start_l, min(range_k - start_k, tile_k), min(range_l - start_l, tile_l));
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+static void thread_parallelize_4d_tile_2d_with_uarch(struct pthreadpool* threadpool, struct thread_info* thread) {
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_4d_tile_2d_with_id_t task = (pthreadpool_task_4d_tile_2d_with_id_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ const uint32_t default_uarch_index = threadpool->params.parallelize_4d_tile_2d_with_uarch.default_uarch_index;
+ uint32_t uarch_index = default_uarch_index;
+ #if PTHREADPOOL_USE_CPUINFO
+ uarch_index = cpuinfo_get_current_uarch_index_with_default(default_uarch_index);
+ if (uarch_index > threadpool->params.parallelize_4d_tile_2d_with_uarch.max_uarch_index) {
+ uarch_index = default_uarch_index;
+ }
+ #endif
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t tile_range_kl = threadpool->params.parallelize_4d_tile_2d_with_uarch.tile_range_kl;
+ const struct fxdiv_result_size_t tile_index_ij_kl = fxdiv_divide_size_t(range_start, tile_range_kl);
+ const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_4d_tile_2d_with_uarch.range_j;
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(tile_index_ij_kl.quotient, range_j);
+ const struct fxdiv_divisor_size_t tile_range_l = threadpool->params.parallelize_4d_tile_2d_with_uarch.tile_range_l;
+ const struct fxdiv_result_size_t tile_index_k_l = fxdiv_divide_size_t(tile_index_ij_kl.remainder, tile_range_l);
+ const size_t tile_k = threadpool->params.parallelize_4d_tile_2d_with_uarch.tile_k;
+ const size_t tile_l = threadpool->params.parallelize_4d_tile_2d_with_uarch.tile_l;
+ size_t i = index_i_j.quotient;
+ size_t j = index_i_j.remainder;
+ size_t start_k = tile_index_k_l.quotient * tile_k;
+ size_t start_l = tile_index_k_l.remainder * tile_l;
+
+ const size_t range_l = threadpool->params.parallelize_4d_tile_2d_with_uarch.range_l;
+ const size_t range_k = threadpool->params.parallelize_4d_tile_2d_with_uarch.range_k;
+ while (pthreadpool_try_decrement_relaxed_size_t(&thread->range_length)) {
+ task(argument, uarch_index, i, j, start_k, start_l, min(range_k - start_k, tile_k), min(range_l - start_l, tile_l));
+ start_l += tile_l;
+ if (start_l >= range_l) {
+ start_l = 0;
+ start_k += tile_k;
+ if (start_k >= range_k) {
+ start_k = 0;
+ if (++j == range_j.value) {
+ j = 0;
+ i += 1;
+ }
+ }
+ }
+ }
+
+ /* There still may be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ const size_t threads_count = threadpool->threads_count.value;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_try_decrement_relaxed_size_t(&other_thread->range_length)) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t tile_index_ij_kl = fxdiv_divide_size_t(linear_index, tile_range_kl);
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(tile_index_ij_kl.quotient, range_j);
+ const struct fxdiv_result_size_t tile_index_k_l = fxdiv_divide_size_t(tile_index_ij_kl.remainder, tile_range_l);
+ const size_t start_k = tile_index_k_l.quotient * tile_k;
+ const size_t start_l = tile_index_k_l.remainder * tile_l;
+ task(argument, uarch_index, index_i_j.quotient, index_i_j.remainder, start_k, start_l, min(range_k - start_k, tile_k), min(range_l - start_l, tile_l));
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+static void thread_parallelize_5d(struct pthreadpool* threadpool, struct thread_info* thread) {
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_5d_t task = (pthreadpool_task_5d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t range_lm = threadpool->params.parallelize_5d.range_lm;
+ const struct fxdiv_result_size_t index_ijk_lm = fxdiv_divide_size_t(range_start, range_lm);
+ const struct fxdiv_divisor_size_t range_k = threadpool->params.parallelize_5d.range_k;
+ const struct fxdiv_result_size_t index_ij_k = fxdiv_divide_size_t(index_ijk_lm.quotient, range_k);
+ const struct fxdiv_divisor_size_t range_m = threadpool->params.parallelize_5d.range_m;
+ const struct fxdiv_result_size_t index_l_m = fxdiv_divide_size_t(index_ijk_lm.remainder, range_m);
+ const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_5d.range_j;
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_k.quotient, range_j);
+ size_t i = index_i_j.quotient;
+ size_t j = index_i_j.remainder;
+ size_t k = index_ij_k.remainder;
+ size_t l = index_l_m.quotient;
+ size_t m = index_l_m.remainder;
+
+ const size_t range_l = threadpool->params.parallelize_5d.range_l;
+ while (pthreadpool_try_decrement_relaxed_size_t(&thread->range_length)) {
+ task(argument, i, j, k, l, m);
+ if (++m == range_m.value) {
+ m = 0;
+ if (++l == range_l) {
+ l = 0;
+ if (++k == range_k.value) {
+ k = 0;
+ if (++j == range_j.value) {
+ j = 0;
+ i += 1;
+ }
+ }
+ }
+ }
+ }
+
+ /* There still may be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ const size_t threads_count = threadpool->threads_count.value;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_try_decrement_relaxed_size_t(&other_thread->range_length)) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t index_ijk_lm = fxdiv_divide_size_t(linear_index, range_lm);
+ const struct fxdiv_result_size_t index_ij_k = fxdiv_divide_size_t(index_ijk_lm.quotient, range_k);
+ const struct fxdiv_result_size_t index_l_m = fxdiv_divide_size_t(index_ijk_lm.remainder, range_m);
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_k.quotient, range_j);
+ task(argument, index_i_j.quotient, index_i_j.remainder, index_ij_k.remainder, index_l_m.quotient, index_l_m.remainder);
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+static void thread_parallelize_5d_tile_1d(struct pthreadpool* threadpool, struct thread_info* thread) {
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_5d_tile_1d_t task = (pthreadpool_task_5d_tile_1d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t tile_range_m = threadpool->params.parallelize_5d_tile_1d.tile_range_m;
+ const struct fxdiv_result_size_t tile_index_ijkl_m = fxdiv_divide_size_t(range_start, tile_range_m);
+ const struct fxdiv_divisor_size_t range_kl = threadpool->params.parallelize_5d_tile_1d.range_kl;
+ const struct fxdiv_result_size_t index_ij_kl = fxdiv_divide_size_t(tile_index_ijkl_m.quotient, range_kl);
+ const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_5d_tile_1d.range_j;
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_kl.quotient, range_j);
+ const struct fxdiv_divisor_size_t range_l = threadpool->params.parallelize_5d_tile_1d.range_l;
+ const struct fxdiv_result_size_t index_k_l = fxdiv_divide_size_t(index_ij_kl.remainder, range_l);
+ const size_t tile_m = threadpool->params.parallelize_5d_tile_1d.tile_m;
+ size_t i = index_i_j.quotient;
+ size_t j = index_i_j.remainder;
+ size_t k = index_k_l.quotient;
+ size_t l = index_k_l.remainder;
+ size_t start_m = tile_index_ijkl_m.remainder * tile_m;
+
+ const size_t range_m = threadpool->params.parallelize_5d_tile_1d.range_m;
+ const size_t range_k = threadpool->params.parallelize_5d_tile_1d.range_k;
+ while (pthreadpool_try_decrement_relaxed_size_t(&thread->range_length)) {
+ task(argument, i, j, k, l, start_m, min(range_m - start_m, tile_m));
+ start_m += tile_m;
+ if (start_m >= range_m) {
+ start_m = 0;
+ if (++l == range_l.value) {
+ l = 0;
+ if (++k == range_k) {
+ k = 0;
+ if (++j == range_j.value) {
+ j = 0;
+ i += 1;
+ }
+ }
+ }
+ }
+ }
+
+ /* There still may be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ const size_t threads_count = threadpool->threads_count.value;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_try_decrement_relaxed_size_t(&other_thread->range_length)) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t tile_index_ijkl_m = fxdiv_divide_size_t(linear_index, tile_range_m);
+ const struct fxdiv_result_size_t index_ij_kl = fxdiv_divide_size_t(tile_index_ijkl_m.quotient, range_kl);
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_kl.quotient, range_j);
+ const struct fxdiv_result_size_t index_k_l = fxdiv_divide_size_t(index_ij_kl.remainder, range_l);
+			const size_t start_m = tile_index_ijkl_m.remainder * tile_m;
+ task(argument, index_i_j.quotient, index_i_j.remainder, index_k_l.quotient, index_k_l.remainder, start_m,
+ min(range_m - start_m, tile_m));
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+static void thread_parallelize_5d_tile_2d(struct pthreadpool* threadpool, struct thread_info* thread) {
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_5d_tile_2d_t task = (pthreadpool_task_5d_tile_2d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t tile_range_lm = threadpool->params.parallelize_5d_tile_2d.tile_range_lm;
+ const struct fxdiv_result_size_t tile_index_ijk_lm = fxdiv_divide_size_t(range_start, tile_range_lm);
+ const struct fxdiv_divisor_size_t range_k = threadpool->params.parallelize_5d_tile_2d.range_k;
+ const struct fxdiv_result_size_t index_ij_k = fxdiv_divide_size_t(tile_index_ijk_lm.quotient, range_k);
+ const struct fxdiv_divisor_size_t tile_range_m = threadpool->params.parallelize_5d_tile_2d.tile_range_m;
+ const struct fxdiv_result_size_t tile_index_l_m = fxdiv_divide_size_t(tile_index_ijk_lm.remainder, tile_range_m);
+ const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_5d_tile_2d.range_j;
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_k.quotient, range_j);
+ const size_t tile_l = threadpool->params.parallelize_5d_tile_2d.tile_l;
+ const size_t tile_m = threadpool->params.parallelize_5d_tile_2d.tile_m;
+ size_t i = index_i_j.quotient;
+ size_t j = index_i_j.remainder;
+ size_t k = index_ij_k.remainder;
+ size_t start_l = tile_index_l_m.quotient * tile_l;
+ size_t start_m = tile_index_l_m.remainder * tile_m;
+
+ const size_t range_m = threadpool->params.parallelize_5d_tile_2d.range_m;
+ const size_t range_l = threadpool->params.parallelize_5d_tile_2d.range_l;
+ while (pthreadpool_try_decrement_relaxed_size_t(&thread->range_length)) {
+ task(argument, i, j, k, start_l, start_m, min(range_l - start_l, tile_l), min(range_m - start_m, tile_m));
+ start_m += tile_m;
+ if (start_m >= range_m) {
+ start_m = 0;
+ start_l += tile_l;
+ if (start_l >= range_l) {
+ start_l = 0;
+ if (++k == range_k.value) {
+ k = 0;
+ if (++j == range_j.value) {
+ j = 0;
+ i += 1;
+ }
+ }
+ }
+ }
+ }
+
+ /* There still may be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ const size_t threads_count = threadpool->threads_count.value;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_try_decrement_relaxed_size_t(&other_thread->range_length)) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t tile_index_ijk_lm = fxdiv_divide_size_t(linear_index, tile_range_lm);
+ const struct fxdiv_result_size_t index_ij_k = fxdiv_divide_size_t(tile_index_ijk_lm.quotient, range_k);
+ const struct fxdiv_result_size_t tile_index_l_m = fxdiv_divide_size_t(tile_index_ijk_lm.remainder, tile_range_m);
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_k.quotient, range_j);
+ const size_t start_l = tile_index_l_m.quotient * tile_l;
+ const size_t start_m = tile_index_l_m.remainder * tile_m;
+ task(argument, index_i_j.quotient, index_i_j.remainder, index_ij_k.remainder,
+ start_l, start_m, min(range_l - start_l, tile_l), min(range_m - start_m, tile_m));
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+static void thread_parallelize_6d(struct pthreadpool* threadpool, struct thread_info* thread) {
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_6d_t task = (pthreadpool_task_6d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t range_lmn = threadpool->params.parallelize_6d.range_lmn;
+ const struct fxdiv_result_size_t index_ijk_lmn = fxdiv_divide_size_t(range_start, range_lmn);
+ const struct fxdiv_divisor_size_t range_k = threadpool->params.parallelize_6d.range_k;
+ const struct fxdiv_result_size_t index_ij_k = fxdiv_divide_size_t(index_ijk_lmn.quotient, range_k);
+ const struct fxdiv_divisor_size_t range_n = threadpool->params.parallelize_6d.range_n;
+ const struct fxdiv_result_size_t index_lm_n = fxdiv_divide_size_t(index_ijk_lmn.remainder, range_n);
+ const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_6d.range_j;
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_k.quotient, range_j);
+ const struct fxdiv_divisor_size_t range_m = threadpool->params.parallelize_6d.range_m;
+ const struct fxdiv_result_size_t index_l_m = fxdiv_divide_size_t(index_lm_n.quotient, range_m);
+ size_t i = index_i_j.quotient;
+ size_t j = index_i_j.remainder;
+ size_t k = index_ij_k.remainder;
+ size_t l = index_l_m.quotient;
+ size_t m = index_l_m.remainder;
+ size_t n = index_lm_n.remainder;
+
+ const size_t range_l = threadpool->params.parallelize_6d.range_l;
+ while (pthreadpool_try_decrement_relaxed_size_t(&thread->range_length)) {
+ task(argument, i, j, k, l, m, n);
+ if (++n == range_n.value) {
+ n = 0;
+ if (++m == range_m.value) {
+ m = 0;
+ if (++l == range_l) {
+ l = 0;
+ if (++k == range_k.value) {
+ k = 0;
+ if (++j == range_j.value) {
+ j = 0;
+ i += 1;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ /* There still may be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ const size_t threads_count = threadpool->threads_count.value;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_try_decrement_relaxed_size_t(&other_thread->range_length)) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t index_ijk_lmn = fxdiv_divide_size_t(linear_index, range_lmn);
+ const struct fxdiv_result_size_t index_ij_k = fxdiv_divide_size_t(index_ijk_lmn.quotient, range_k);
+ const struct fxdiv_result_size_t index_lm_n = fxdiv_divide_size_t(index_ijk_lmn.remainder, range_n);
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_k.quotient, range_j);
+ const struct fxdiv_result_size_t index_l_m = fxdiv_divide_size_t(index_lm_n.quotient, range_m);
+ task(argument, index_i_j.quotient, index_i_j.remainder, index_ij_k.remainder, index_l_m.quotient, index_l_m.remainder, index_lm_n.remainder);
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+static void thread_parallelize_6d_tile_1d(struct pthreadpool* threadpool, struct thread_info* thread) {
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_6d_tile_1d_t task = (pthreadpool_task_6d_tile_1d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t tile_range_lmn = threadpool->params.parallelize_6d_tile_1d.tile_range_lmn;
+ const struct fxdiv_result_size_t tile_index_ijk_lmn = fxdiv_divide_size_t(range_start, tile_range_lmn);
+ const struct fxdiv_divisor_size_t range_k = threadpool->params.parallelize_6d_tile_1d.range_k;
+ const struct fxdiv_result_size_t index_ij_k = fxdiv_divide_size_t(tile_index_ijk_lmn.quotient, range_k);
+ const struct fxdiv_divisor_size_t tile_range_n = threadpool->params.parallelize_6d_tile_1d.tile_range_n;
+ const struct fxdiv_result_size_t tile_index_lm_n = fxdiv_divide_size_t(tile_index_ijk_lmn.remainder, tile_range_n);
+ const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_6d_tile_1d.range_j;
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_k.quotient, range_j);
+ const struct fxdiv_divisor_size_t range_m = threadpool->params.parallelize_6d_tile_1d.range_m;
+ const struct fxdiv_result_size_t index_l_m = fxdiv_divide_size_t(tile_index_lm_n.quotient, range_m);
+ const size_t tile_n = threadpool->params.parallelize_6d_tile_1d.tile_n;
+ size_t i = index_i_j.quotient;
+ size_t j = index_i_j.remainder;
+ size_t k = index_ij_k.remainder;
+ size_t l = index_l_m.quotient;
+ size_t m = index_l_m.remainder;
+ size_t start_n = tile_index_lm_n.remainder * tile_n;
+
+ const size_t range_n = threadpool->params.parallelize_6d_tile_1d.range_n;
+ const size_t range_l = threadpool->params.parallelize_6d_tile_1d.range_l;
+ while (pthreadpool_try_decrement_relaxed_size_t(&thread->range_length)) {
+ task(argument, i, j, k, l, m, start_n, min(range_n - start_n, tile_n));
+ start_n += tile_n;
+ if (start_n >= range_n) {
+ start_n = 0;
+ if (++m == range_m.value) {
+ m = 0;
+ if (++l == range_l) {
+ l = 0;
+ if (++k == range_k.value) {
+ k = 0;
+ if (++j == range_j.value) {
+ j = 0;
+ i += 1;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ /* There still may be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ const size_t threads_count = threadpool->threads_count.value;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_try_decrement_relaxed_size_t(&other_thread->range_length)) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t tile_index_ijk_lmn = fxdiv_divide_size_t(linear_index, tile_range_lmn);
+ const struct fxdiv_result_size_t index_ij_k = fxdiv_divide_size_t(tile_index_ijk_lmn.quotient, range_k);
+ const struct fxdiv_result_size_t tile_index_lm_n = fxdiv_divide_size_t(tile_index_ijk_lmn.remainder, tile_range_n);
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_k.quotient, range_j);
+ const struct fxdiv_result_size_t index_l_m = fxdiv_divide_size_t(tile_index_lm_n.quotient, range_m);
+ const size_t start_n = tile_index_lm_n.remainder * tile_n;
+ task(argument, index_i_j.quotient, index_i_j.remainder, index_ij_k.remainder, index_l_m.quotient, index_l_m.remainder,
+ start_n, min(range_n - start_n, tile_n));
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
+
+static void thread_parallelize_6d_tile_2d(struct pthreadpool* threadpool, struct thread_info* thread) {
+ assert(threadpool != NULL);
+ assert(thread != NULL);
+
+ const pthreadpool_task_6d_tile_2d_t task = (pthreadpool_task_6d_tile_2d_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+ void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+ /* Process thread's own range of items */
+ const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+ const struct fxdiv_divisor_size_t tile_range_mn = threadpool->params.parallelize_6d_tile_2d.tile_range_mn;
+ const struct fxdiv_result_size_t tile_index_ijkl_mn = fxdiv_divide_size_t(range_start, tile_range_mn);
+ const struct fxdiv_divisor_size_t range_kl = threadpool->params.parallelize_6d_tile_2d.range_kl;
+ const struct fxdiv_result_size_t index_ij_kl = fxdiv_divide_size_t(tile_index_ijkl_mn.quotient, range_kl);
+ const struct fxdiv_divisor_size_t tile_range_n = threadpool->params.parallelize_6d_tile_2d.tile_range_n;
+ const struct fxdiv_result_size_t tile_index_m_n = fxdiv_divide_size_t(tile_index_ijkl_mn.remainder, tile_range_n);
+ const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_6d_tile_2d.range_j;
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_kl.quotient, range_j);
+ const struct fxdiv_divisor_size_t range_l = threadpool->params.parallelize_6d_tile_2d.range_l;
+ const struct fxdiv_result_size_t index_k_l = fxdiv_divide_size_t(index_ij_kl.remainder, range_l);
+ const size_t tile_m = threadpool->params.parallelize_6d_tile_2d.tile_m;
+ const size_t tile_n = threadpool->params.parallelize_6d_tile_2d.tile_n;
+ size_t i = index_i_j.quotient;
+ size_t j = index_i_j.remainder;
+ size_t k = index_k_l.quotient;
+ size_t l = index_k_l.remainder;
+ size_t start_m = tile_index_m_n.quotient * tile_m;
+ size_t start_n = tile_index_m_n.remainder * tile_n;
+
+ const size_t range_n = threadpool->params.parallelize_6d_tile_2d.range_n;
+ const size_t range_m = threadpool->params.parallelize_6d_tile_2d.range_m;
+ const size_t range_k = threadpool->params.parallelize_6d_tile_2d.range_k;
+ while (pthreadpool_try_decrement_relaxed_size_t(&thread->range_length)) {
+ task(argument, i, j, k, l, start_m, start_n, min(range_m - start_m, tile_m), min(range_n - start_n, tile_n));
+ start_n += tile_n;
+ if (start_n >= range_n) {
+ start_n = 0;
+ start_m += tile_m;
+ if (start_m >= range_m) {
+ start_m = 0;
+ if (++l == range_l.value) {
+ l = 0;
+ if (++k == range_k) {
+ k = 0;
+ if (++j == range_j.value) {
+ j = 0;
+ i += 1;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ /* There still may be other threads with work */
+ const size_t thread_number = thread->thread_number;
+ const size_t threads_count = threadpool->threads_count.value;
+ for (size_t tid = modulo_decrement(thread_number, threads_count);
+ tid != thread_number;
+ tid = modulo_decrement(tid, threads_count))
+ {
+ struct thread_info* other_thread = &threadpool->threads[tid];
+ while (pthreadpool_try_decrement_relaxed_size_t(&other_thread->range_length)) {
+ const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+ const struct fxdiv_result_size_t tile_index_ijkl_mn = fxdiv_divide_size_t(linear_index, tile_range_mn);
+ const struct fxdiv_result_size_t index_ij_kl = fxdiv_divide_size_t(tile_index_ijkl_mn.quotient, range_kl);
+ const struct fxdiv_result_size_t tile_index_m_n = fxdiv_divide_size_t(tile_index_ijkl_mn.remainder, tile_range_n);
+ const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(index_ij_kl.quotient, range_j);
+ const struct fxdiv_result_size_t index_k_l = fxdiv_divide_size_t(index_ij_kl.remainder, range_l);
+ const size_t start_m = tile_index_m_n.quotient * tile_m;
+ const size_t start_n = tile_index_m_n.remainder * tile_n;
+ task(argument, index_i_j.quotient, index_i_j.remainder, index_k_l.quotient, index_k_l.remainder,
+ start_m, start_n, min(range_m - start_m, tile_m), min(range_n - start_n, tile_n));
+ }
+ }
+
+ /* Make changes by this thread visible to other threads */
+ pthreadpool_fence_release();
+}
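+
+/*
+ * The thread_parallelize_* functions above are the per-worker entry points.
+ * The public pthreadpool_parallelize_* wrappers below run the task inline on
+ * the calling thread when there is no pool, only one thread, or a trivially
+ * small range; otherwise they flatten the iteration space into a linear
+ * range for pthreadpool_parallelize, switching to a *_fastpath variant when
+ * the range is safely below the unsigned wrap-around threshold.
+ */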
+
+void pthreadpool_parallelize_1d(
+ struct pthreadpool* threadpool,
+ pthreadpool_task_1d_t task,
+ void* argument,
+ size_t range,
+ uint32_t flags)
+{
+ size_t threads_count;
+ if (threadpool == NULL || (threads_count = threadpool->threads_count.value) <= 1 || range <= 1) {
+ /* No thread pool used: execute task sequentially on the calling thread */
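+		/*
+		 * PTHREADPOOL_FLAG_DISABLE_DENORMALS: save the caller's FPU state, run
+		 * the task with denormalized floats flushed to zero, and restore the
+		 * saved state afterwards.
+		 */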
+ struct fpu_state saved_fpu_state = { 0 };
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ saved_fpu_state = get_fpu_state();
+ disable_fpu_denormals();
+ }
+ for (size_t i = 0; i < range; i++) {
+ task(argument, i);
+ }
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ set_fpu_state(saved_fpu_state);
+ }
+ } else {
+ thread_function_t parallelize_1d = &thread_parallelize_1d;
+ #if PTHREADPOOL_USE_FASTPATH
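+			/*
+			 * Unsigned negation: -threads_count wraps around to
+			 * SIZE_MAX - threads_count + 1, so the fastpath is chosen only when
+			 * range + threads_count cannot overflow size_t; presumably this
+			 * keeps the fastpath's atomic counters from wrapping even if each
+			 * thread over-decrements once.
+			 */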
+ const size_t range_threshold = -threads_count;
+ if (range < range_threshold) {
+ parallelize_1d = &pthreadpool_thread_parallelize_1d_fastpath;
+ }
+ #endif
+ pthreadpool_parallelize(
+ threadpool, parallelize_1d, NULL, 0,
+ (void*) task, argument, range, flags);
+ }
+}
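+
+/*
+ * Illustrative usage sketch (callback and variable names are hypothetical,
+ * not part of the library):
+ *
+ *   static void scale_item(void* ctx, size_t i) {
+ *     ((float*) ctx)[i] *= 2.0f;
+ *   }
+ *
+ *   pthreadpool_parallelize_1d(threadpool, scale_item, array, count, 0);
+ *
+ * Every index in [0, count) is processed exactly once: inline on the calling
+ * thread when threadpool is NULL, has a single thread, or count <= 1, and
+ * distributed across the pool's threads otherwise.
+ */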
+
+void pthreadpool_parallelize_1d_with_uarch(
+ pthreadpool_t threadpool,
+ pthreadpool_task_1d_with_id_t task,
+ void* argument,
+ uint32_t default_uarch_index,
+ uint32_t max_uarch_index,
+ size_t range,
+ uint32_t flags)
+{
+ size_t threads_count;
+ if (threadpool == NULL || (threads_count = threadpool->threads_count.value) <= 1 || range <= 1) {
+ /* No thread pool used: execute task sequentially on the calling thread */
+
+ uint32_t uarch_index = default_uarch_index;
+ #if PTHREADPOOL_USE_CPUINFO
+ uarch_index = cpuinfo_get_current_uarch_index_with_default(default_uarch_index);
+ if (uarch_index > max_uarch_index) {
+ uarch_index = default_uarch_index;
+ }
+ #endif
+
+ struct fpu_state saved_fpu_state = { 0 };
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ saved_fpu_state = get_fpu_state();
+ disable_fpu_denormals();
+ }
+ for (size_t i = 0; i < range; i++) {
+ task(argument, uarch_index, i);
+ }
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ set_fpu_state(saved_fpu_state);
+ }
+ } else {
+ const struct pthreadpool_1d_with_uarch_params params = {
+ .default_uarch_index = default_uarch_index,
+ .max_uarch_index = max_uarch_index,
+ };
+ thread_function_t parallelize_1d_with_uarch = &thread_parallelize_1d_with_uarch;
+ #if PTHREADPOOL_USE_FASTPATH
+ const size_t range_threshold = -threads_count;
+ if (range < range_threshold) {
+ parallelize_1d_with_uarch = &pthreadpool_thread_parallelize_1d_with_uarch_fastpath;
+ }
+ #endif
+ pthreadpool_parallelize(
+ threadpool, parallelize_1d_with_uarch, &params, sizeof(params),
+ task, argument, range, flags);
+ }
+}
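+
+/*
+ * The *_with_uarch variants additionally pass a microarchitecture index to
+ * the task, so it can dispatch to per-uarch kernels on heterogeneous (e.g.
+ * big.LITTLE) systems; default_uarch_index is substituted when cpuinfo is
+ * unavailable or reports an index above max_uarch_index.
+ */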
+
+void pthreadpool_parallelize_1d_tile_1d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_1d_tile_1d_t task,
+ void* argument,
+ size_t range,
+ size_t tile,
+ uint32_t flags)
+{
+ size_t threads_count;
+ if (threadpool == NULL || (threads_count = threadpool->threads_count.value) <= 1 || range <= tile) {
+ /* No thread pool used: execute task sequentially on the calling thread */
+ struct fpu_state saved_fpu_state = { 0 };
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ saved_fpu_state = get_fpu_state();
+ disable_fpu_denormals();
+ }
+ for (size_t i = 0; i < range; i += tile) {
+ task(argument, i, min(range - i, tile));
+ }
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ set_fpu_state(saved_fpu_state);
+ }
+ } else {
+ const size_t tile_range = divide_round_up(range, tile);
+ const struct pthreadpool_1d_tile_1d_params params = {
+ .range = range,
+ .tile = tile,
+ };
+ thread_function_t parallelize_1d_tile_1d = &thread_parallelize_1d_tile_1d;
+ #if PTHREADPOOL_USE_FASTPATH
+ const size_t range_threshold = -threads_count;
+ if (range < range_threshold) {
+ parallelize_1d_tile_1d = &pthreadpool_thread_parallelize_1d_tile_1d_fastpath;
+ }
+ #endif
+ pthreadpool_parallelize(
+ threadpool, parallelize_1d_tile_1d, &params, sizeof(params),
+ task, argument, tile_range, flags);
+ }
+}
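+
+/*
+ * Tiling example (illustrative numbers): range = 10, tile = 4 gives
+ * tile_range = divide_round_up(10, 4) = 3 invocations:
+ * task(argument, 0, 4), task(argument, 4, 4), task(argument, 8, 2);
+ * min(range - i, tile) truncates only the final, partial tile.
+ */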
+
+void pthreadpool_parallelize_2d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_2d_t task,
+ void* argument,
+ size_t range_i,
+ size_t range_j,
+ uint32_t flags)
+{
+ size_t threads_count;
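+	/*
+	 * (range_i | range_j) <= 1 is a branch-free test that all ranges are 0 or
+	 * 1: the bitwise OR is <= 1 exactly when no range has a bit set above
+	 * bit 0, i.e. when there is at most one item in total.
+	 */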
+ if (threadpool == NULL || (threads_count = threadpool->threads_count.value) <= 1 || (range_i | range_j) <= 1) {
+ /* No thread pool used: execute task sequentially on the calling thread */
+ struct fpu_state saved_fpu_state = { 0 };
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ saved_fpu_state = get_fpu_state();
+ disable_fpu_denormals();
+ }
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j++) {
+ task(argument, i, j);
+ }
+ }
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ set_fpu_state(saved_fpu_state);
+ }
+ } else {
+ const size_t range = range_i * range_j;
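+		/*
+		 * fxdiv_init_size_t precomputes a multiplicative-inverse divisor so
+		 * worker threads can split a linear index back into (i, j) without a
+		 * hardware division on the hot path.
+		 */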
+ const struct pthreadpool_2d_params params = {
+ .range_j = fxdiv_init_size_t(range_j),
+ };
+ thread_function_t parallelize_2d = &thread_parallelize_2d;
+ #if PTHREADPOOL_USE_FASTPATH
+ const size_t range_threshold = -threads_count;
+ if (range < range_threshold) {
+ parallelize_2d = &pthreadpool_thread_parallelize_2d_fastpath;
+ }
+ #endif
+ pthreadpool_parallelize(
+ threadpool, parallelize_2d, &params, sizeof(params),
+ task, argument, range, flags);
+ }
+}
+
+void pthreadpool_parallelize_2d_tile_1d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_2d_tile_1d_t task,
+ void* argument,
+ size_t range_i,
+ size_t range_j,
+ size_t tile_j,
+ uint32_t flags)
+{
+ size_t threads_count;
+ if (threadpool == NULL || (threads_count = threadpool->threads_count.value) <= 1 || (range_i <= 1 && range_j <= tile_j)) {
+ /* No thread pool used: execute task sequentially on the calling thread */
+ struct fpu_state saved_fpu_state = { 0 };
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ saved_fpu_state = get_fpu_state();
+ disable_fpu_denormals();
+ }
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j += tile_j) {
+ task(argument, i, j, min(range_j - j, tile_j));
+ }
+ }
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ set_fpu_state(saved_fpu_state);
+ }
+ } else {
+ const size_t tile_range_j = divide_round_up(range_j, tile_j);
+ const size_t tile_range = range_i * tile_range_j;
+ const struct pthreadpool_2d_tile_1d_params params = {
+ .range_j = range_j,
+ .tile_j = tile_j,
+ .tile_range_j = fxdiv_init_size_t(tile_range_j),
+ };
+ thread_function_t parallelize_2d_tile_1d = &thread_parallelize_2d_tile_1d;
+ #if PTHREADPOOL_USE_FASTPATH
+ const size_t range_threshold = -threads_count;
+ if (tile_range < range_threshold) {
+ parallelize_2d_tile_1d = &pthreadpool_thread_parallelize_2d_tile_1d_fastpath;
+ }
+ #endif
+ pthreadpool_parallelize(
+ threadpool, parallelize_2d_tile_1d, &params, sizeof(params),
+ task, argument, tile_range, flags);
+ }
+}
+
+void pthreadpool_parallelize_2d_tile_2d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_2d_tile_2d_t task,
+ void* argument,
+ size_t range_i,
+ size_t range_j,
+ size_t tile_i,
+ size_t tile_j,
+ uint32_t flags)
+{
+ size_t threads_count;
+ if (threadpool == NULL || (threads_count = threadpool->threads_count.value) <= 1 || (range_i <= tile_i && range_j <= tile_j)) {
+ /* No thread pool used: execute task sequentially on the calling thread */
+ struct fpu_state saved_fpu_state = { 0 };
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ saved_fpu_state = get_fpu_state();
+ disable_fpu_denormals();
+ }
+ for (size_t i = 0; i < range_i; i += tile_i) {
+ for (size_t j = 0; j < range_j; j += tile_j) {
+ task(argument, i, j, min(range_i - i, tile_i), min(range_j - j, tile_j));
+ }
+ }
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ set_fpu_state(saved_fpu_state);
+ }
+ } else {
+ const size_t tile_range_i = divide_round_up(range_i, tile_i);
+ const size_t tile_range_j = divide_round_up(range_j, tile_j);
+ const size_t tile_range = tile_range_i * tile_range_j;
+ const struct pthreadpool_2d_tile_2d_params params = {
+ .range_i = range_i,
+ .tile_i = tile_i,
+ .range_j = range_j,
+ .tile_j = tile_j,
+ .tile_range_j = fxdiv_init_size_t(tile_range_j),
+ };
+ thread_function_t parallelize_2d_tile_2d = &thread_parallelize_2d_tile_2d;
+ #if PTHREADPOOL_USE_FASTPATH
+ const size_t range_threshold = -threads_count;
+ if (tile_range < range_threshold) {
+ parallelize_2d_tile_2d = &pthreadpool_thread_parallelize_2d_tile_2d_fastpath;
+ }
+ #endif
+ pthreadpool_parallelize(
+ threadpool, parallelize_2d_tile_2d, &params, sizeof(params),
+ task, argument, tile_range, flags);
+ }
+}
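+
+/*
+ * Illustrative usage sketch (names are hypothetical): blocking an M x N
+ * iteration space into tiles of up to 32x32 elements, one invocation per
+ * tile:
+ *
+ *   static void process_tile(void* ctx, size_t i, size_t j,
+ *                            size_t tile_i, size_t tile_j) {
+ *     // operate on rows [i, i + tile_i) and columns [j, j + tile_j)
+ *   }
+ *
+ *   pthreadpool_parallelize_2d_tile_2d(threadpool, process_tile, &context,
+ *                                      M, N, 32, 32, 0);
+ *
+ * where 32, 32 are the tile_i and tile_j arguments.
+ */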
+
+void pthreadpool_parallelize_2d_tile_2d_with_uarch(
+ pthreadpool_t threadpool,
+ pthreadpool_task_2d_tile_2d_with_id_t task,
+ void* argument,
+ uint32_t default_uarch_index,
+ uint32_t max_uarch_index,
+ size_t range_i,
+ size_t range_j,
+ size_t tile_i,
+ size_t tile_j,
+ uint32_t flags)
+{
+ size_t threads_count;
+ if (threadpool == NULL || (threads_count = threadpool->threads_count.value) <= 1 || (range_i <= tile_i && range_j <= tile_j)) {
+ /* No thread pool used: execute task sequentially on the calling thread */
+
+ uint32_t uarch_index = default_uarch_index;
+ #if PTHREADPOOL_USE_CPUINFO
+ uarch_index = cpuinfo_get_current_uarch_index_with_default(default_uarch_index);
+ if (uarch_index > max_uarch_index) {
+ uarch_index = default_uarch_index;
+ }
+ #endif
+
+ struct fpu_state saved_fpu_state = { 0 };
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ saved_fpu_state = get_fpu_state();
+ disable_fpu_denormals();
+ }
+ for (size_t i = 0; i < range_i; i += tile_i) {
+ for (size_t j = 0; j < range_j; j += tile_j) {
+ task(argument, uarch_index, i, j, min(range_i - i, tile_i), min(range_j - j, tile_j));
+ }
+ }
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ set_fpu_state(saved_fpu_state);
+ }
+ } else {
+ const size_t tile_range_i = divide_round_up(range_i, tile_i);
+ const size_t tile_range_j = divide_round_up(range_j, tile_j);
+ const size_t tile_range = tile_range_i * tile_range_j;
+ const struct pthreadpool_2d_tile_2d_with_uarch_params params = {
+ .default_uarch_index = default_uarch_index,
+ .max_uarch_index = max_uarch_index,
+ .range_i = range_i,
+ .tile_i = tile_i,
+ .range_j = range_j,
+ .tile_j = tile_j,
+ .tile_range_j = fxdiv_init_size_t(tile_range_j),
+ };
+ thread_function_t parallelize_2d_tile_2d_with_uarch = &thread_parallelize_2d_tile_2d_with_uarch;
+ #if PTHREADPOOL_USE_FASTPATH
+ const size_t range_threshold = -threads_count;
+ if (tile_range < range_threshold) {
+ parallelize_2d_tile_2d_with_uarch = &pthreadpool_thread_parallelize_2d_tile_2d_with_uarch_fastpath;
+ }
+ #endif
+ pthreadpool_parallelize(
+ threadpool, parallelize_2d_tile_2d_with_uarch, &params, sizeof(params),
+ task, argument, tile_range, flags);
+ }
+}
+
+void pthreadpool_parallelize_3d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_3d_t task,
+ void* argument,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ uint32_t flags)
+{
+ size_t threads_count;
+ if (threadpool == NULL || (threads_count = threadpool->threads_count.value) <= 1 || (range_i | range_j | range_k) <= 1) {
+ /* No thread pool used: execute task sequentially on the calling thread */
+ struct fpu_state saved_fpu_state = { 0 };
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ saved_fpu_state = get_fpu_state();
+ disable_fpu_denormals();
+ }
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j++) {
+ for (size_t k = 0; k < range_k; k++) {
+ task(argument, i, j, k);
+ }
+ }
+ }
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ set_fpu_state(saved_fpu_state);
+ }
+ } else {
+ const size_t range = range_i * range_j * range_k;
+ const struct pthreadpool_3d_params params = {
+ .range_j = fxdiv_init_size_t(range_j),
+ .range_k = fxdiv_init_size_t(range_k),
+ };
+ thread_function_t parallelize_3d = &thread_parallelize_3d;
+ #if PTHREADPOOL_USE_FASTPATH
+ const size_t range_threshold = -threads_count;
+ if (range < range_threshold) {
+ parallelize_3d = &pthreadpool_thread_parallelize_3d_fastpath;
+ }
+ #endif
+ pthreadpool_parallelize(
+ threadpool, parallelize_3d, &params, sizeof(params),
+ task, argument, range, flags);
+ }
+}
+
+void pthreadpool_parallelize_3d_tile_1d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_3d_tile_1d_t task,
+ void* argument,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t tile_k,
+ uint32_t flags)
+{
+ size_t threads_count;
+ if (threadpool == NULL || (threads_count = threadpool->threads_count.value) <= 1 || ((range_i | range_j) <= 1 && range_k <= tile_k)) {
+ /* No thread pool used: execute task sequentially on the calling thread */
+ struct fpu_state saved_fpu_state = { 0 };
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ saved_fpu_state = get_fpu_state();
+ disable_fpu_denormals();
+ }
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j++) {
+ for (size_t k = 0; k < range_k; k += tile_k) {
+ task(argument, i, j, k, min(range_k - k, tile_k));
+ }
+ }
+ }
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ set_fpu_state(saved_fpu_state);
+ }
+ } else {
+ const size_t tile_range_k = divide_round_up(range_k, tile_k);
+ const size_t tile_range = range_i * range_j * tile_range_k;
+ const struct pthreadpool_3d_tile_1d_params params = {
+ .range_k = range_k,
+ .tile_k = tile_k,
+ .range_j = fxdiv_init_size_t(range_j),
+ .tile_range_k = fxdiv_init_size_t(tile_range_k),
+ };
+ thread_function_t parallelize_3d_tile_1d = &thread_parallelize_3d_tile_1d;
+ #if PTHREADPOOL_USE_FASTPATH
+ const size_t range_threshold = -threads_count;
+ if (tile_range < range_threshold) {
+ parallelize_3d_tile_1d = &pthreadpool_thread_parallelize_3d_tile_1d_fastpath;
+ }
+ #endif
+ pthreadpool_parallelize(
+ threadpool, parallelize_3d_tile_1d, &params, sizeof(params),
+ task, argument, tile_range, flags);
+ }
+}
+
+void pthreadpool_parallelize_3d_tile_2d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_3d_tile_2d_t task,
+ void* argument,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t tile_j,
+ size_t tile_k,
+ uint32_t flags)
+{
+ size_t threads_count;
+ if (threadpool == NULL || (threads_count = threadpool->threads_count.value) <= 1 || (range_i <= 1 && range_j <= tile_j && range_k <= tile_k)) {
+ /* No thread pool used: execute task sequentially on the calling thread */
+ struct fpu_state saved_fpu_state = { 0 };
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ saved_fpu_state = get_fpu_state();
+ disable_fpu_denormals();
+ }
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j += tile_j) {
+ for (size_t k = 0; k < range_k; k += tile_k) {
+ task(argument, i, j, k, min(range_j - j, tile_j), min(range_k - k, tile_k));
+ }
+ }
+ }
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ set_fpu_state(saved_fpu_state);
+ }
+ } else {
+ const size_t tile_range_j = divide_round_up(range_j, tile_j);
+ const size_t tile_range_k = divide_round_up(range_k, tile_k);
+ const size_t tile_range = range_i * tile_range_j * tile_range_k;
+ const struct pthreadpool_3d_tile_2d_params params = {
+ .range_j = range_j,
+ .tile_j = tile_j,
+ .range_k = range_k,
+ .tile_k = tile_k,
+ .tile_range_j = fxdiv_init_size_t(tile_range_j),
+ .tile_range_k = fxdiv_init_size_t(tile_range_k),
+ };
+ thread_function_t parallelize_3d_tile_2d = &thread_parallelize_3d_tile_2d;
+ #if PTHREADPOOL_USE_FASTPATH
+ const size_t range_threshold = -threads_count;
+ if (tile_range < range_threshold) {
+ parallelize_3d_tile_2d = &pthreadpool_thread_parallelize_3d_tile_2d_fastpath;
+ }
+ #endif
+ pthreadpool_parallelize(
+ threadpool, parallelize_3d_tile_2d, &params, sizeof(params),
+ task, argument, tile_range, flags);
+ }
+}
+
+void pthreadpool_parallelize_3d_tile_2d_with_uarch(
+ pthreadpool_t threadpool,
+ pthreadpool_task_3d_tile_2d_with_id_t task,
+ void* argument,
+ uint32_t default_uarch_index,
+ uint32_t max_uarch_index,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t tile_j,
+ size_t tile_k,
+ uint32_t flags)
+{
+ size_t threads_count;
+ if (threadpool == NULL || (threads_count = threadpool->threads_count.value) <= 1 || (range_i <= 1 && range_j <= tile_j && range_k <= tile_k)) {
+ /* No thread pool used: execute task sequentially on the calling thread */
+
+ uint32_t uarch_index = default_uarch_index;
+ #if PTHREADPOOL_USE_CPUINFO
+ uarch_index = cpuinfo_get_current_uarch_index_with_default(default_uarch_index);
+ if (uarch_index > max_uarch_index) {
+ uarch_index = default_uarch_index;
+ }
+ #endif
+
+ struct fpu_state saved_fpu_state = { 0 };
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ saved_fpu_state = get_fpu_state();
+ disable_fpu_denormals();
+ }
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j += tile_j) {
+ for (size_t k = 0; k < range_k; k += tile_k) {
+ task(argument, uarch_index, i, j, k, min(range_j - j, tile_j), min(range_k - k, tile_k));
+ }
+ }
+ }
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ set_fpu_state(saved_fpu_state);
+ }
+ } else {
+ const size_t tile_range_j = divide_round_up(range_j, tile_j);
+ const size_t tile_range_k = divide_round_up(range_k, tile_k);
+ const size_t tile_range = range_i * tile_range_j * tile_range_k;
+ const struct pthreadpool_3d_tile_2d_with_uarch_params params = {
+ .default_uarch_index = default_uarch_index,
+ .max_uarch_index = max_uarch_index,
+ .range_j = range_j,
+ .tile_j = tile_j,
+ .range_k = range_k,
+ .tile_k = tile_k,
+ .tile_range_j = fxdiv_init_size_t(tile_range_j),
+ .tile_range_k = fxdiv_init_size_t(tile_range_k),
+ };
+ thread_function_t parallelize_3d_tile_2d_with_uarch = &thread_parallelize_3d_tile_2d_with_uarch;
+ #if PTHREADPOOL_USE_FASTPATH
+ const size_t range_threshold = -threads_count;
+ if (tile_range < range_threshold) {
+ parallelize_3d_tile_2d_with_uarch = &pthreadpool_thread_parallelize_3d_tile_2d_with_uarch_fastpath;
+ }
+ #endif
+ pthreadpool_parallelize(
+ threadpool, parallelize_3d_tile_2d_with_uarch, &params, sizeof(params),
+ task, argument, tile_range, flags);
+ }
+}
+
+void pthreadpool_parallelize_4d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_4d_t task,
+ void* argument,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ uint32_t flags)
+{
+ size_t threads_count;
+ if (threadpool == NULL || (threads_count = threadpool->threads_count.value) <= 1 || (range_i | range_j | range_k | range_l) <= 1) {
+ /* No thread pool used: execute task sequentially on the calling thread */
+ struct fpu_state saved_fpu_state = { 0 };
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ saved_fpu_state = get_fpu_state();
+ disable_fpu_denormals();
+ }
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j++) {
+ for (size_t k = 0; k < range_k; k++) {
+ for (size_t l = 0; l < range_l; l++) {
+ task(argument, i, j, k, l);
+ }
+ }
+ }
+ }
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ set_fpu_state(saved_fpu_state);
+ }
+ } else {
+ const size_t range_kl = range_k * range_l;
+ const size_t range = range_i * range_j * range_kl;
+ const struct pthreadpool_4d_params params = {
+ .range_k = range_k,
+ .range_j = fxdiv_init_size_t(range_j),
+ .range_kl = fxdiv_init_size_t(range_kl),
+ .range_l = fxdiv_init_size_t(range_l),
+ };
+ thread_function_t parallelize_4d = &thread_parallelize_4d;
+ #if PTHREADPOOL_USE_FASTPATH
+ const size_t range_threshold = -threads_count;
+ if (range < range_threshold) {
+ parallelize_4d = &pthreadpool_thread_parallelize_4d_fastpath;
+ }
+ #endif
+ pthreadpool_parallelize(
+ threadpool, parallelize_4d, &params, sizeof(params),
+ task, argument, range, flags);
+ }
+}
+
+void pthreadpool_parallelize_4d_tile_1d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_4d_tile_1d_t task,
+ void* argument,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ size_t tile_l,
+ uint32_t flags)
+{
+ size_t threads_count;
+ if (threadpool == NULL || (threads_count = threadpool->threads_count.value) <= 1 || ((range_i | range_j | range_k) <= 1 && range_l <= tile_l)) {
+ /* No thread pool used: execute task sequentially on the calling thread */
+ struct fpu_state saved_fpu_state = { 0 };
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ saved_fpu_state = get_fpu_state();
+ disable_fpu_denormals();
+ }
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j++) {
+ for (size_t k = 0; k < range_k; k++) {
+ for (size_t l = 0; l < range_l; l += tile_l) {
+ task(argument, i, j, k, l, min(range_l - l, tile_l));
+ }
+ }
+ }
+ }
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ set_fpu_state(saved_fpu_state);
+ }
+ } else {
+ const size_t tile_range_l = divide_round_up(range_l, tile_l);
+ const size_t tile_range_kl = range_k * tile_range_l;
+ const size_t tile_range = range_i * range_j * tile_range_kl;
+ const struct pthreadpool_4d_tile_1d_params params = {
+ .range_k = range_k,
+ .range_l = range_l,
+ .tile_l = tile_l,
+ .range_j = fxdiv_init_size_t(range_j),
+ .tile_range_kl = fxdiv_init_size_t(tile_range_kl),
+ .tile_range_l = fxdiv_init_size_t(tile_range_l),
+ };
+ thread_function_t parallelize_4d_tile_1d = &thread_parallelize_4d_tile_1d;
+ #if PTHREADPOOL_USE_FASTPATH
+ const size_t range_threshold = -threads_count;
+ if (tile_range < range_threshold) {
+ parallelize_4d_tile_1d = &pthreadpool_thread_parallelize_4d_tile_1d_fastpath;
+ }
+ #endif
+ pthreadpool_parallelize(
+ threadpool, parallelize_4d_tile_1d, &params, sizeof(params),
+ task, argument, tile_range, flags);
+ }
+}
+
+void pthreadpool_parallelize_4d_tile_2d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_4d_tile_2d_t task,
+ void* argument,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ size_t tile_k,
+ size_t tile_l,
+ uint32_t flags)
+{
+ size_t threads_count;
+ if (threadpool == NULL || (threads_count = threadpool->threads_count.value) <= 1 || ((range_i | range_j) <= 1 && range_k <= tile_k && range_l <= tile_l)) {
+ /* No thread pool used: execute task sequentially on the calling thread */
+ struct fpu_state saved_fpu_state = { 0 };
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ saved_fpu_state = get_fpu_state();
+ disable_fpu_denormals();
+ }
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j++) {
+ for (size_t k = 0; k < range_k; k += tile_k) {
+ for (size_t l = 0; l < range_l; l += tile_l) {
+ task(argument, i, j, k, l,
+ min(range_k - k, tile_k), min(range_l - l, tile_l));
+ }
+ }
+ }
+ }
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ set_fpu_state(saved_fpu_state);
+ }
+ } else {
+ const size_t tile_range_l = divide_round_up(range_l, tile_l);
+ const size_t tile_range_kl = divide_round_up(range_k, tile_k) * tile_range_l;
+ const size_t tile_range = range_i * range_j * tile_range_kl;
+ const struct pthreadpool_4d_tile_2d_params params = {
+ .range_k = range_k,
+ .tile_k = tile_k,
+ .range_l = range_l,
+ .tile_l = tile_l,
+ .range_j = fxdiv_init_size_t(range_j),
+ .tile_range_kl = fxdiv_init_size_t(tile_range_kl),
+ .tile_range_l = fxdiv_init_size_t(tile_range_l),
+ };
+ thread_function_t parallelize_4d_tile_2d = &thread_parallelize_4d_tile_2d;
+ #if PTHREADPOOL_USE_FASTPATH
+ const size_t range_threshold = -threads_count;
+ if (tile_range < range_threshold) {
+ parallelize_4d_tile_2d = &pthreadpool_thread_parallelize_4d_tile_2d_fastpath;
+ }
+ #endif
+ pthreadpool_parallelize(
+ threadpool, parallelize_4d_tile_2d, &params, sizeof(params),
+ task, argument, tile_range, flags);
+ }
+}
+
+void pthreadpool_parallelize_4d_tile_2d_with_uarch(
+ pthreadpool_t threadpool,
+ pthreadpool_task_4d_tile_2d_with_id_t task,
+ void* argument,
+ uint32_t default_uarch_index,
+ uint32_t max_uarch_index,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ size_t tile_k,
+ size_t tile_l,
+ uint32_t flags)
+{
+ size_t threads_count;
+ if (threadpool == NULL || (threads_count = threadpool->threads_count.value) <= 1 || ((range_i | range_j) <= 1 && range_k <= tile_k && range_l <= tile_l)) {
+ /* No thread pool used: execute task sequentially on the calling thread */
+
+ uint32_t uarch_index = default_uarch_index;
+ #if PTHREADPOOL_USE_CPUINFO
+ uarch_index = cpuinfo_get_current_uarch_index_with_default(default_uarch_index);
+ if (uarch_index > max_uarch_index) {
+ uarch_index = default_uarch_index;
+ }
+ #endif
+
+ struct fpu_state saved_fpu_state = { 0 };
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ saved_fpu_state = get_fpu_state();
+ disable_fpu_denormals();
+ }
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j++) {
+ for (size_t k = 0; k < range_k; k += tile_k) {
+ for (size_t l = 0; l < range_l; l += tile_l) {
+ task(argument, uarch_index, i, j, k, l,
+ min(range_k - k, tile_k), min(range_l - l, tile_l));
+ }
+ }
+ }
+ }
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ set_fpu_state(saved_fpu_state);
+ }
+ } else {
+ const size_t tile_range_l = divide_round_up(range_l, tile_l);
+ const size_t tile_range_kl = divide_round_up(range_k, tile_k) * tile_range_l;
+ const size_t tile_range = range_i * range_j * tile_range_kl;
+ const struct pthreadpool_4d_tile_2d_with_uarch_params params = {
+ .default_uarch_index = default_uarch_index,
+ .max_uarch_index = max_uarch_index,
+ .range_k = range_k,
+ .tile_k = tile_k,
+ .range_l = range_l,
+ .tile_l = tile_l,
+ .range_j = fxdiv_init_size_t(range_j),
+ .tile_range_kl = fxdiv_init_size_t(tile_range_kl),
+ .tile_range_l = fxdiv_init_size_t(tile_range_l),
+ };
+ thread_function_t parallelize_4d_tile_2d_with_uarch = &thread_parallelize_4d_tile_2d_with_uarch;
+ #if PTHREADPOOL_USE_FASTPATH
+ const size_t range_threshold = -threads_count;
+ if (tile_range < range_threshold) {
+ parallelize_4d_tile_2d_with_uarch = &pthreadpool_thread_parallelize_4d_tile_2d_with_uarch_fastpath;
+ }
+ #endif
+ pthreadpool_parallelize(
+ threadpool, parallelize_4d_tile_2d_with_uarch, &params, sizeof(params),
+ task, argument, tile_range, flags);
+ }
+}
+
+void pthreadpool_parallelize_5d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_5d_t task,
+ void* argument,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ size_t range_m,
+ uint32_t flags)
+{
+ size_t threads_count;
+ if (threadpool == NULL || (threads_count = threadpool->threads_count.value) <= 1 || (range_i | range_j | range_k | range_l | range_m) <= 1) {
+ /* No thread pool used: execute task sequentially on the calling thread */
+ struct fpu_state saved_fpu_state = { 0 };
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ saved_fpu_state = get_fpu_state();
+ disable_fpu_denormals();
+ }
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j++) {
+ for (size_t k = 0; k < range_k; k++) {
+ for (size_t l = 0; l < range_l; l++) {
+ for (size_t m = 0; m < range_m; m++) {
+ task(argument, i, j, k, l, m);
+ }
+ }
+ }
+ }
+ }
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ set_fpu_state(saved_fpu_state);
+ }
+ } else {
+ const size_t range_lm = range_l * range_m;
+ const size_t range = range_i * range_j * range_k * range_lm;
+ const struct pthreadpool_5d_params params = {
+ .range_l = range_l,
+ .range_j = fxdiv_init_size_t(range_j),
+ .range_k = fxdiv_init_size_t(range_k),
+ .range_lm = fxdiv_init_size_t(range_lm),
+ .range_m = fxdiv_init_size_t(range_m),
+ };
+ thread_function_t parallelize_5d = &thread_parallelize_5d;
+ #if PTHREADPOOL_USE_FASTPATH
+ const size_t range_threshold = -threads_count;
+ if (range < range_threshold) {
+ parallelize_5d = &pthreadpool_thread_parallelize_5d_fastpath;
+ }
+ #endif
+ pthreadpool_parallelize(
+ threadpool, parallelize_5d, &params, sizeof(params),
+ task, argument, range, flags);
+ }
+}
+
+void pthreadpool_parallelize_5d_tile_1d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_5d_tile_1d_t task,
+ void* argument,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ size_t range_m,
+ size_t tile_m,
+ uint32_t flags)
+{
+ size_t threads_count;
+ if (threadpool == NULL || (threads_count = threadpool->threads_count.value) <= 1 || ((range_i | range_j | range_k | range_l) <= 1 && range_m <= tile_m)) {
+ /* No thread pool used: execute task sequentially on the calling thread */
+ struct fpu_state saved_fpu_state = { 0 };
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ saved_fpu_state = get_fpu_state();
+ disable_fpu_denormals();
+ }
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j++) {
+ for (size_t k = 0; k < range_k; k++) {
+ for (size_t l = 0; l < range_l; l++) {
+ for (size_t m = 0; m < range_m; m += tile_m) {
+ task(argument, i, j, k, l, m, min(range_m - m, tile_m));
+ }
+ }
+ }
+ }
+ }
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ set_fpu_state(saved_fpu_state);
+ }
+ } else {
+ const size_t tile_range_m = divide_round_up(range_m, tile_m);
+ const size_t range_kl = range_k * range_l;
+ const size_t tile_range = range_i * range_j * range_kl * tile_range_m;
+ const struct pthreadpool_5d_tile_1d_params params = {
+ .range_k = range_k,
+ .range_m = range_m,
+ .tile_m = tile_m,
+ .range_j = fxdiv_init_size_t(range_j),
+ .range_kl = fxdiv_init_size_t(range_kl),
+ .range_l = fxdiv_init_size_t(range_l),
+ .tile_range_m = fxdiv_init_size_t(tile_range_m),
+ };
+ thread_function_t parallelize_5d_tile_1d = &thread_parallelize_5d_tile_1d;
+ #if PTHREADPOOL_USE_FASTPATH
+ const size_t range_threshold = -threads_count;
+ if (tile_range < range_threshold) {
+ parallelize_5d_tile_1d = &pthreadpool_thread_parallelize_5d_tile_1d_fastpath;
+ }
+ #endif
+ pthreadpool_parallelize(
+ threadpool, parallelize_5d_tile_1d, &params, sizeof(params),
+ task, argument, tile_range, flags);
+ }
+}
+
+void pthreadpool_parallelize_5d_tile_2d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_5d_tile_2d_t task,
+ void* argument,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ size_t range_m,
+ size_t tile_l,
+ size_t tile_m,
+ uint32_t flags)
+{
+ size_t threads_count;
+ if (threadpool == NULL || (threads_count = threadpool->threads_count.value) <= 1 || ((range_i | range_j | range_k) <= 1 && range_l <= tile_l && range_m <= tile_m)) {
+ /* No thread pool used: execute task sequentially on the calling thread */
+ struct fpu_state saved_fpu_state = { 0 };
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ saved_fpu_state = get_fpu_state();
+ disable_fpu_denormals();
+ }
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j++) {
+ for (size_t k = 0; k < range_k; k++) {
+ for (size_t l = 0; l < range_l; l += tile_l) {
+ for (size_t m = 0; m < range_m; m += tile_m) {
+ task(argument, i, j, k, l, m,
+ min(range_l - l, tile_l), min(range_m - m, tile_m));
+ }
+ }
+ }
+ }
+ }
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ set_fpu_state(saved_fpu_state);
+ }
+ } else {
+ const size_t tile_range_m = divide_round_up(range_m, tile_m);
+ const size_t tile_range_lm = divide_round_up(range_l, tile_l) * tile_range_m;
+ const size_t tile_range = range_i * range_j * range_k * tile_range_lm;
+ const struct pthreadpool_5d_tile_2d_params params = {
+ .range_l = range_l,
+ .tile_l = tile_l,
+ .range_m = range_m,
+ .tile_m = tile_m,
+ .range_j = fxdiv_init_size_t(range_j),
+ .range_k = fxdiv_init_size_t(range_k),
+ .tile_range_lm = fxdiv_init_size_t(tile_range_lm),
+ .tile_range_m = fxdiv_init_size_t(tile_range_m),
+ };
+ thread_function_t parallelize_5d_tile_2d = &thread_parallelize_5d_tile_2d;
+ #if PTHREADPOOL_USE_FASTPATH
+ const size_t range_threshold = -threads_count;
+ if (tile_range < range_threshold) {
+ parallelize_5d_tile_2d = &pthreadpool_thread_parallelize_5d_tile_2d_fastpath;
+ }
+ #endif
+ pthreadpool_parallelize(
+ threadpool, parallelize_5d_tile_2d, &params, sizeof(params),
+ task, argument, tile_range, flags);
+ }
+}
+
+void pthreadpool_parallelize_6d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_6d_t task,
+ void* argument,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ size_t range_m,
+ size_t range_n,
+ uint32_t flags)
+{
+ size_t threads_count;
+ if (threadpool == NULL || (threads_count = threadpool->threads_count.value) <= 1 || (range_i | range_j | range_k | range_l | range_m | range_n) <= 1) {
+ /* No thread pool used: execute task sequentially on the calling thread */
+ struct fpu_state saved_fpu_state = { 0 };
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ saved_fpu_state = get_fpu_state();
+ disable_fpu_denormals();
+ }
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j++) {
+ for (size_t k = 0; k < range_k; k++) {
+ for (size_t l = 0; l < range_l; l++) {
+ for (size_t m = 0; m < range_m; m++) {
+ for (size_t n = 0; n < range_n; n++) {
+ task(argument, i, j, k, l, m, n);
+ }
+ }
+ }
+ }
+ }
+ }
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ set_fpu_state(saved_fpu_state);
+ }
+ } else {
+ const size_t range_lmn = range_l * range_m * range_n;
+ const size_t range = range_i * range_j * range_k * range_lmn;
+ const struct pthreadpool_6d_params params = {
+ .range_l = range_l,
+ .range_j = fxdiv_init_size_t(range_j),
+ .range_k = fxdiv_init_size_t(range_k),
+ .range_lmn = fxdiv_init_size_t(range_lmn),
+ .range_m = fxdiv_init_size_t(range_m),
+ .range_n = fxdiv_init_size_t(range_n),
+ };
+ thread_function_t parallelize_6d = &thread_parallelize_6d;
+ #if PTHREADPOOL_USE_FASTPATH
+ const size_t range_threshold = -threads_count;
+ if (range < range_threshold) {
+ parallelize_6d = &pthreadpool_thread_parallelize_6d_fastpath;
+ }
+ #endif
+ pthreadpool_parallelize(
+ threadpool, parallelize_6d, &params, sizeof(params),
+ task, argument, range, flags);
+ }
+}
+
+void pthreadpool_parallelize_6d_tile_1d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_6d_tile_1d_t task,
+ void* argument,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ size_t range_m,
+ size_t range_n,
+ size_t tile_n,
+ uint32_t flags)
+{
+ size_t threads_count;
+ if (threadpool == NULL || (threads_count = threadpool->threads_count.value) <= 1 || ((range_i | range_j | range_k | range_l | range_m) <= 1 && range_n <= tile_n)) {
+ /* No thread pool used: execute task sequentially on the calling thread */
+ struct fpu_state saved_fpu_state = { 0 };
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ saved_fpu_state = get_fpu_state();
+ disable_fpu_denormals();
+ }
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j++) {
+ for (size_t k = 0; k < range_k; k++) {
+ for (size_t l = 0; l < range_l; l++) {
+ for (size_t m = 0; m < range_m; m++) {
+ for (size_t n = 0; n < range_n; n += tile_n) {
+ task(argument, i, j, k, l, m, n, min(range_n - n, tile_n));
+ }
+ }
+ }
+ }
+ }
+ }
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ set_fpu_state(saved_fpu_state);
+ }
+ } else {
+ const size_t tile_range_n = divide_round_up(range_n, tile_n);
+ const size_t tile_range_lmn = range_l * range_m * tile_range_n;
+ const size_t tile_range = range_i * range_j * range_k * tile_range_lmn;
+ const struct pthreadpool_6d_tile_1d_params params = {
+ .range_l = range_l,
+ .range_n = range_n,
+ .tile_n = tile_n,
+ .range_j = fxdiv_init_size_t(range_j),
+ .range_k = fxdiv_init_size_t(range_k),
+ .tile_range_lmn = fxdiv_init_size_t(tile_range_lmn),
+ .range_m = fxdiv_init_size_t(range_m),
+ .tile_range_n = fxdiv_init_size_t(tile_range_n),
+ };
+ thread_function_t parallelize_6d_tile_1d = &thread_parallelize_6d_tile_1d;
+ #if PTHREADPOOL_USE_FASTPATH
+ const size_t range_threshold = -threads_count;
+ if (tile_range < range_threshold) {
+ parallelize_6d_tile_1d = &pthreadpool_thread_parallelize_6d_tile_1d_fastpath;
+ }
+ #endif
+ pthreadpool_parallelize(
+ threadpool, parallelize_6d_tile_1d, &params, sizeof(params),
+ task, argument, tile_range, flags);
+ }
+}
+
+void pthreadpool_parallelize_6d_tile_2d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_6d_tile_2d_t task,
+ void* argument,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ size_t range_m,
+ size_t range_n,
+ size_t tile_m,
+ size_t tile_n,
+ uint32_t flags)
+{
+ size_t threads_count;
+ if (threadpool == NULL || (threads_count = threadpool->threads_count.value) <= 1 || ((range_i | range_j | range_k | range_l) <= 1 && range_m <= tile_m && range_n <= tile_n)) {
+ /* No thread pool used: execute task sequentially on the calling thread */
+ struct fpu_state saved_fpu_state = { 0 };
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ saved_fpu_state = get_fpu_state();
+ disable_fpu_denormals();
+ }
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j++) {
+ for (size_t k = 0; k < range_k; k++) {
+ for (size_t l = 0; l < range_l; l++) {
+ for (size_t m = 0; m < range_m; m += tile_m) {
+ for (size_t n = 0; n < range_n; n += tile_n) {
+ task(argument, i, j, k, l, m, n,
+ min(range_m - m, tile_m), min(range_n - n, tile_n));
+ }
+ }
+ }
+ }
+ }
+ }
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ set_fpu_state(saved_fpu_state);
+ }
+ } else {
+ const size_t range_kl = range_k * range_l;
+ const size_t tile_range_n = divide_round_up(range_n, tile_n);
+ const size_t tile_range_mn = divide_round_up(range_m, tile_m) * tile_range_n;
+ const size_t tile_range = range_i * range_j * range_kl * tile_range_mn;
+ const struct pthreadpool_6d_tile_2d_params params = {
+ .range_k = range_k,
+ .range_m = range_m,
+ .tile_m = tile_m,
+ .range_n = range_n,
+ .tile_n = tile_n,
+ .range_j = fxdiv_init_size_t(range_j),
+ .range_kl = fxdiv_init_size_t(range_kl),
+ .range_l = fxdiv_init_size_t(range_l),
+ .tile_range_mn = fxdiv_init_size_t(tile_range_mn),
+ .tile_range_n = fxdiv_init_size_t(tile_range_n),
+ };
+ thread_function_t parallelize_6d_tile_2d = &thread_parallelize_6d_tile_2d;
+ #if PTHREADPOOL_USE_FASTPATH
+ const size_t range_threshold = -threads_count;
+ if (tile_range < range_threshold) {
+ parallelize_6d_tile_2d = &pthreadpool_thread_parallelize_6d_tile_2d_fastpath;
+ }
+ #endif
+ pthreadpool_parallelize(
+ threadpool, parallelize_6d_tile_2d, &params, sizeof(params),
+ task, argument, tile_range, flags);
+ }
+}
diff --git a/src/pthreads.c b/src/pthreads.c
new file mode 100644
index 0000000..430ca79
--- /dev/null
+++ b/src/pthreads.c
@@ -0,0 +1,461 @@
+/* Standard C headers */
+#include <assert.h>
+#include <limits.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+/* Configuration header */
+#include "threadpool-common.h"
+
+/* POSIX headers */
+#include <pthread.h>
+#include <unistd.h>
+
+/* Futex-specific headers */
+#if PTHREADPOOL_USE_FUTEX
+ #if defined(__linux__)
+ #include <sys/syscall.h>
+ #include <linux/futex.h>
+
+ /* Old Android NDKs do not define SYS_futex and FUTEX_PRIVATE_FLAG */
+ #ifndef SYS_futex
+ #define SYS_futex __NR_futex
+ #endif
+ #ifndef FUTEX_PRIVATE_FLAG
+ #define FUTEX_PRIVATE_FLAG 128
+ #endif
+ #elif defined(__EMSCRIPTEN__)
+ /* math.h for INFINITY constant */
+ #include <math.h>
+
+ #include <emscripten/threading.h>
+ #else
+ #error "Platform-specific implementation of futex_wait and futex_wake_all required"
+ #endif
+#endif
+
+/* Windows-specific headers */
+#ifdef _WIN32
+ #include <sysinfoapi.h>
+#endif
+
+/* Dependencies */
+#if PTHREADPOOL_USE_CPUINFO
+ #include <cpuinfo.h>
+#endif
+
+/* Public library header */
+#include <pthreadpool.h>
+
+/* Internal library headers */
+#include "threadpool-atomics.h"
+#include "threadpool-object.h"
+#include "threadpool-utils.h"
+
+
+#if PTHREADPOOL_USE_FUTEX
+ #if defined(__linux__)
+ static int futex_wait(pthreadpool_atomic_uint32_t* address, uint32_t value) {
+ return syscall(SYS_futex, address, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, value, NULL);
+ }
+
+ static int futex_wake_all(pthreadpool_atomic_uint32_t* address) {
+ return syscall(SYS_futex, address, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, INT_MAX);
+ }
+ #elif defined(__EMSCRIPTEN__)
+ static int futex_wait(pthreadpool_atomic_uint32_t* address, uint32_t value) {
+ return emscripten_futex_wait((volatile void*) address, value, INFINITY);
+ }
+
+ static int futex_wake_all(pthreadpool_atomic_uint32_t* address) {
+ return emscripten_futex_wake((volatile void*) address, INT_MAX);
+ }
+ #else
+ #error "Platform-specific implementation of futex_wait and futex_wake_all required"
+ #endif
+#endif
+
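+/*
+ * Note on the futex protocol used below: futex_wait returns immediately if
+ * *address no longer equals the expected value, so re-checking a variable and
+ * then sleeping on it cannot lose a wake-up that lands in between.
+ */
+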
+static void checkin_worker_thread(struct pthreadpool* threadpool) {
+ #if PTHREADPOOL_USE_FUTEX
+ if (pthreadpool_decrement_fetch_relaxed_size_t(&threadpool->active_threads) == 0) {
+ pthreadpool_store_release_uint32_t(&threadpool->has_active_threads, 0);
+ futex_wake_all(&threadpool->has_active_threads);
+ }
+ #else
+ pthread_mutex_lock(&threadpool->completion_mutex);
+ if (pthreadpool_decrement_fetch_release_size_t(&threadpool->active_threads) == 0) {
+ pthread_cond_signal(&threadpool->completion_condvar);
+ }
+ pthread_mutex_unlock(&threadpool->completion_mutex);
+ #endif
+}
+
+static void wait_worker_threads(struct pthreadpool* threadpool) {
+ /* Initial check */
+ #if PTHREADPOOL_USE_FUTEX
+ uint32_t has_active_threads = pthreadpool_load_acquire_uint32_t(&threadpool->has_active_threads);
+ if (has_active_threads == 0) {
+ return;
+ }
+ #else
+ size_t active_threads = pthreadpool_load_acquire_size_t(&threadpool->active_threads);
+ if (active_threads == 0) {
+ return;
+ }
+ #endif
+
+ /* Spin-wait */
+ for (uint32_t i = PTHREADPOOL_SPIN_WAIT_ITERATIONS; i != 0; i--) {
+ pthreadpool_yield();
+
+ #if PTHREADPOOL_USE_FUTEX
+ has_active_threads = pthreadpool_load_acquire_uint32_t(&threadpool->has_active_threads);
+ if (has_active_threads == 0) {
+ return;
+ }
+ #else
+ active_threads = pthreadpool_load_acquire_size_t(&threadpool->active_threads);
+ if (active_threads == 0) {
+ return;
+ }
+ #endif
+ }
+
+ /* Fall-back to mutex/futex wait */
+ #if PTHREADPOOL_USE_FUTEX
+ while ((has_active_threads = pthreadpool_load_acquire_uint32_t(&threadpool->has_active_threads)) != 0) {
+ futex_wait(&threadpool->has_active_threads, 1);
+ }
+ #else
+ pthread_mutex_lock(&threadpool->completion_mutex);
+ while (pthreadpool_load_acquire_size_t(&threadpool->active_threads) != 0) {
+ pthread_cond_wait(&threadpool->completion_condvar, &threadpool->completion_mutex);
+	}
+ pthread_mutex_unlock(&threadpool->completion_mutex);
+ #endif
+}
+
+static uint32_t wait_for_new_command(
+ struct pthreadpool* threadpool,
+ uint32_t last_command,
+ uint32_t last_flags)
+{
+ uint32_t command = pthreadpool_load_acquire_uint32_t(&threadpool->command);
+ if (command != last_command) {
+ return command;
+ }
+
+ if ((last_flags & PTHREADPOOL_FLAG_YIELD_WORKERS) == 0) {
+ /* Spin-wait loop */
+ for (uint32_t i = PTHREADPOOL_SPIN_WAIT_ITERATIONS; i != 0; i--) {
+ pthreadpool_yield();
+
+ command = pthreadpool_load_acquire_uint32_t(&threadpool->command);
+ if (command != last_command) {
+ return command;
+ }
+ }
+ }
+
+ /* Spin-wait disabled or timed out, fall back to mutex/futex wait */
+ #if PTHREADPOOL_USE_FUTEX
+ do {
+ futex_wait(&threadpool->command, last_command);
+ command = pthreadpool_load_acquire_uint32_t(&threadpool->command);
+ } while (command == last_command);
+ #else
+ /* Lock the command mutex */
+ pthread_mutex_lock(&threadpool->command_mutex);
+ /* Read the command */
+ while ((command = pthreadpool_load_acquire_uint32_t(&threadpool->command)) == last_command) {
+ /* Wait for new command */
+ pthread_cond_wait(&threadpool->command_condvar, &threadpool->command_mutex);
+ }
+	/* Unlock the command mutex */
+ pthread_mutex_unlock(&threadpool->command_mutex);
+ #endif
+ return command;
+}
+
+static void* thread_main(void* arg) {
+ struct thread_info* thread = (struct thread_info*) arg;
+ struct pthreadpool* threadpool = thread->threadpool;
+ uint32_t last_command = threadpool_command_init;
+ struct fpu_state saved_fpu_state = { 0 };
+ uint32_t flags = 0;
+
+ /* Check in */
+ checkin_worker_thread(threadpool);
+
+ /* Monitor new commands and act accordingly */
+ for (;;) {
+ uint32_t command = wait_for_new_command(threadpool, last_command, flags);
+ pthreadpool_fence_acquire();
+
+ flags = pthreadpool_load_relaxed_uint32_t(&threadpool->flags);
+
+ /* Process command */
+ switch (command & THREADPOOL_COMMAND_MASK) {
+ case threadpool_command_parallelize:
+ {
+ const thread_function_t thread_function =
+ (thread_function_t) pthreadpool_load_relaxed_void_p(&threadpool->thread_function);
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ saved_fpu_state = get_fpu_state();
+ disable_fpu_denormals();
+ }
+
+ thread_function(threadpool, thread);
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ set_fpu_state(saved_fpu_state);
+ }
+ break;
+ }
+ case threadpool_command_shutdown:
+ /* Exit immediately: the master thread is waiting on pthread_join */
+ return NULL;
+ case threadpool_command_init:
+ /* To inhibit compiler warning */
+ break;
+ }
+ /* Notify the master thread that we finished processing */
+ checkin_worker_thread(threadpool);
+ /* Update last command */
+ last_command = command;
+	}
+}
+
+struct pthreadpool* pthreadpool_create(size_t threads_count) {
+ #if PTHREADPOOL_USE_CPUINFO
+ if (!cpuinfo_initialize()) {
+ return NULL;
+ }
+ #endif
+
+ if (threads_count == 0) {
+ #if PTHREADPOOL_USE_CPUINFO
+ threads_count = cpuinfo_get_processors_count();
+ #elif defined(_SC_NPROCESSORS_ONLN)
+ threads_count = (size_t) sysconf(_SC_NPROCESSORS_ONLN);
+ #if defined(__EMSCRIPTEN_PTHREADS__)
+			/* Limit the number of threads to 8 to match the link-time PTHREAD_POOL_SIZE option */
+ if (threads_count >= 8) {
+ threads_count = 8;
+ }
+ #endif
+ #elif defined(_WIN32)
+ SYSTEM_INFO system_info;
+ ZeroMemory(&system_info, sizeof(system_info));
+ GetSystemInfo(&system_info);
+ threads_count = (size_t) system_info.dwNumberOfProcessors;
+ #else
+ #error "Platform-specific implementation of sysconf(_SC_NPROCESSORS_ONLN) required"
+ #endif
+ }
+
+ struct pthreadpool* threadpool = pthreadpool_allocate(threads_count);
+ if (threadpool == NULL) {
+ return NULL;
+ }
+ threadpool->threads_count = fxdiv_init_size_t(threads_count);
+ for (size_t tid = 0; tid < threads_count; tid++) {
+ threadpool->threads[tid].thread_number = tid;
+ threadpool->threads[tid].threadpool = threadpool;
+ }
+
+	/* A thread pool with a single thread computes everything on the caller thread. */
+ if (threads_count > 1) {
+ pthread_mutex_init(&threadpool->execution_mutex, NULL);
+ #if !PTHREADPOOL_USE_FUTEX
+ pthread_mutex_init(&threadpool->completion_mutex, NULL);
+ pthread_cond_init(&threadpool->completion_condvar, NULL);
+ pthread_mutex_init(&threadpool->command_mutex, NULL);
+ pthread_cond_init(&threadpool->command_condvar, NULL);
+ #endif
+
+ #if PTHREADPOOL_USE_FUTEX
+ pthreadpool_store_relaxed_uint32_t(&threadpool->has_active_threads, 1);
+ #endif
+ pthreadpool_store_relaxed_size_t(&threadpool->active_threads, threads_count - 1 /* caller thread */);
+
+ /* Caller thread serves as worker #0. Thus, we create system threads starting with worker #1. */
+ for (size_t tid = 1; tid < threads_count; tid++) {
+ pthread_create(&threadpool->threads[tid].thread_object, NULL, &thread_main, &threadpool->threads[tid]);
+ }
+
+ /* Wait until all threads initialize */
+ wait_worker_threads(threadpool);
+ }
+ return threadpool;
+}
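+
+/*
+ * Minimal usage sketch (illustration only; "square", "array", and "count" are
+ * hypothetical):
+ *
+ *   static void square(void* data, size_t i) {
+ *     double* array = (double*) data;
+ *     array[i] *= array[i];
+ *   }
+ *
+ *   pthreadpool_t pool = pthreadpool_create(0);  // 0 = one thread per processor
+ *   pthreadpool_parallelize_1d(pool, square, array, count, 0);  // flags = 0
+ *   pthreadpool_destroy(pool);
+ */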
+
+PTHREADPOOL_INTERNAL void pthreadpool_parallelize(
+ struct pthreadpool* threadpool,
+ thread_function_t thread_function,
+ const void* params,
+ size_t params_size,
+ void* task,
+ void* context,
+ size_t linear_range,
+ uint32_t flags)
+{
+ assert(threadpool != NULL);
+ assert(thread_function != NULL);
+ assert(task != NULL);
+ assert(linear_range > 1);
+
+ /* Protect the global threadpool structures */
+ pthread_mutex_lock(&threadpool->execution_mutex);
+
+ #if !PTHREADPOOL_USE_FUTEX
+		/* Lock the command variables to ensure that threads don't start processing before they observe the complete command with all its arguments */
+ pthread_mutex_lock(&threadpool->command_mutex);
+ #endif
+
+ /* Setup global arguments */
+ pthreadpool_store_relaxed_void_p(&threadpool->thread_function, (void*) thread_function);
+ pthreadpool_store_relaxed_void_p(&threadpool->task, task);
+ pthreadpool_store_relaxed_void_p(&threadpool->argument, context);
+ pthreadpool_store_relaxed_uint32_t(&threadpool->flags, flags);
+
+ /* Locking of completion_mutex not needed: readers are sleeping on command_condvar */
+ const struct fxdiv_divisor_size_t threads_count = threadpool->threads_count;
+ pthreadpool_store_relaxed_size_t(&threadpool->active_threads, threads_count.value - 1 /* caller thread */);
+ #if PTHREADPOOL_USE_FUTEX
+ pthreadpool_store_relaxed_uint32_t(&threadpool->has_active_threads, 1);
+ #endif
+
+ if (params_size != 0) {
+ memcpy(&threadpool->params, params, params_size);
+ pthreadpool_fence_release();
+ }
+
+ /* Spread the work between threads */
+ const struct fxdiv_result_size_t range_params = fxdiv_divide_size_t(linear_range, threads_count);
+ size_t range_start = 0;
+ for (size_t tid = 0; tid < threads_count.value; tid++) {
+ struct thread_info* thread = &threadpool->threads[tid];
+ const size_t range_length = range_params.quotient + (size_t) (tid < range_params.remainder);
+ const size_t range_end = range_start + range_length;
+ pthreadpool_store_relaxed_size_t(&thread->range_start, range_start);
+ pthreadpool_store_relaxed_size_t(&thread->range_end, range_end);
+ pthreadpool_store_relaxed_size_t(&thread->range_length, range_length);
+
+ /* The next subrange starts where the previous ended */
+ range_start = range_end;
+ }
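+
+	/*
+	 * Worked example: linear_range = 10 and threads_count.value = 4 yield
+	 * quotient 2 and remainder 2, so the subranges are [0,3), [3,6), [6,8),
+	 * and [8,10); the first "remainder" threads each take one extra element.
+	 */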
+
+ /*
+ * Update the threadpool command.
+	 * Importantly, do it after initializing the command parameters (range, task, argument, flags).
+	 * ~(threadpool->command | THREADPOOL_COMMAND_MASK) flips the bits not in the command mask
+	 * to ensure the unmasked command is different than the last command, because worker threads
+	 * monitor for changes in the unmasked command.
+ */
+ const uint32_t old_command = pthreadpool_load_relaxed_uint32_t(&threadpool->command);
+ const uint32_t new_command = ~(old_command | THREADPOOL_COMMAND_MASK) | threadpool_command_parallelize;
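+	/*
+	 * Illustration (assuming, for the example only, that the low two bits form
+	 * THREADPOOL_COMMAND_MASK): old_command = 0x00000001 gives
+	 * ~(0x00000001 | 0x00000003) = 0xFFFFFFFC, so
+	 * new_command = 0xFFFFFFFC | threadpool_command_parallelize. The bits above
+	 * the mask toggle on every dispatch, so the full 32-bit value always
+	 * changes even when the same command is issued twice in a row.
+	 */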
+
+ /*
+ * Store the command with release semantics to guarantee that if a worker thread observes
+ * the new command value, it also observes the updated command parameters.
+ *
+	 * Note: release semantics are necessary even with a condition variable, because the workers might
+	 * be waiting in a spin-loop rather than on the condition variable.
+ */
+ pthreadpool_store_release_uint32_t(&threadpool->command, new_command);
+ #if PTHREADPOOL_USE_FUTEX
+ /* Wake up the threads */
+ futex_wake_all(&threadpool->command);
+ #else
+ /* Unlock the command variables before waking up the threads for better performance */
+ pthread_mutex_unlock(&threadpool->command_mutex);
+
+ /* Wake up the threads */
+ pthread_cond_broadcast(&threadpool->command_condvar);
+ #endif
+
+ /* Save and modify FPU denormals control, if needed */
+ struct fpu_state saved_fpu_state = { 0 };
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ saved_fpu_state = get_fpu_state();
+ disable_fpu_denormals();
+ }
+
+ /* Do computations as worker #0 */
+ thread_function(threadpool, &threadpool->threads[0]);
+
+ /* Restore FPU denormals control, if needed */
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ set_fpu_state(saved_fpu_state);
+ }
+
+ /* Wait until the threads finish computation */
+ wait_worker_threads(threadpool);
+
+ /* Make changes by other threads visible to this thread */
+ pthreadpool_fence_acquire();
+
+ /* Unprotect the global threadpool structures */
+ pthread_mutex_unlock(&threadpool->execution_mutex);
+}
+
+void pthreadpool_destroy(struct pthreadpool* threadpool) {
+ if (threadpool != NULL) {
+ const size_t threads_count = threadpool->threads_count.value;
+ if (threads_count > 1) {
+ #if PTHREADPOOL_USE_FUTEX
+ pthreadpool_store_relaxed_size_t(&threadpool->active_threads, threads_count - 1 /* caller thread */);
+ pthreadpool_store_relaxed_uint32_t(&threadpool->has_active_threads, 1);
+
+ /*
+ * Store the command with release semantics to guarantee that if a worker thread observes
+ * the new command value, it also observes the updated active_threads/has_active_threads values.
+ */
+ pthreadpool_store_release_uint32_t(&threadpool->command, threadpool_command_shutdown);
+
+ /* Wake up worker threads */
+ futex_wake_all(&threadpool->command);
+ #else
+			/* Lock the command variable to ensure that threads don't shut down until both command and active_threads are updated */
+ pthread_mutex_lock(&threadpool->command_mutex);
+
+ pthreadpool_store_relaxed_size_t(&threadpool->active_threads, threads_count - 1 /* caller thread */);
+
+ /*
+ * Store the command with release semantics to guarantee that if a worker thread observes
+ * the new command value, it also observes the updated active_threads value.
+ *
+ * Note: the release fence inside pthread_mutex_unlock is insufficient,
+			 * because the workers might be waiting in a spin-loop rather than on the condition variable.
+ */
+ pthreadpool_store_release_uint32_t(&threadpool->command, threadpool_command_shutdown);
+
+ /* Wake up worker threads */
+ pthread_cond_broadcast(&threadpool->command_condvar);
+
+ /* Commit the state changes and let workers start processing */
+ pthread_mutex_unlock(&threadpool->command_mutex);
+ #endif
+
+ /* Wait until all threads return */
+ for (size_t thread = 1; thread < threads_count; thread++) {
+ pthread_join(threadpool->threads[thread].thread_object, NULL);
+ }
+
+ /* Release resources */
+ pthread_mutex_destroy(&threadpool->execution_mutex);
+ #if !PTHREADPOOL_USE_FUTEX
+ pthread_mutex_destroy(&threadpool->completion_mutex);
+ pthread_cond_destroy(&threadpool->completion_condvar);
+ pthread_mutex_destroy(&threadpool->command_mutex);
+ pthread_cond_destroy(&threadpool->command_condvar);
+ #endif
+ }
+ #if PTHREADPOOL_USE_CPUINFO
+ cpuinfo_deinitialize();
+ #endif
+ pthreadpool_deallocate(threadpool);
+ }
+}
diff --git a/src/shim.c b/src/shim.c
new file mode 100644
index 0000000..39ec884
--- /dev/null
+++ b/src/shim.c
@@ -0,0 +1,472 @@
+/* Standard C headers */
+#include <stddef.h>
+
+/* Public library header */
+#include <pthreadpool.h>
+
+/* Internal library headers */
+#include "threadpool-utils.h"
+
+
+struct pthreadpool {
+};
+
+static const struct pthreadpool static_pthreadpool = { };
+
+
+struct pthreadpool* pthreadpool_create(size_t threads_count) {
+ if (threads_count <= 1) {
+ return (struct pthreadpool*) &static_pthreadpool;
+ }
+
+ return NULL;
+}
+
+size_t pthreadpool_get_threads_count(struct pthreadpool* threadpool) {
+ return 1;
+}
+
+void pthreadpool_parallelize_1d(
+ struct pthreadpool* threadpool,
+ pthreadpool_task_1d_t task,
+ void* argument,
+ size_t range,
+ uint32_t flags)
+{
+ for (size_t i = 0; i < range; i++) {
+ task(argument, i);
+ }
+}
+
+void pthreadpool_parallelize_1d_with_uarch(
+ pthreadpool_t threadpool,
+ pthreadpool_task_1d_with_id_t task,
+ void* argument,
+ uint32_t default_uarch_index,
+ uint32_t max_uarch_index,
+ size_t range,
+ uint32_t flags)
+{
+ for (size_t i = 0; i < range; i++) {
+ task(argument, default_uarch_index, i);
+ }
+}
+
+void pthreadpool_parallelize_1d_tile_1d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_1d_tile_1d_t task,
+ void* argument,
+ size_t range,
+ size_t tile,
+ uint32_t flags)
+{
+ for (size_t i = 0; i < range; i += tile) {
+ task(argument, i, min(range - i, tile));
+ }
+}
+
+void pthreadpool_parallelize_2d(
+ struct pthreadpool* threadpool,
+ pthreadpool_task_2d_t task,
+ void* argument,
+ size_t range_i,
+ size_t range_j,
+ uint32_t flags)
+{
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j++) {
+ task(argument, i, j);
+ }
+ }
+}
+
+void pthreadpool_parallelize_2d_tile_1d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_2d_tile_1d_t task,
+ void* argument,
+ size_t range_i,
+ size_t range_j,
+ size_t tile_j,
+ uint32_t flags)
+{
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j += tile_j) {
+ task(argument, i, j, min(range_j - j, tile_j));
+ }
+ }
+}
+
+void pthreadpool_parallelize_2d_tile_2d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_2d_tile_2d_t task,
+ void* argument,
+ size_t range_i,
+ size_t range_j,
+ size_t tile_i,
+ size_t tile_j,
+ uint32_t flags)
+{
+ for (size_t i = 0; i < range_i; i += tile_i) {
+ for (size_t j = 0; j < range_j; j += tile_j) {
+ task(argument, i, j, min(range_i - i, tile_i), min(range_j - j, tile_j));
+ }
+ }
+}
+
+void pthreadpool_parallelize_2d_tile_2d_with_uarch(
+ pthreadpool_t threadpool,
+ pthreadpool_task_2d_tile_2d_with_id_t task,
+ void* argument,
+ uint32_t default_uarch_index,
+ uint32_t max_uarch_index,
+ size_t range_i,
+ size_t range_j,
+ size_t tile_i,
+ size_t tile_j,
+ uint32_t flags)
+{
+ for (size_t i = 0; i < range_i; i += tile_i) {
+ for (size_t j = 0; j < range_j; j += tile_j) {
+ task(argument, default_uarch_index, i, j,
+ min(range_i - i, tile_i), min(range_j - j, tile_j));
+ }
+ }
+}
+
+void pthreadpool_parallelize_3d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_3d_t task,
+ void* argument,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ uint32_t flags)
+{
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j++) {
+ for (size_t k = 0; k < range_k; k++) {
+ task(argument, i, j, k);
+ }
+ }
+ }
+}
+
+void pthreadpool_parallelize_3d_tile_1d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_3d_tile_1d_t task,
+ void* argument,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t tile_k,
+ uint32_t flags)
+{
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j++) {
+ for (size_t k = 0; k < range_k; k += tile_k) {
+ task(argument, i, j, k, min(range_k - k, tile_k));
+ }
+ }
+ }
+}
+
+void pthreadpool_parallelize_3d_tile_2d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_3d_tile_2d_t task,
+ void* argument,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t tile_j,
+ size_t tile_k,
+ uint32_t flags)
+{
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j += tile_j) {
+ for (size_t k = 0; k < range_k; k += tile_k) {
+ task(argument, i, j, k,
+ min(range_j - j, tile_j), min(range_k - k, tile_k));
+ }
+ }
+ }
+}
+
+void pthreadpool_parallelize_3d_tile_2d_with_uarch(
+ pthreadpool_t threadpool,
+ pthreadpool_task_3d_tile_2d_with_id_t task,
+ void* argument,
+ uint32_t default_uarch_index,
+ uint32_t max_uarch_index,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t tile_j,
+ size_t tile_k,
+ uint32_t flags)
+{
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j += tile_j) {
+ for (size_t k = 0; k < range_k; k += tile_k) {
+ task(argument, default_uarch_index, i, j, k,
+ min(range_j - j, tile_j), min(range_k - k, tile_k));
+ }
+ }
+ }
+}
+
+void pthreadpool_parallelize_4d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_4d_t task,
+ void* argument,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ uint32_t flags)
+{
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j++) {
+ for (size_t k = 0; k < range_k; k++) {
+ for (size_t l = 0; l < range_l; l++) {
+ task(argument, i, j, k, l);
+ }
+ }
+ }
+ }
+}
+
+void pthreadpool_parallelize_4d_tile_1d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_4d_tile_1d_t task,
+ void* argument,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ size_t tile_l,
+ uint32_t flags)
+{
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j++) {
+ for (size_t k = 0; k < range_k; k++) {
+ for (size_t l = 0; l < range_l; l += tile_l) {
+ task(argument, i, j, k, l, min(range_l - l, tile_l));
+ }
+ }
+ }
+ }
+}
+
+void pthreadpool_parallelize_4d_tile_2d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_4d_tile_2d_t task,
+ void* argument,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ size_t tile_k,
+ size_t tile_l,
+ uint32_t flags)
+{
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j++) {
+ for (size_t k = 0; k < range_k; k += tile_k) {
+ for (size_t l = 0; l < range_l; l += tile_l) {
+ task(argument, i, j, k, l,
+ min(range_k - k, tile_k), min(range_l - l, tile_l));
+ }
+ }
+ }
+ }
+}
+
+void pthreadpool_parallelize_4d_tile_2d_with_uarch(
+ pthreadpool_t threadpool,
+ pthreadpool_task_4d_tile_2d_with_id_t task,
+ void* argument,
+ uint32_t default_uarch_index,
+ uint32_t max_uarch_index,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ size_t tile_k,
+ size_t tile_l,
+ uint32_t flags)
+{
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j++) {
+ for (size_t k = 0; k < range_k; k += tile_k) {
+ for (size_t l = 0; l < range_l; l += tile_l) {
+ task(argument, default_uarch_index, i, j, k, l,
+ min(range_k - k, tile_k), min(range_l - l, tile_l));
+ }
+ }
+ }
+ }
+}
+
+void pthreadpool_parallelize_5d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_5d_t task,
+ void* argument,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ size_t range_m,
+ uint32_t flags)
+{
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j++) {
+ for (size_t k = 0; k < range_k; k++) {
+ for (size_t l = 0; l < range_l; l++) {
+ for (size_t m = 0; m < range_m; m++) {
+ task(argument, i, j, k, l, m);
+ }
+ }
+ }
+ }
+ }
+}
+
+void pthreadpool_parallelize_5d_tile_1d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_5d_tile_1d_t task,
+ void* argument,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ size_t range_m,
+ size_t tile_m,
+ uint32_t flags)
+{
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j++) {
+ for (size_t k = 0; k < range_k; k++) {
+ for (size_t l = 0; l < range_l; l++) {
+ for (size_t m = 0; m < range_m; m += tile_m) {
+ task(argument, i, j, k, l, m, min(range_m - m, tile_m));
+ }
+ }
+ }
+ }
+ }
+}
+
+void pthreadpool_parallelize_5d_tile_2d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_5d_tile_2d_t task,
+ void* argument,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ size_t range_m,
+ size_t tile_l,
+ size_t tile_m,
+ uint32_t flags)
+{
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j++) {
+ for (size_t k = 0; k < range_k; k++) {
+ for (size_t l = 0; l < range_l; l += tile_l) {
+ for (size_t m = 0; m < range_m; m += tile_m) {
+ task(argument, i, j, k, l, m,
+ min(range_l - l, tile_l), min(range_m - m, tile_m));
+ }
+ }
+ }
+ }
+ }
+}
+
+void pthreadpool_parallelize_6d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_6d_t task,
+ void* argument,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ size_t range_m,
+ size_t range_n,
+ uint32_t flags)
+{
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j++) {
+ for (size_t k = 0; k < range_k; k++) {
+ for (size_t l = 0; l < range_l; l++) {
+ for (size_t m = 0; m < range_m; m++) {
+ for (size_t n = 0; n < range_n; n++) {
+ task(argument, i, j, k, l, m, n);
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+void pthreadpool_parallelize_6d_tile_1d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_6d_tile_1d_t task,
+ void* argument,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ size_t range_m,
+ size_t range_n,
+ size_t tile_n,
+ uint32_t flags)
+{
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j++) {
+ for (size_t k = 0; k < range_k; k++) {
+ for (size_t l = 0; l < range_l; l++) {
+ for (size_t m = 0; m < range_m; m++) {
+ for (size_t n = 0; n < range_n; n += tile_n) {
+ task(argument, i, j, k, l, m, n, min(range_n - n, tile_n));
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+void pthreadpool_parallelize_6d_tile_2d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_6d_tile_2d_t task,
+ void* argument,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ size_t range_m,
+ size_t range_n,
+ size_t tile_m,
+ size_t tile_n,
+ uint32_t flags)
+{
+ for (size_t i = 0; i < range_i; i++) {
+ for (size_t j = 0; j < range_j; j++) {
+ for (size_t k = 0; k < range_k; k++) {
+ for (size_t l = 0; l < range_l; l++) {
+ for (size_t m = 0; m < range_m; m += tile_m) {
+ for (size_t n = 0; n < range_n; n += tile_n) {
+ task(argument, i, j, k, l, m, n,
+ min(range_m - m, tile_m), min(range_n - n, tile_n));
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+void pthreadpool_destroy(struct pthreadpool* threadpool) {
+}
diff --git a/src/threadpool-atomics.h b/src/threadpool-atomics.h
new file mode 100644
index 0000000..23f943a
--- /dev/null
+++ b/src/threadpool-atomics.h
@@ -0,0 +1,832 @@
+#pragma once
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+
+/* SSE-specific headers */
+#if defined(__i386__) || defined(__i686__) || defined(__x86_64__) || defined(_M_IX86) || defined(_M_X64)
+ #include <xmmintrin.h>
+#endif
+
+/* ARM-specific headers */
+#if defined(__ARM_ACLE)
+ #include <arm_acle.h>
+#endif
+
+/* MSVC-specific headers */
+#ifdef _MSC_VER
+ #include <intrin.h>
+#endif
+
+
+#if defined(__wasm__) && defined(__clang__)
+ /*
+	 * Clang for the WebAssembly target lacks the stdatomic.h header,
+ * even though it supports the necessary low-level intrinsics.
+ * Thus, we implement pthreadpool atomic functions on top of
+ * low-level Clang-specific interfaces for this target.
+ */
+
+ typedef _Atomic(uint32_t) pthreadpool_atomic_uint32_t;
+ typedef _Atomic(size_t) pthreadpool_atomic_size_t;
+ typedef _Atomic(void*) pthreadpool_atomic_void_p;
+
+ static inline uint32_t pthreadpool_load_relaxed_uint32_t(
+ pthreadpool_atomic_uint32_t* address)
+ {
+ return __c11_atomic_load(address, __ATOMIC_RELAXED);
+ }
+
+ static inline size_t pthreadpool_load_relaxed_size_t(
+ pthreadpool_atomic_size_t* address)
+ {
+ return __c11_atomic_load(address, __ATOMIC_RELAXED);
+ }
+
+ static inline void* pthreadpool_load_relaxed_void_p(
+ pthreadpool_atomic_void_p* address)
+ {
+ return __c11_atomic_load(address, __ATOMIC_RELAXED);
+ }
+
+ static inline uint32_t pthreadpool_load_acquire_uint32_t(
+ pthreadpool_atomic_uint32_t* address)
+ {
+ return __c11_atomic_load(address, __ATOMIC_ACQUIRE);
+ }
+
+ static inline size_t pthreadpool_load_acquire_size_t(
+ pthreadpool_atomic_size_t* address)
+ {
+ return __c11_atomic_load(address, __ATOMIC_ACQUIRE);
+ }
+
+ static inline void pthreadpool_store_relaxed_uint32_t(
+ pthreadpool_atomic_uint32_t* address,
+ uint32_t value)
+ {
+ __c11_atomic_store(address, value, __ATOMIC_RELAXED);
+ }
+
+ static inline void pthreadpool_store_relaxed_size_t(
+ pthreadpool_atomic_size_t* address,
+ size_t value)
+ {
+ __c11_atomic_store(address, value, __ATOMIC_RELAXED);
+ }
+
+ static inline void pthreadpool_store_relaxed_void_p(
+ pthreadpool_atomic_void_p* address,
+ void* value)
+ {
+ __c11_atomic_store(address, value, __ATOMIC_RELAXED);
+ }
+
+ static inline void pthreadpool_store_release_uint32_t(
+ pthreadpool_atomic_uint32_t* address,
+ uint32_t value)
+ {
+ __c11_atomic_store(address, value, __ATOMIC_RELEASE);
+ }
+
+ static inline void pthreadpool_store_release_size_t(
+ pthreadpool_atomic_size_t* address,
+ size_t value)
+ {
+ __c11_atomic_store(address, value, __ATOMIC_RELEASE);
+ }
+
+ static inline size_t pthreadpool_decrement_fetch_relaxed_size_t(
+ pthreadpool_atomic_size_t* address)
+ {
+ return __c11_atomic_fetch_sub(address, 1, __ATOMIC_RELAXED) - 1;
+ }
+
+ static inline size_t pthreadpool_decrement_fetch_release_size_t(
+ pthreadpool_atomic_size_t* address)
+ {
+ return __c11_atomic_fetch_sub(address, 1, __ATOMIC_RELEASE) - 1;
+ }
+
+ static inline bool pthreadpool_try_decrement_relaxed_size_t(
+ pthreadpool_atomic_size_t* value)
+ {
+ size_t actual_value = __c11_atomic_load(value, __ATOMIC_RELAXED);
+ while (actual_value != 0) {
+ if (__c11_atomic_compare_exchange_weak(
+ value, &actual_value, actual_value - 1, __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ static inline void pthreadpool_fence_acquire() {
+ __c11_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ }
+
+ static inline void pthreadpool_fence_release() {
+ __c11_atomic_thread_fence(__ATOMIC_RELEASE);
+ }
+#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__)
+ #include <stdatomic.h>
+
+ typedef _Atomic(uint32_t) pthreadpool_atomic_uint32_t;
+ typedef _Atomic(size_t) pthreadpool_atomic_size_t;
+ typedef _Atomic(void*) pthreadpool_atomic_void_p;
+
+ static inline uint32_t pthreadpool_load_relaxed_uint32_t(
+ pthreadpool_atomic_uint32_t* address)
+ {
+ return atomic_load_explicit(address, memory_order_relaxed);
+ }
+
+ static inline size_t pthreadpool_load_relaxed_size_t(
+ pthreadpool_atomic_size_t* address)
+ {
+ return atomic_load_explicit(address, memory_order_relaxed);
+ }
+
+ static inline void* pthreadpool_load_relaxed_void_p(
+ pthreadpool_atomic_void_p* address)
+ {
+ return atomic_load_explicit(address, memory_order_relaxed);
+ }
+
+ static inline uint32_t pthreadpool_load_acquire_uint32_t(
+ pthreadpool_atomic_uint32_t* address)
+ {
+ return atomic_load_explicit(address, memory_order_acquire);
+ }
+
+ static inline size_t pthreadpool_load_acquire_size_t(
+ pthreadpool_atomic_size_t* address)
+ {
+ return atomic_load_explicit(address, memory_order_acquire);
+ }
+
+ static inline void pthreadpool_store_relaxed_uint32_t(
+ pthreadpool_atomic_uint32_t* address,
+ uint32_t value)
+ {
+ atomic_store_explicit(address, value, memory_order_relaxed);
+ }
+
+ static inline void pthreadpool_store_relaxed_size_t(
+ pthreadpool_atomic_size_t* address,
+ size_t value)
+ {
+ atomic_store_explicit(address, value, memory_order_relaxed);
+ }
+
+ static inline void pthreadpool_store_relaxed_void_p(
+ pthreadpool_atomic_void_p* address,
+ void* value)
+ {
+ atomic_store_explicit(address, value, memory_order_relaxed);
+ }
+
+ static inline void pthreadpool_store_release_uint32_t(
+ pthreadpool_atomic_uint32_t* address,
+ uint32_t value)
+ {
+ atomic_store_explicit(address, value, memory_order_release);
+ }
+
+ static inline void pthreadpool_store_release_size_t(
+ pthreadpool_atomic_size_t* address,
+ size_t value)
+ {
+ atomic_store_explicit(address, value, memory_order_release);
+ }
+
+ static inline size_t pthreadpool_decrement_fetch_relaxed_size_t(
+ pthreadpool_atomic_size_t* address)
+ {
+ return atomic_fetch_sub_explicit(address, 1, memory_order_relaxed) - 1;
+ }
+
+ static inline size_t pthreadpool_decrement_fetch_release_size_t(
+ pthreadpool_atomic_size_t* address)
+ {
+ return atomic_fetch_sub_explicit(address, 1, memory_order_release) - 1;
+ }
+
+ static inline bool pthreadpool_try_decrement_relaxed_size_t(
+ pthreadpool_atomic_size_t* value)
+ {
+ #if defined(__clang__) && (defined(__arm__) || defined(__aarch64__))
+ size_t actual_value;
+ do {
+ actual_value = __builtin_arm_ldrex((const volatile size_t*) value);
+ if (actual_value == 0) {
+ __builtin_arm_clrex();
+ return false;
+ }
+ } while (__builtin_arm_strex(actual_value - 1, (volatile size_t*) value) != 0);
+ return true;
+ #else
+ size_t actual_value = pthreadpool_load_relaxed_size_t(value);
+ while (actual_value != 0) {
+ if (atomic_compare_exchange_weak_explicit(
+ value, &actual_value, actual_value - 1, memory_order_relaxed, memory_order_relaxed))
+ {
+ return true;
+ }
+ }
+ return false;
+ #endif
+ }
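+
+	/*
+	 * Usage sketch (illustration; "victim" is a hypothetical name, but the
+	 * pattern mirrors the work-stealing loops in src/portable-api.c): claim
+	 * one element at a time from another thread's subrange until it is empty:
+	 *
+	 *   while (pthreadpool_try_decrement_relaxed_size_t(&victim->range_length)) {
+	 *     const size_t index = pthreadpool_decrement_fetch_relaxed_size_t(&victim->range_end);
+	 *     task(argument, index);
+	 *   }
+	 */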
+
+ static inline void pthreadpool_fence_acquire() {
+ atomic_thread_fence(memory_order_acquire);
+ }
+
+ static inline void pthreadpool_fence_release() {
+ atomic_thread_fence(memory_order_release);
+ }
+#elif defined(__GNUC__)
+ typedef uint32_t volatile pthreadpool_atomic_uint32_t;
+ typedef size_t volatile pthreadpool_atomic_size_t;
+ typedef void* volatile pthreadpool_atomic_void_p;
+
+ static inline uint32_t pthreadpool_load_relaxed_uint32_t(
+ pthreadpool_atomic_uint32_t* address)
+ {
+ return *address;
+ }
+
+ static inline size_t pthreadpool_load_relaxed_size_t(
+ pthreadpool_atomic_size_t* address)
+ {
+ return *address;
+ }
+
+ static inline void* pthreadpool_load_relaxed_void_p(
+ pthreadpool_atomic_void_p* address)
+ {
+ return *address;
+ }
+
+ static inline uint32_t pthreadpool_load_acquire_uint32_t(
+ pthreadpool_atomic_uint32_t* address)
+ {
+ return *address;
+ }
+
+ static inline size_t pthreadpool_load_acquire_size_t(
+ pthreadpool_atomic_size_t* address)
+ {
+ return *address;
+ }
+
+ static inline void pthreadpool_store_relaxed_uint32_t(
+ pthreadpool_atomic_uint32_t* address,
+ uint32_t value)
+ {
+ *address = value;
+ }
+
+ static inline void pthreadpool_store_relaxed_size_t(
+ pthreadpool_atomic_size_t* address,
+ size_t value)
+ {
+ *address = value;
+ }
+
+ static inline void pthreadpool_store_relaxed_void_p(
+ pthreadpool_atomic_void_p* address,
+ void* value)
+ {
+ *address = value;
+ }
+
+ static inline void pthreadpool_store_release_uint32_t(
+ pthreadpool_atomic_uint32_t* address,
+ uint32_t value)
+ {
+ *address = value;
+ }
+
+ static inline void pthreadpool_store_release_size_t(
+ pthreadpool_atomic_size_t* address,
+ size_t value)
+ {
+ *address = value;
+ }
+
+ static inline size_t pthreadpool_decrement_fetch_relaxed_size_t(
+ pthreadpool_atomic_size_t* address)
+ {
+ return __sync_sub_and_fetch(address, 1);
+ }
+
+ static inline size_t pthreadpool_decrement_fetch_release_size_t(
+ pthreadpool_atomic_size_t* address)
+ {
+ return __sync_sub_and_fetch(address, 1);
+ }
+
+ static inline bool pthreadpool_try_decrement_relaxed_size_t(
+ pthreadpool_atomic_size_t* value)
+ {
+ size_t actual_value = *value;
+ while (actual_value != 0) {
+ const size_t new_value = actual_value - 1;
+ const size_t expected_value = actual_value;
+ actual_value = __sync_val_compare_and_swap(value, expected_value, new_value);
+ if (actual_value == expected_value) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ static inline void pthreadpool_fence_acquire() {
+ __sync_synchronize();
+ }
+
+ static inline void pthreadpool_fence_release() {
+ __sync_synchronize();
+ }
+#elif defined(_MSC_VER) && defined(_M_X64)
+ typedef volatile uint32_t pthreadpool_atomic_uint32_t;
+ typedef volatile size_t pthreadpool_atomic_size_t;
+ typedef void *volatile pthreadpool_atomic_void_p;
+
+ static inline uint32_t pthreadpool_load_relaxed_uint32_t(
+ pthreadpool_atomic_uint32_t* address)
+ {
+ return *address;
+ }
+
+ static inline size_t pthreadpool_load_relaxed_size_t(
+ pthreadpool_atomic_size_t* address)
+ {
+ return *address;
+ }
+
+ static inline void* pthreadpool_load_relaxed_void_p(
+ pthreadpool_atomic_void_p* address)
+ {
+ return *address;
+ }
+
+ static inline uint32_t pthreadpool_load_acquire_uint32_t(
+ pthreadpool_atomic_uint32_t* address)
+ {
+ /* x86-64 loads always have acquire semantics; use only a compiler barrier */
+ const uint32_t value = *address;
+ _ReadBarrier();
+ return value;
+ }
+
+ static inline size_t pthreadpool_load_acquire_size_t(
+ pthreadpool_atomic_size_t* address)
+ {
+ /* x86-64 loads always have acquire semantics; use only a compiler barrier */
+ const size_t value = *address;
+ _ReadBarrier();
+ return value;
+ }
+
+ static inline void pthreadpool_store_relaxed_uint32_t(
+ pthreadpool_atomic_uint32_t* address,
+ uint32_t value)
+ {
+ *address = value;
+ }
+
+ static inline void pthreadpool_store_relaxed_size_t(
+ pthreadpool_atomic_size_t* address,
+ size_t value)
+ {
+ *address = value;
+ }
+
+ static inline void pthreadpool_store_relaxed_void_p(
+ pthreadpool_atomic_void_p* address,
+ void* value)
+ {
+ *address = value;
+ }
+
+ static inline void pthreadpool_store_release_uint32_t(
+ pthreadpool_atomic_uint32_t* address,
+ uint32_t value)
+ {
+ /* x86-64 stores always have release semantics; use only a compiler barrier */
+ _WriteBarrier();
+ *address = value;
+ }
+
+ static inline void pthreadpool_store_release_size_t(
+ pthreadpool_atomic_size_t* address,
+ size_t value)
+ {
+ /* x86-64 stores always have release semantics; use only a compiler barrier */
+ _WriteBarrier();
+ *address = value;
+ }
+
+ static inline size_t pthreadpool_decrement_fetch_relaxed_size_t(
+ pthreadpool_atomic_size_t* address)
+ {
+ return (size_t) _InterlockedDecrement64((volatile __int64*) address);
+ }
+
+ static inline size_t pthreadpool_decrement_fetch_release_size_t(
+ pthreadpool_atomic_size_t* address)
+ {
+ return (size_t) _InterlockedDecrement64((volatile __int64*) address);
+ }
+
+ static inline bool pthreadpool_try_decrement_relaxed_size_t(
+ pthreadpool_atomic_size_t* value)
+ {
+ size_t actual_value = *value;
+ while (actual_value != 0) {
+ const size_t new_value = actual_value - 1;
+ const size_t expected_value = actual_value;
+ actual_value = _InterlockedCompareExchange64(
+ (volatile __int64*) value, (__int64) new_value, (__int64) expected_value);
+ if (actual_value == expected_value) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ static inline void pthreadpool_fence_acquire() {
+ _mm_lfence();
+ _ReadBarrier();
+ }
+
+ static inline void pthreadpool_fence_release() {
+ _WriteBarrier();
+ _mm_sfence();
+ }
+#elif defined(_MSC_VER) && defined(_M_IX86)
+ typedef volatile uint32_t pthreadpool_atomic_uint32_t;
+ typedef volatile size_t pthreadpool_atomic_size_t;
+ typedef void *volatile pthreadpool_atomic_void_p;
+
+ static inline uint32_t pthreadpool_load_relaxed_uint32_t(
+ pthreadpool_atomic_uint32_t* address)
+ {
+ return *address;
+ }
+
+ static inline size_t pthreadpool_load_relaxed_size_t(
+ pthreadpool_atomic_size_t* address)
+ {
+ return *address;
+ }
+
+ static inline void* pthreadpool_load_relaxed_void_p(
+ pthreadpool_atomic_void_p* address)
+ {
+ return *address;
+ }
+
+ static inline uint32_t pthreadpool_load_acquire_uint32_t(
+ pthreadpool_atomic_uint32_t* address)
+ {
+ /* x86 loads always have acquire semantics; use only a compiler barrier */
+ const uint32_t value = *address;
+ _ReadBarrier();
+ return value;
+ }
+
+ static inline size_t pthreadpool_load_acquire_size_t(
+ pthreadpool_atomic_size_t* address)
+ {
+ /* x86 loads always have acquire semantics; use only a compiler barrier */
+ const size_t value = *address;
+ _ReadBarrier();
+ return value;
+ }
+
+ static inline void pthreadpool_store_relaxed_uint32_t(
+ pthreadpool_atomic_uint32_t* address,
+ uint32_t value)
+ {
+ *address = value;
+ }
+
+ static inline void pthreadpool_store_relaxed_size_t(
+ pthreadpool_atomic_size_t* address,
+ size_t value)
+ {
+ *address = value;
+ }
+
+ static inline void pthreadpool_store_relaxed_void_p(
+ pthreadpool_atomic_void_p* address,
+ void* value)
+ {
+ *address = value;
+ }
+
+ static inline void pthreadpool_store_release_uint32_t(
+ pthreadpool_atomic_uint32_t* address,
+ uint32_t value)
+ {
+ /* x86 stores always have release semantics; use only a compiler barrier */
+ _WriteBarrier();
+ *address = value;
+ }
+
+ static inline void pthreadpool_store_release_size_t(
+ pthreadpool_atomic_size_t* address,
+ size_t value)
+ {
+ /* x86 stores always have release semantics; use only a compiler barrier */
+ _WriteBarrier();
+ *address = value;
+ }
+
+ static inline size_t pthreadpool_decrement_fetch_relaxed_size_t(
+ pthreadpool_atomic_size_t* address)
+ {
+ return (size_t) _InterlockedDecrement((volatile long*) address);
+ }
+
+ static inline size_t pthreadpool_decrement_fetch_release_size_t(
+ pthreadpool_atomic_size_t* address)
+ {
+ return (size_t) _InterlockedDecrement((volatile long*) address);
+ }
+
+ static inline bool pthreadpool_try_decrement_relaxed_size_t(
+ pthreadpool_atomic_size_t* value)
+ {
+ size_t actual_value = *value;
+ while (actual_value != 0) {
+ const size_t new_value = actual_value - 1;
+ const size_t expected_value = actual_value;
+ actual_value = _InterlockedCompareExchange(
+ (volatile long*) value, (long) new_value, (long) expected_value);
+ if (actual_value == expected_value) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ static inline void pthreadpool_fence_acquire() {
+ _mm_lfence();
+ }
+
+ static inline void pthreadpool_fence_release() {
+ _mm_sfence();
+ }
+#elif defined(_MSC_VER) && defined(_M_ARM64)
+ typedef volatile uint32_t pthreadpool_atomic_uint32_t;
+ typedef volatile size_t pthreadpool_atomic_size_t;
+ typedef void *volatile pthreadpool_atomic_void_p;
+
+ static inline uint32_t pthreadpool_load_relaxed_uint32_t(
+ pthreadpool_atomic_uint32_t* address)
+ {
+ return (uint32_t) __iso_volatile_load32((const volatile __int32*) address);
+ }
+
+ static inline size_t pthreadpool_load_relaxed_size_t(
+ pthreadpool_atomic_size_t* address)
+ {
+ return (size_t) __iso_volatile_load64((const volatile __int64*) address);
+ }
+
+ static inline void* pthreadpool_load_relaxed_void_p(
+ pthreadpool_atomic_void_p* address)
+ {
+ return (void*) __iso_volatile_load64((const volatile __int64*) address);
+ }
+
+ static inline uint32_t pthreadpool_load_acquire_uint32_t(
+ pthreadpool_atomic_uint32_t* address)
+ {
+ return (uint32_t) __ldar32((volatile unsigned __int32*) address);
+ }
+
+ static inline size_t pthreadpool_load_acquire_size_t(
+ pthreadpool_atomic_size_t* address)
+ {
+ return (size_t) __ldar64((volatile unsigned __int64*) address);
+ }
+
+ static inline void pthreadpool_store_relaxed_uint32_t(
+ pthreadpool_atomic_uint32_t* address,
+ uint32_t value)
+ {
+ __iso_volatile_store32((volatile __int32*) address, (__int32) value);
+ }
+
+ static inline void pthreadpool_store_relaxed_size_t(
+ pthreadpool_atomic_size_t* address,
+ size_t value)
+ {
+ __iso_volatile_store64((volatile __int64*) address, (__int64) value);
+ }
+
+ static inline void pthreadpool_store_relaxed_void_p(
+ pthreadpool_atomic_void_p* address,
+ void* value)
+ {
+ __iso_volatile_store64((volatile __int64*) address, (__int64) value);
+ }
+
+ static inline void pthreadpool_store_release_uint32_t(
+ pthreadpool_atomic_uint32_t* address,
+ uint32_t value)
+ {
+ _WriteBarrier();
+ __stlr32((unsigned __int32 volatile*) address, (unsigned __int32) value);
+ }
+
+ static inline void pthreadpool_store_release_size_t(
+ pthreadpool_atomic_size_t* address,
+ size_t value)
+ {
+ _WriteBarrier();
+ __stlr64((unsigned __int64 volatile*) address, (unsigned __int64) value);
+ }
+
+ static inline size_t pthreadpool_decrement_fetch_relaxed_size_t(
+ pthreadpool_atomic_size_t* address)
+ {
+ return (size_t) _InterlockedDecrement64_nf((volatile __int64*) address);
+ }
+
+ static inline size_t pthreadpool_decrement_fetch_release_size_t(
+ pthreadpool_atomic_size_t* address)
+ {
+ return (size_t) _InterlockedDecrement64_rel((volatile __int64*) address);
+ }
+
+ static inline bool pthreadpool_try_decrement_relaxed_size_t(
+ pthreadpool_atomic_size_t* value)
+ {
+ size_t actual_value = (size_t) __iso_volatile_load64((const volatile __int64*) value);
+ while (actual_value != 0) {
+ const size_t new_value = actual_value - 1;
+ const size_t expected_value = actual_value;
+ actual_value = _InterlockedCompareExchange64_nf(
+ (volatile __int64*) value, (__int64) new_value, (__int64) expected_value);
+ if (actual_value == expected_value) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ static inline void pthreadpool_fence_acquire() {
+ __dmb(_ARM64_BARRIER_ISHLD);
+ _ReadBarrier();
+ }
+
+ static inline void pthreadpool_fence_release() {
+ _WriteBarrier();
+ __dmb(_ARM64_BARRIER_ISH);
+ }
+#elif defined(_MSC_VER) && defined(_M_ARM)
+ typedef volatile uint32_t pthreadpool_atomic_uint32_t;
+ typedef volatile size_t pthreadpool_atomic_size_t;
+ typedef void *volatile pthreadpool_atomic_void_p;
+
+ static inline uint32_t pthreadpool_load_relaxed_uint32_t(
+ pthreadpool_atomic_uint32_t* address)
+ {
+ return (uint32_t) __iso_volatile_load32((const volatile __int32*) address);
+ }
+
+ static inline size_t pthreadpool_load_relaxed_size_t(
+ pthreadpool_atomic_size_t* address)
+ {
+ return (size_t) __iso_volatile_load32((const volatile __int32*) address);
+ }
+
+ static inline void* pthreadpool_load_relaxed_void_p(
+ pthreadpool_atomic_void_p* address)
+ {
+ return (void*) __iso_volatile_load32((const volatile __int32*) address);
+ }
+
+ static inline uint32_t pthreadpool_load_acquire_uint32_t(
+ pthreadpool_atomic_uint32_t* address)
+ {
+ const uint32_t value = (uint32_t) __iso_volatile_load32((const volatile __int32*) address);
+ __dmb(_ARM_BARRIER_ISH);
+ _ReadBarrier();
+ return value;
+ }
+
+ static inline size_t pthreadpool_load_acquire_size_t(
+ pthreadpool_atomic_size_t* address)
+ {
+ const size_t value = (size_t) __iso_volatile_load32((const volatile __int32*) address);
+ __dmb(_ARM_BARRIER_ISH);
+ _ReadBarrier();
+ return value;
+ }
+
+ static inline void pthreadpool_store_relaxed_uint32_t(
+ pthreadpool_atomic_uint32_t* address,
+ uint32_t value)
+ {
+ __iso_volatile_store32((volatile __int32*) address, (__int32) value);
+ }
+
+ static inline void pthreadpool_store_relaxed_size_t(
+ pthreadpool_atomic_size_t* address,
+ size_t value)
+ {
+ __iso_volatile_store32((volatile __int32*) address, (__int32) value);
+ }
+
+ static inline void pthreadpool_store_relaxed_void_p(
+ pthreadpool_atomic_void_p* address,
+ void* value)
+ {
+ __iso_volatile_store32((volatile __int32*) address, (__int32) value);
+ }
+
+ static inline void pthreadpool_store_release_uint32_t(
+ pthreadpool_atomic_uint32_t* address,
+ uint32_t value)
+ {
+ _WriteBarrier();
+ __dmb(_ARM_BARRIER_ISH);
+ __iso_volatile_store32((volatile __int32*) address, (__int32) value);
+ }
+
+ static inline void pthreadpool_store_release_size_t(
+ pthreadpool_atomic_size_t* address,
+ size_t value)
+ {
+ _WriteBarrier();
+ __dmb(_ARM_BARRIER_ISH);
+ __iso_volatile_store32((volatile __int32*) address, (__int32) value);
+ }
+
+ static inline size_t pthreadpool_decrement_fetch_relaxed_size_t(
+ pthreadpool_atomic_size_t* address)
+ {
+ return (size_t) _InterlockedDecrement_nf((volatile long*) address);
+ }
+
+ static inline size_t pthreadpool_decrement_fetch_release_size_t(
+ pthreadpool_atomic_size_t* address)
+ {
+ return (size_t) _InterlockedDecrement_rel((volatile long*) address);
+ }
+
+ static inline bool pthreadpool_try_decrement_relaxed_size_t(
+ pthreadpool_atomic_size_t* value)
+ {
+ size_t actual_value = (size_t) __iso_volatile_load32((const volatile __int32*) value);
+ while (actual_value != 0) {
+ const size_t new_value = actual_value - 1;
+ const size_t expected_value = actual_value;
+ actual_value = _InterlockedCompareExchange_nf(
+ (volatile long*) value, (long) new_value, (long) expected_value);
+ if (actual_value == expected_value) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ static inline void pthreadpool_fence_acquire() {
+ __dmb(_ARM_BARRIER_ISH);
+ _ReadBarrier();
+ }
+
+ static inline void pthreadpool_fence_release() {
+ _WriteBarrier();
+ __dmb(_ARM_BARRIER_ISH);
+ }
+#else
+ #error "Platform-specific implementation of threadpool-atomics.h required"
+#endif
+
+#if defined(__i386__) || defined(__i686__) || defined(__x86_64__) || defined(_M_IX86) || defined(_M_X64)
+ static inline void pthreadpool_yield() {
+ _mm_pause();
+ }
+#elif defined(__ARM_ACLE) || defined(_MSC_VER) && (defined(_M_ARM) || defined(_M_ARM64))
+ static inline void pthreadpool_yield() {
+ __yield();
+ }
+#elif defined(__GNUC__) && (defined(__ARM_ARCH) && (__ARM_ARCH >= 7) || (defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6KZ__)) && !defined(__thumb__))
+ static inline void pthreadpool_yield() {
+ __asm__ __volatile__("yield");
+ }
+#else
+ static inline void pthreadpool_yield() {
+ pthreadpool_fence_acquire();
+ }
+#endif
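
All of the branches above implement the same small contract: relaxed loads and stores, acquire/release variants, a saturating try-decrement, and fences, built from whatever each compiler and architecture provides. As a point of reference, here is a minimal sketch (not part of the patch) of the try-decrement primitive expressed in portable C11 <stdatomic.h>, which the intrinsic-based branches above emulate:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Decrement *value unless it is already zero; return true on success.
     * This matches the semantics of pthreadpool_try_decrement_relaxed_size_t. */
    static bool try_decrement(atomic_size_t* value) {
        size_t actual = atomic_load_explicit(value, memory_order_relaxed);
        while (actual != 0) {
            /* On failure, compare_exchange reloads the current value into actual. */
            if (atomic_compare_exchange_weak_explicit(
                    value, &actual, actual - 1,
                    memory_order_relaxed, memory_order_relaxed)) {
                return true;
            }
        }
        return false;
    }

    int main(void) {
        atomic_size_t work_items;
        atomic_init(&work_items, 2);
        for (int i = 0; i < 3; i++) {
            printf("attempt %d: %s\n", i,
                try_decrement(&work_items) ? "decremented" : "empty");
        }
        return 0; /* prints: decremented, decremented, empty */
    }
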
diff --git a/src/threadpool-common.h b/src/threadpool-common.h
new file mode 100644
index 0000000..ca84744
--- /dev/null
+++ b/src/threadpool-common.h
@@ -0,0 +1,75 @@
+#pragma once
+
+#ifndef PTHREADPOOL_USE_CPUINFO
+ #define PTHREADPOOL_USE_CPUINFO 0
+#endif
+
+#ifndef PTHREADPOOL_USE_FUTEX
+ #if defined(__linux__)
+ #define PTHREADPOOL_USE_FUTEX 1
+ #elif defined(__EMSCRIPTEN__)
+ #define PTHREADPOOL_USE_FUTEX 1
+ #else
+ #define PTHREADPOOL_USE_FUTEX 0
+ #endif
+#endif
+
+#ifndef PTHREADPOOL_USE_GCD
+ #if defined(__APPLE__)
+ #define PTHREADPOOL_USE_GCD 1
+ #else
+ #define PTHREADPOOL_USE_GCD 0
+ #endif
+#endif
+
+#ifndef PTHREADPOOL_USE_EVENT
+ #if defined(_WIN32) || defined(__CYGWIN__)
+ #define PTHREADPOOL_USE_EVENT 1
+ #else
+ #define PTHREADPOOL_USE_EVENT 0
+ #endif
+#endif
+
+#ifndef PTHREADPOOL_USE_CONDVAR
+ #if PTHREADPOOL_USE_GCD || PTHREADPOOL_USE_FUTEX || PTHREADPOOL_USE_EVENT
+ #define PTHREADPOOL_USE_CONDVAR 0
+ #else
+ #define PTHREADPOOL_USE_CONDVAR 1
+ #endif
+#endif
+
+
+/* Number of iterations in spin-wait loop before going into futex/condvar wait */
+#define PTHREADPOOL_SPIN_WAIT_ITERATIONS 1000000
+
+#define PTHREADPOOL_CACHELINE_SIZE 64
+#if defined(__GNUC__)
+ #define PTHREADPOOL_CACHELINE_ALIGNED __attribute__((__aligned__(PTHREADPOOL_CACHELINE_SIZE)))
+#elif defined(_MSC_VER)
+ #define PTHREADPOOL_CACHELINE_ALIGNED __declspec(align(PTHREADPOOL_CACHELINE_SIZE))
+#else
+ #error "Platform-specific implementation of PTHREADPOOL_CACHELINE_ALIGNED required"
+#endif
+
+#if defined(__clang__)
+ #if __has_extension(c_static_assert) || __has_feature(c_static_assert)
+ #define PTHREADPOOL_STATIC_ASSERT(predicate, message) _Static_assert((predicate), message)
+ #else
+ #define PTHREADPOOL_STATIC_ASSERT(predicate, message)
+ #endif
+#elif defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4) && (__GNUC_MINOR__ >= 6))
+ /* Static assert is supported by gcc >= 4.6 */
+ #define PTHREADPOOL_STATIC_ASSERT(predicate, message) _Static_assert((predicate), message)
+#else
+ #define PTHREADPOOL_STATIC_ASSERT(predicate, message)
+#endif
+
+#ifndef PTHREADPOOL_INTERNAL
+ #if defined(__ELF__)
+ #define PTHREADPOOL_INTERNAL __attribute__((__visibility__("internal")))
+ #elif defined(__MACH__)
+ #define PTHREADPOOL_INTERNAL __attribute__((__visibility__("hidden")))
+ #else
+ #define PTHREADPOOL_INTERNAL
+ #endif
+#endif
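
This header is configuration plumbing: the PTHREADPOOL_USE_* cascade picks one wait primitive per platform (futex on Linux/Emscripten, GCD on Apple, events on Windows, condition variables otherwise), while the alignment and static-assert macros keep hot per-thread state on separate cache lines. A minimal sketch (not part of the patch, assuming a GCC-compatible compiler) of how the two macros are meant to be combined:

    #include <stddef.h>

    #define CACHELINE_SIZE 64
    #define CACHELINE_ALIGNED __attribute__((__aligned__(CACHELINE_SIZE)))

    /* Aligning a struct to 64 bytes also rounds sizeof() up to a multiple of
     * 64, so adjacent array elements never share a cache line. */
    struct CACHELINE_ALIGNED per_thread_state {
        size_t counter; /* written frequently by exactly one thread */
    };

    _Static_assert(sizeof(struct per_thread_state) % CACHELINE_SIZE == 0,
        "per-thread state must occupy an integer number of cache lines");
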
diff --git a/src/threadpool-object.h b/src/threadpool-object.h
new file mode 100644
index 0000000..590dc96
--- /dev/null
+++ b/src/threadpool-object.h
@@ -0,0 +1,812 @@
+#pragma once
+
+/* Standard C headers */
+#include <stddef.h>
+#include <stdint.h>
+
+/* Internal headers */
+#include "threadpool-common.h"
+#include "threadpool-atomics.h"
+
+/* POSIX headers */
+#if PTHREADPOOL_USE_CONDVAR || PTHREADPOOL_USE_FUTEX
+#include <pthread.h>
+#endif
+
+/* Mach headers */
+#if PTHREADPOOL_USE_GCD
+#include <dispatch/dispatch.h>
+#endif
+
+/* Windows headers */
+#if PTHREADPOOL_USE_EVENT
+#include <windows.h>
+#endif
+
+/* Dependencies */
+#include <fxdiv.h>
+
+/* Library header */
+#include <pthreadpool.h>
+
+
+#define THREADPOOL_COMMAND_MASK UINT32_C(0x7FFFFFFF)
+
+enum threadpool_command {
+ threadpool_command_init,
+ threadpool_command_parallelize,
+ threadpool_command_shutdown,
+};
+
+struct PTHREADPOOL_CACHELINE_ALIGNED thread_info {
+ /**
+ * Index of the first element in the work range.
+ * Before processing a new element the owning worker thread increments this value.
+ */
+ pthreadpool_atomic_size_t range_start;
+ /**
+ * Index of the element after the last element of the work range.
+ * Before processing a new element the stealing worker thread decrements this value.
+ */
+ pthreadpool_atomic_size_t range_end;
+ /**
+ * The number of elements in the work range.
+ * Due to race conditions range_length <= range_end - range_start.
+ * The owning worker thread must decrement this value before incrementing @a range_start.
+ * The stealing worker thread must decrement this value before decrementing @a range_end.
+ */
+ pthreadpool_atomic_size_t range_length;
+ /**
+ * Thread number in the 0..threads_count-1 range.
+ */
+ size_t thread_number;
+ /**
+ * Thread pool which owns the thread.
+ */
+ struct pthreadpool* threadpool;
+#if PTHREADPOOL_USE_CONDVAR || PTHREADPOOL_USE_FUTEX
+ /**
+ * The pthread object corresponding to the thread.
+ */
+ pthread_t thread_object;
+#endif
+#if PTHREADPOOL_USE_EVENT
+ /**
+ * The Windows thread handle corresponding to the thread.
+ */
+ HANDLE thread_handle;
+#endif
+};
+
+PTHREADPOOL_STATIC_ASSERT(sizeof(struct thread_info) % PTHREADPOOL_CACHELINE_SIZE == 0,
+ "thread_info structure must occupy an integer number of cache lines (64 bytes)");
+
+struct pthreadpool_1d_with_uarch_params {
+ /**
+ * Copy of the default_uarch_index argument passed to the pthreadpool_parallelize_1d_with_uarch function.
+ */
+ uint32_t default_uarch_index;
+ /**
+ * Copy of the max_uarch_index argument passed to the pthreadpool_parallelize_1d_with_uarch function.
+ */
+ uint32_t max_uarch_index;
+};
+
+struct pthreadpool_1d_tile_1d_params {
+ /**
+ * Copy of the range argument passed to the pthreadpool_parallelize_1d_tile_1d function.
+ */
+ size_t range;
+ /**
+ * Copy of the tile argument passed to the pthreadpool_parallelize_1d_tile_1d function.
+ */
+ size_t tile;
+};
+
+struct pthreadpool_2d_params {
+ /**
+ * FXdiv divisor for the range_j argument passed to the pthreadpool_parallelize_2d function.
+ */
+ struct fxdiv_divisor_size_t range_j;
+};
+
+struct pthreadpool_2d_tile_1d_params {
+ /**
+ * Copy of the range_j argument passed to the pthreadpool_parallelize_2d_tile_1d function.
+ */
+ size_t range_j;
+ /**
+ * Copy of the tile_j argument passed to the pthreadpool_parallelize_2d_tile_1d function.
+ */
+ size_t tile_j;
+ /**
+ * FXdiv divisor for the divide_round_up(range_j, tile_j) value.
+ */
+ struct fxdiv_divisor_size_t tile_range_j;
+};
+
+struct pthreadpool_2d_tile_2d_params {
+ /**
+ * Copy of the range_i argument passed to the pthreadpool_parallelize_2d_tile_2d function.
+ */
+ size_t range_i;
+ /**
+ * Copy of the tile_i argument passed to the pthreadpool_parallelize_2d_tile_2d function.
+ */
+ size_t tile_i;
+ /**
+ * Copy of the range_j argument passed to the pthreadpool_parallelize_2d_tile_2d function.
+ */
+ size_t range_j;
+ /**
+ * Copy of the tile_j argument passed to the pthreadpool_parallelize_2d_tile_2d function.
+ */
+ size_t tile_j;
+ /**
+ * FXdiv divisor for the divide_round_up(range_j, tile_j) value.
+ */
+ struct fxdiv_divisor_size_t tile_range_j;
+};
+
+struct pthreadpool_2d_tile_2d_with_uarch_params {
+ /**
+ * Copy of the default_uarch_index argument passed to the pthreadpool_parallelize_2d_tile_2d_with_uarch function.
+ */
+ uint32_t default_uarch_index;
+ /**
+ * Copy of the max_uarch_index argument passed to the pthreadpool_parallelize_2d_tile_2d_with_uarch function.
+ */
+ uint32_t max_uarch_index;
+ /**
+ * Copy of the range_i argument passed to the pthreadpool_parallelize_2d_tile_2d_with_uarch function.
+ */
+ size_t range_i;
+ /**
+ * Copy of the tile_i argument passed to the pthreadpool_parallelize_2d_tile_2d_with_uarch function.
+ */
+ size_t tile_i;
+ /**
+ * Copy of the range_j argument passed to the pthreadpool_parallelize_2d_tile_2d_with_uarch function.
+ */
+ size_t range_j;
+ /**
+ * Copy of the tile_j argument passed to the pthreadpool_parallelize_2d_tile_2d_with_uarch function.
+ */
+ size_t tile_j;
+ /**
+ * FXdiv divisor for the divide_round_up(range_j, tile_j) value.
+ */
+ struct fxdiv_divisor_size_t tile_range_j;
+};
+
+struct pthreadpool_3d_params {
+ /**
+ * FXdiv divisor for the range_j argument passed to the pthreadpool_parallelize_3d function.
+ */
+ struct fxdiv_divisor_size_t range_j;
+ /**
+ * FXdiv divisor for the range_k argument passed to the pthreadpool_parallelize_3d function.
+ */
+ struct fxdiv_divisor_size_t range_k;
+};
+
+struct pthreadpool_3d_tile_1d_params {
+ /**
+ * Copy of the range_k argument passed to the pthreadpool_parallelize_3d_tile_1d function.
+ */
+ size_t range_k;
+ /**
+ * Copy of the tile_k argument passed to the pthreadpool_parallelize_3d_tile_1d function.
+ */
+ size_t tile_k;
+ /**
+ * FXdiv divisor for the range_j argument passed to the pthreadpool_parallelize_3d_tile_1d function.
+ */
+ struct fxdiv_divisor_size_t range_j;
+ /**
+ * FXdiv divisor for the divide_round_up(range_k, tile_k) value.
+ */
+ struct fxdiv_divisor_size_t tile_range_k;
+};
+
+struct pthreadpool_3d_tile_2d_params {
+ /**
+ * Copy of the range_j argument passed to the pthreadpool_parallelize_3d_tile_2d function.
+ */
+ size_t range_j;
+ /**
+ * Copy of the tile_j argument passed to the pthreadpool_parallelize_3d_tile_2d function.
+ */
+ size_t tile_j;
+ /**
+ * Copy of the range_k argument passed to the pthreadpool_parallelize_3d_tile_2d function.
+ */
+ size_t range_k;
+ /**
+ * Copy of the tile_k argument passed to the pthreadpool_parallelize_3d_tile_2d function.
+ */
+ size_t tile_k;
+ /**
+ * FXdiv divisor for the divide_round_up(range_j, tile_j) value.
+ */
+ struct fxdiv_divisor_size_t tile_range_j;
+ /**
+ * FXdiv divisor for the divide_round_up(range_k, tile_k) value.
+ */
+ struct fxdiv_divisor_size_t tile_range_k;
+};
+
+struct pthreadpool_3d_tile_2d_with_uarch_params {
+ /**
+ * Copy of the default_uarch_index argument passed to the pthreadpool_parallelize_3d_tile_2d_with_uarch function.
+ */
+ uint32_t default_uarch_index;
+ /**
+ * Copy of the max_uarch_index argument passed to the pthreadpool_parallelize_3d_tile_2d_with_uarch function.
+ */
+ uint32_t max_uarch_index;
+ /**
+ * Copy of the range_j argument passed to the pthreadpool_parallelize_3d_tile_2d_with_uarch function.
+ */
+ size_t range_j;
+ /**
+ * Copy of the tile_j argument passed to the pthreadpool_parallelize_3d_tile_2d_with_uarch function.
+ */
+ size_t tile_j;
+ /**
+ * Copy of the range_k argument passed to the pthreadpool_parallelize_3d_tile_2d_with_uarch function.
+ */
+ size_t range_k;
+ /**
+ * Copy of the tile_k argument passed to the pthreadpool_parallelize_3d_tile_2d_with_uarch function.
+ */
+ size_t tile_k;
+ /**
+ * FXdiv divisor for the divide_round_up(range_j, tile_j) value.
+ */
+ struct fxdiv_divisor_size_t tile_range_j;
+ /**
+ * FXdiv divisor for the divide_round_up(range_k, tile_k) value.
+ */
+ struct fxdiv_divisor_size_t tile_range_k;
+};
+
+struct pthreadpool_4d_params {
+ /**
+ * Copy of the range_k argument passed to the pthreadpool_parallelize_4d function.
+ */
+ size_t range_k;
+ /**
+ * FXdiv divisor for the range_j argument passed to the pthreadpool_parallelize_4d function.
+ */
+ struct fxdiv_divisor_size_t range_j;
+ /**
+ * FXdiv divisor for the range_k * range_l value.
+ */
+ struct fxdiv_divisor_size_t range_kl;
+ /**
+ * FXdiv divisor for the range_l argument passed to the pthreadpool_parallelize_4d function.
+ */
+ struct fxdiv_divisor_size_t range_l;
+};
+
+struct pthreadpool_4d_tile_1d_params {
+ /**
+ * Copy of the range_k argument passed to the pthreadpool_parallelize_4d_tile_1d function.
+ */
+ size_t range_k;
+ /**
+ * Copy of the range_l argument passed to the pthreadpool_parallelize_4d_tile_1d function.
+ */
+ size_t range_l;
+ /**
+ * Copy of the tile_l argument passed to the pthreadpool_parallelize_4d_tile_1d function.
+ */
+ size_t tile_l;
+ /**
+ * FXdiv divisor for the range_j argument passed to the pthreadpool_parallelize_4d_tile_1d function.
+ */
+ struct fxdiv_divisor_size_t range_j;
+ /**
+ * FXdiv divisor for the range_k * divide_round_up(range_l, tile_l) value.
+ */
+ struct fxdiv_divisor_size_t tile_range_kl;
+ /**
+ * FXdiv divisor for the divide_round_up(range_l, tile_l) value.
+ */
+ struct fxdiv_divisor_size_t tile_range_l;
+};
+
+struct pthreadpool_4d_tile_2d_params {
+ /**
+ * Copy of the range_k argument passed to the pthreadpool_parallelize_4d_tile_2d function.
+ */
+ size_t range_k;
+ /**
+ * Copy of the tile_k argument passed to the pthreadpool_parallelize_4d_tile_2d function.
+ */
+ size_t tile_k;
+ /**
+ * Copy of the range_l argument passed to the pthreadpool_parallelize_4d_tile_2d function.
+ */
+ size_t range_l;
+ /**
+ * Copy of the tile_l argument passed to the pthreadpool_parallelize_4d_tile_2d function.
+ */
+ size_t tile_l;
+ /**
+ * FXdiv divisor for the range_j argument passed to the pthreadpool_parallelize_4d_tile_2d function.
+ */
+ struct fxdiv_divisor_size_t range_j;
+ /**
+ * FXdiv divisor for the divide_round_up(range_k, tile_k) * divide_round_up(range_l, tile_l) value.
+ */
+ struct fxdiv_divisor_size_t tile_range_kl;
+ /**
+ * FXdiv divisor for the divide_round_up(range_l, tile_l) value.
+ */
+ struct fxdiv_divisor_size_t tile_range_l;
+};
+
+struct pthreadpool_4d_tile_2d_with_uarch_params {
+ /**
+ * Copy of the default_uarch_index argument passed to the pthreadpool_parallelize_4d_tile_2d_with_uarch function.
+ */
+ uint32_t default_uarch_index;
+ /**
+ * Copy of the max_uarch_index argument passed to the pthreadpool_parallelize_4d_tile_2d_with_uarch function.
+ */
+ uint32_t max_uarch_index;
+ /**
+ * Copy of the range_k argument passed to the pthreadpool_parallelize_4d_tile_2d_with_uarch function.
+ */
+ size_t range_k;
+ /**
+ * Copy of the tile_k argument passed to the pthreadpool_parallelize_4d_tile_2d_with_uarch function.
+ */
+ size_t tile_k;
+ /**
+ * Copy of the range_l argument passed to the pthreadpool_parallelize_4d_tile_2d_with_uarch function.
+ */
+ size_t range_l;
+ /**
+ * Copy of the tile_l argument passed to the pthreadpool_parallelize_4d_tile_2d_with_uarch function.
+ */
+ size_t tile_l;
+ /**
+ * FXdiv divisor for the range_j argument passed to the pthreadpool_parallelize_4d_tile_2d_with_uarch function.
+ */
+ struct fxdiv_divisor_size_t range_j;
+ /**
+ * FXdiv divisor for the divide_round_up(range_k, tile_k) * divide_round_up(range_l, tile_l) value.
+ */
+ struct fxdiv_divisor_size_t tile_range_kl;
+ /**
+ * FXdiv divisor for the divide_round_up(range_l, tile_l) value.
+ */
+ struct fxdiv_divisor_size_t tile_range_l;
+};
+
+struct pthreadpool_5d_params {
+ /**
+ * Copy of the range_l argument passed to the pthreadpool_parallelize_5d function.
+ */
+ size_t range_l;
+ /**
+ * FXdiv divisor for the range_j argument passed to the pthreadpool_parallelize_5d function.
+ */
+ struct fxdiv_divisor_size_t range_j;
+ /**
+ * FXdiv divisor for the range_k argument passed to the pthreadpool_parallelize_5d function.
+ */
+ struct fxdiv_divisor_size_t range_k;
+ /**
+ * FXdiv divisor for the range_l * range_m value.
+ */
+ struct fxdiv_divisor_size_t range_lm;
+ /**
+ * FXdiv divisor for the range_m argument passed to the pthreadpool_parallelize_5d function.
+ */
+ struct fxdiv_divisor_size_t range_m;
+};
+
+struct pthreadpool_5d_tile_1d_params {
+ /**
+ * Copy of the range_k argument passed to the pthreadpool_parallelize_5d_tile_1d function.
+ */
+ size_t range_k;
+ /**
+ * Copy of the range_m argument passed to the pthreadpool_parallelize_5d_tile_1d function.
+ */
+ size_t range_m;
+ /**
+ * Copy of the tile_m argument passed to the pthreadpool_parallelize_5d_tile_1d function.
+ */
+ size_t tile_m;
+ /**
+ * FXdiv divisor for the range_j argument passed to the pthreadpool_parallelize_5d_tile_1d function.
+ */
+ struct fxdiv_divisor_size_t range_j;
+ /**
+ * FXdiv divisor for the range_k * range_l value.
+ */
+ struct fxdiv_divisor_size_t range_kl;
+ /**
+ * FXdiv divisor for the range_l argument passed to the pthreadpool_parallelize_5d_tile_1d function.
+ */
+ struct fxdiv_divisor_size_t range_l;
+ /**
+ * FXdiv divisor for the divide_round_up(range_m, tile_m) value.
+ */
+ struct fxdiv_divisor_size_t tile_range_m;
+};
+
+struct pthreadpool_5d_tile_2d_params {
+ /**
+ * Copy of the range_l argument passed to the pthreadpool_parallelize_5d_tile_2d function.
+ */
+ size_t range_l;
+ /**
+ * Copy of the tile_l argument passed to the pthreadpool_parallelize_5d_tile_2d function.
+ */
+ size_t tile_l;
+ /**
+ * Copy of the range_m argument passed to the pthreadpool_parallelize_5d_tile_2d function.
+ */
+ size_t range_m;
+ /**
+ * Copy of the tile_m argument passed to the pthreadpool_parallelize_5d_tile_2d function.
+ */
+ size_t tile_m;
+ /**
+ * FXdiv divisor for the range_j argument passed to the pthreadpool_parallelize_5d_tile_2d function.
+ */
+ struct fxdiv_divisor_size_t range_j;
+ /**
+ * FXdiv divisor for the range_k argument passed to the pthreadpool_parallelize_5d_tile_2d function.
+ */
+ struct fxdiv_divisor_size_t range_k;
+ /**
+ * FXdiv divisor for the divide_round_up(range_l, tile_l) * divide_round_up(range_m, tile_m) value.
+ */
+ struct fxdiv_divisor_size_t tile_range_lm;
+ /**
+ * FXdiv divisor for the divide_round_up(range_m, tile_m) value.
+ */
+ struct fxdiv_divisor_size_t tile_range_m;
+};
+
+struct pthreadpool_6d_params {
+ /**
+ * Copy of the range_l argument passed to the pthreadpool_parallelize_6d function.
+ */
+ size_t range_l;
+ /**
+ * FXdiv divisor for the range_j argument passed to the pthreadpool_parallelize_6d function.
+ */
+ struct fxdiv_divisor_size_t range_j;
+ /**
+ * FXdiv divisor for the range_k argument passed to the pthreadpool_parallelize_6d function.
+ */
+ struct fxdiv_divisor_size_t range_k;
+ /**
+ * FXdiv divisor for the range_l * range_m * range_n value.
+ */
+ struct fxdiv_divisor_size_t range_lmn;
+ /**
+ * FXdiv divisor for the range_m argument passed to the pthreadpool_parallelize_6d function.
+ */
+ struct fxdiv_divisor_size_t range_m;
+ /**
+ * FXdiv divisor for the range_n argument passed to the pthreadpool_parallelize_6d function.
+ */
+ struct fxdiv_divisor_size_t range_n;
+};
+
+struct pthreadpool_6d_tile_1d_params {
+ /**
+ * Copy of the range_l argument passed to the pthreadpool_parallelize_6d_tile_1d function.
+ */
+ size_t range_l;
+ /**
+ * Copy of the range_n argument passed to the pthreadpool_parallelize_6d_tile_1d function.
+ */
+ size_t range_n;
+ /**
+ * Copy of the tile_n argument passed to the pthreadpool_parallelize_6d_tile_1d function.
+ */
+ size_t tile_n;
+ /**
+ * FXdiv divisor for the range_j argument passed to the pthreadpool_parallelize_6d_tile_1d function.
+ */
+ struct fxdiv_divisor_size_t range_j;
+ /**
+ * FXdiv divisor for the range_k argument passed to the pthreadpool_parallelize_6d_tile_1d function.
+ */
+ struct fxdiv_divisor_size_t range_k;
+ /**
+ * FXdiv divisor for the range_l * range_m * divide_round_up(range_n, tile_n) value.
+ */
+ struct fxdiv_divisor_size_t tile_range_lmn;
+ /**
+ * FXdiv divisor for the range_m argument passed to the pthreadpool_parallelize_6d_tile_1d function.
+ */
+ struct fxdiv_divisor_size_t range_m;
+ /**
+ * FXdiv divisor for the divide_round_up(range_n, tile_n) value.
+ */
+ struct fxdiv_divisor_size_t tile_range_n;
+};
+
+struct pthreadpool_6d_tile_2d_params {
+ /**
+ * Copy of the range_k argument passed to the pthreadpool_parallelize_6d_tile_2d function.
+ */
+ size_t range_k;
+ /**
+ * Copy of the range_m argument passed to the pthreadpool_parallelize_6d_tile_2d function.
+ */
+ size_t range_m;
+ /**
+ * Copy of the tile_m argument passed to the pthreadpool_parallelize_6d_tile_2d function.
+ */
+ size_t tile_m;
+ /**
+ * Copy of the range_n argument passed to the pthreadpool_parallelize_6d_tile_2d function.
+ */
+ size_t range_n;
+ /**
+ * Copy of the tile_n argument passed to the pthreadpool_parallelize_6d_tile_2d function.
+ */
+ size_t tile_n;
+ /**
+ * FXdiv divisor for the range_j argument passed to the pthreadpool_parallelize_6d_tile_2d function.
+ */
+ struct fxdiv_divisor_size_t range_j;
+ /**
+ * FXdiv divisor for the range_k * range_l value.
+ */
+ struct fxdiv_divisor_size_t range_kl;
+ /**
+ * FXdiv divisor for the range_l argument passed to the pthreadpool_parallelize_6d_tile_2d function.
+ */
+ struct fxdiv_divisor_size_t range_l;
+ /**
+ * FXdiv divisor for the divide_round_up(range_m, tile_m) * divide_round_up(range_n, tile_n) value.
+ */
+ struct fxdiv_divisor_size_t tile_range_mn;
+ /**
+ * FXdiv divisor for the divide_round_up(range_n, tile_n) value.
+ */
+ struct fxdiv_divisor_size_t tile_range_n;
+};
+
+struct PTHREADPOOL_CACHELINE_ALIGNED pthreadpool {
+#if !PTHREADPOOL_USE_GCD
+ /**
+ * The number of threads that are processing an operation.
+ */
+ pthreadpool_atomic_size_t active_threads;
+#endif
+#if PTHREADPOOL_USE_FUTEX
+ /**
+ * Indicates if there are active threads.
+ * Only two values are possible:
+ * - has_active_threads == 0 if active_threads == 0
+ * - has_active_threads == 1 if active_threads != 0
+ */
+ pthreadpool_atomic_uint32_t has_active_threads;
+#endif
+#if !PTHREADPOOL_USE_GCD
+ /**
+ * The last command submitted to the thread pool.
+ */
+ pthreadpool_atomic_uint32_t command;
+#endif
+ /**
+ * The entry point function to call for each thread in the thread pool for parallelization tasks.
+ */
+ pthreadpool_atomic_void_p thread_function;
+ /**
+ * The function to call for each item.
+ */
+ pthreadpool_atomic_void_p task;
+ /**
+ * The first argument to the item processing function.
+ */
+ pthreadpool_atomic_void_p argument;
+ /**
+ * Additional parallelization parameters.
+ * These parameters are specific for each thread_function.
+	 * These parameters are specific to each thread_function.
+ union {
+ struct pthreadpool_1d_with_uarch_params parallelize_1d_with_uarch;
+ struct pthreadpool_1d_tile_1d_params parallelize_1d_tile_1d;
+ struct pthreadpool_2d_params parallelize_2d;
+ struct pthreadpool_2d_tile_1d_params parallelize_2d_tile_1d;
+ struct pthreadpool_2d_tile_2d_params parallelize_2d_tile_2d;
+ struct pthreadpool_2d_tile_2d_with_uarch_params parallelize_2d_tile_2d_with_uarch;
+ struct pthreadpool_3d_params parallelize_3d;
+ struct pthreadpool_3d_tile_1d_params parallelize_3d_tile_1d;
+ struct pthreadpool_3d_tile_2d_params parallelize_3d_tile_2d;
+ struct pthreadpool_3d_tile_2d_with_uarch_params parallelize_3d_tile_2d_with_uarch;
+ struct pthreadpool_4d_params parallelize_4d;
+ struct pthreadpool_4d_tile_1d_params parallelize_4d_tile_1d;
+ struct pthreadpool_4d_tile_2d_params parallelize_4d_tile_2d;
+ struct pthreadpool_4d_tile_2d_with_uarch_params parallelize_4d_tile_2d_with_uarch;
+ struct pthreadpool_5d_params parallelize_5d;
+ struct pthreadpool_5d_tile_1d_params parallelize_5d_tile_1d;
+ struct pthreadpool_5d_tile_2d_params parallelize_5d_tile_2d;
+ struct pthreadpool_6d_params parallelize_6d;
+ struct pthreadpool_6d_tile_1d_params parallelize_6d_tile_1d;
+ struct pthreadpool_6d_tile_2d_params parallelize_6d_tile_2d;
+ } params;
+ /**
+ * Copy of the flags passed to a parallelization function.
+ */
+ pthreadpool_atomic_uint32_t flags;
+#if PTHREADPOOL_USE_CONDVAR || PTHREADPOOL_USE_FUTEX
+ /**
+ * Serializes concurrent calls to @a pthreadpool_parallelize_* from different threads.
+ */
+ pthread_mutex_t execution_mutex;
+#endif
+#if PTHREADPOOL_USE_GCD
+ /**
+ * Serializes concurrent calls to @a pthreadpool_parallelize_* from different threads.
+ */
+ dispatch_semaphore_t execution_semaphore;
+#endif
+#if PTHREADPOOL_USE_EVENT
+ /**
+ * Serializes concurrent calls to @a pthreadpool_parallelize_* from different threads.
+ */
+ HANDLE execution_mutex;
+#endif
+#if PTHREADPOOL_USE_CONDVAR
+ /**
+ * Guards access to the @a active_threads variable.
+ */
+ pthread_mutex_t completion_mutex;
+ /**
+ * Condition variable to wait until all threads complete an operation (until @a active_threads is zero).
+ */
+ pthread_cond_t completion_condvar;
+ /**
+ * Guards access to the @a command variable.
+ */
+ pthread_mutex_t command_mutex;
+ /**
+ * Condition variable to wait for change of the @a command variable.
+ */
+ pthread_cond_t command_condvar;
+#endif
+#if PTHREADPOOL_USE_EVENT
+ /**
+ * Events to wait on until all threads complete an operation (until @a active_threads is zero).
+	 * To avoid race conditions due to spin-lock synchronization, we use two events and switch the event in use
+	 * after every submitted command, according to the high bit of the command word.
+ */
+ HANDLE completion_event[2];
+ /**
+ * Events to wait on for change of the @a command variable.
+	 * To avoid race conditions due to spin-lock synchronization, we use two events and switch the event in use
+	 * after every submitted command, according to the high bit of the command word.
+ */
+ HANDLE command_event[2];
+#endif
+ /**
+ * FXdiv divisor for the number of threads in the thread pool.
+	 * This struct never changes after pthreadpool_create.
+ */
+ struct fxdiv_divisor_size_t threads_count;
+ /**
+ * Thread information structures that immediately follow this structure.
+ */
+ struct thread_info threads[];
+};
+
+PTHREADPOOL_STATIC_ASSERT(sizeof(struct pthreadpool) % PTHREADPOOL_CACHELINE_SIZE == 0,
+ "pthreadpool structure must occupy an integer number of cache lines (64 bytes)");
+
+PTHREADPOOL_INTERNAL struct pthreadpool* pthreadpool_allocate(
+ size_t threads_count);
+
+PTHREADPOOL_INTERNAL void pthreadpool_deallocate(
+ struct pthreadpool* threadpool);
+
+typedef void (*thread_function_t)(struct pthreadpool* threadpool, struct thread_info* thread);
+
+PTHREADPOOL_INTERNAL void pthreadpool_parallelize(
+ struct pthreadpool* threadpool,
+ thread_function_t thread_function,
+ const void* params,
+ size_t params_size,
+ void* task,
+ void* context,
+ size_t linear_range,
+ uint32_t flags);
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_1d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread);
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_1d_with_uarch_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread);
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_1d_tile_1d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread);
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_2d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread);
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_2d_tile_1d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread);
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_2d_tile_2d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread);
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_2d_tile_2d_with_uarch_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread);
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_3d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread);
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_3d_tile_1d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread);
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_3d_tile_2d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread);
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_3d_tile_2d_with_uarch_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread);
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_4d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread);
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_4d_tile_1d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread);
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_4d_tile_2d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread);
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_4d_tile_2d_with_uarch_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread);
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_5d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread);
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_5d_tile_1d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread);
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_5d_tile_2d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread);
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_6d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread);
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_6d_tile_1d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread);
+
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_6d_tile_2d_fastpath(
+ struct pthreadpool* threadpool,
+ struct thread_info* thread);
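
Each params struct above packages loop bounds together with precomputed fxdiv divisors, so that worker threads can de-linearize a stolen index with a few multiplications instead of hardware division. A minimal sketch (not part of the patch) of the de-linearization itself, using plain / and % where the real thread functions use the fxdiv divisors:

    #include <stddef.h>
    #include <stdio.h>

    /* Split a linear 2D index into (i, j). The fast paths compute the same
     * quotient/remainder pair with fxdiv_divide_size_t on params->range_j. */
    static void delinearize_2d(size_t linear_index, size_t range_j,
                               size_t* i, size_t* j) {
        *i = linear_index / range_j; /* fxdiv quotient */
        *j = linear_index % range_j; /* fxdiv remainder */
    }

    int main(void) {
        size_t i, j;
        delinearize_2d(7, 3, &i, &j); /* 7 = 2 * 3 + 1 */
        printf("i=%zu j=%zu\n", i, j); /* prints i=2 j=1 */
        return 0;
    }
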
diff --git a/src/threadpool-pthreads.c b/src/threadpool-pthreads.c
deleted file mode 100644
index 6c6a6d4..0000000
--- a/src/threadpool-pthreads.c
+++ /dev/null
@@ -1,1209 +0,0 @@
-/* Standard C headers */
-#include <stdatomic.h>
-#include <stdbool.h>
-#include <stdint.h>
-#include <stdlib.h>
-#include <string.h>
-
-/* POSIX headers */
-#include <pthread.h>
-#include <unistd.h>
-
-/* Futex-specific headers */
-#ifndef PTHREADPOOL_USE_FUTEX
- #if defined(__linux__)
- #define PTHREADPOOL_USE_FUTEX 1
- #include <sys/syscall.h>
- #include <linux/futex.h>
-
- /* Old Android NDKs do not define SYS_futex and FUTEX_PRIVATE_FLAG */
- #ifndef SYS_futex
- #define SYS_futex __NR_futex
- #endif
- #ifndef FUTEX_PRIVATE_FLAG
- #define FUTEX_PRIVATE_FLAG 128
- #endif
- #elif defined(__native_client__)
- #define PTHREADPOOL_USE_FUTEX 1
- #include <irt.h>
- #else
- #define PTHREADPOOL_USE_FUTEX 0
- #endif
-#endif
-
-/* Dependencies */
-#include <fxdiv.h>
-
-/* Library header */
-#include <pthreadpool.h>
-
-/* Internal headers */
-#include "threadpool-utils.h"
-
-/* Number of iterations in spin-wait loop before going into futex/mutex wait */
-#define PTHREADPOOL_SPIN_WAIT_ITERATIONS 1000000
-
-#define PTHREADPOOL_CACHELINE_SIZE 64
-#define PTHREADPOOL_CACHELINE_ALIGNED __attribute__((__aligned__(PTHREADPOOL_CACHELINE_SIZE)))
-
-#if defined(__clang__)
- #if __has_extension(c_static_assert) || __has_feature(c_static_assert)
- #define PTHREADPOOL_STATIC_ASSERT(predicate, message) _Static_assert((predicate), message)
- #else
- #define PTHREADPOOL_STATIC_ASSERT(predicate, message)
- #endif
-#elif defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4) && (__GNUC_MINOR__ >= 6))
- /* Static assert is supported by gcc >= 4.6 */
- #define PTHREADPOOL_STATIC_ASSERT(predicate, message) _Static_assert((predicate), message)
-#else
- #define PTHREADPOOL_STATIC_ASSERT(predicate, message)
-#endif
-
-static inline size_t multiply_divide(size_t a, size_t b, size_t d) {
- #if defined(__SIZEOF_SIZE_T__) && (__SIZEOF_SIZE_T__ == 4)
- return (size_t) (((uint64_t) a) * ((uint64_t) b)) / ((uint64_t) d);
- #elif defined(__SIZEOF_SIZE_T__) && (__SIZEOF_SIZE_T__ == 8)
- return (size_t) (((__uint128_t) a) * ((__uint128_t) b)) / ((__uint128_t) d);
- #else
- #error "Unsupported platform"
- #endif
-}
-
-static inline size_t divide_round_up(size_t dividend, size_t divisor) {
- if (dividend % divisor == 0) {
- return dividend / divisor;
- } else {
- return dividend / divisor + 1;
- }
-}
-
-static inline size_t min(size_t a, size_t b) {
- return a < b ? a : b;
-}
-
-#if PTHREADPOOL_USE_FUTEX
- #if defined(__linux__)
- static int futex_wait(_Atomic uint32_t* address, uint32_t value) {
- return syscall(SYS_futex, address, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, value, NULL);
- }
-
- static int futex_wake_all(_Atomic uint32_t* address) {
- return syscall(SYS_futex, address, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, INT_MAX);
- }
- #elif defined(__native_client__)
- static struct nacl_irt_futex nacl_irt_futex = { 0 };
- static pthread_once_t nacl_init_guard = PTHREAD_ONCE_INIT;
- static void nacl_init(void) {
- nacl_interface_query(NACL_IRT_FUTEX_v0_1, &nacl_irt_futex, sizeof(nacl_irt_futex));
- }
-
- static int futex_wait(_Atomic uint32_t* address, uint32_t value) {
- return nacl_irt_futex.futex_wait_abs((_Atomic int*) address, (int) value, NULL);
- }
-
- static int futex_wake_all(_Atomic uint32_t* address) {
- int count;
- return nacl_irt_futex.futex_wake((_Atomic int*) address, INT_MAX, &count);
- }
- #else
- #error "Platform-specific implementation of futex_wait and futex_wake_all required"
- #endif
-#endif
-
-#define THREADPOOL_COMMAND_MASK UINT32_C(0x7FFFFFFF)
-
-enum threadpool_command {
- threadpool_command_init,
- threadpool_command_compute_1d,
- threadpool_command_shutdown,
-};
-
-struct PTHREADPOOL_CACHELINE_ALIGNED thread_info {
- /**
- * Index of the first element in the work range.
- * Before processing a new element the owning worker thread increments this value.
- */
- atomic_size_t range_start;
- /**
- * Index of the element after the last element of the work range.
- * Before processing a new element the stealing worker thread decrements this value.
- */
- atomic_size_t range_end;
- /**
- * The number of elements in the work range.
- * Due to race conditions range_length <= range_end - range_start.
- * The owning worker thread must decrement this value before incrementing @a range_start.
- * The stealing worker thread must decrement this value before decrementing @a range_end.
- */
- atomic_size_t range_length;
- /**
- * Thread number in the 0..threads_count-1 range.
- */
- size_t thread_number;
- /**
- * The pthread object corresponding to the thread.
- */
- pthread_t thread_object;
- /**
- * Condition variable used to wake up the thread.
- * When the thread is idle, it waits on this condition variable.
- */
- pthread_cond_t wakeup_condvar;
-};
-
-PTHREADPOOL_STATIC_ASSERT(sizeof(struct thread_info) % PTHREADPOOL_CACHELINE_SIZE == 0, "thread_info structure must occupy an integer number of cache lines (64 bytes)");
-
-struct PTHREADPOOL_CACHELINE_ALIGNED pthreadpool {
- /**
- * The number of threads that are processing an operation.
- */
- atomic_size_t active_threads;
-#if PTHREADPOOL_USE_FUTEX
- /**
- * Indicates if there are active threads.
- * Only two values are possible:
- * - has_active_threads == 0 if active_threads == 0
- * - has_active_threads == 1 if active_threads != 0
- */
- _Atomic uint32_t has_active_threads;
-#endif
- /**
- * The last command submitted to the thread pool.
- */
- _Atomic uint32_t command;
- /**
- * The function to call for each item.
- */
- void *_Atomic task;
- /**
- * The first argument to the item processing function.
- */
- void *_Atomic argument;
- /**
- * Copy of the flags passed to parallelization function.
- */
- _Atomic uint32_t flags;
- /**
- * Serializes concurrent calls to @a pthreadpool_parallelize_* from different threads.
- */
- pthread_mutex_t execution_mutex;
-#if !PTHREADPOOL_USE_FUTEX
- /**
- * Guards access to the @a active_threads variable.
- */
- pthread_mutex_t completion_mutex;
- /**
- * Condition variable to wait until all threads complete an operation (until @a active_threads is zero).
- */
- pthread_cond_t completion_condvar;
- /**
- * Guards access to the @a command variable.
- */
- pthread_mutex_t command_mutex;
- /**
- * Condition variable to wait for change of the @a command variable.
- */
- pthread_cond_t command_condvar;
-#endif
- /**
- * The number of threads in the thread pool. Never changes after initialization.
- */
- size_t threads_count;
- /**
- * Thread information structures that immediately follow this structure.
- */
- struct thread_info threads[];
-};
-
-PTHREADPOOL_STATIC_ASSERT(sizeof(struct pthreadpool) % PTHREADPOOL_CACHELINE_SIZE == 0, "pthreadpool structure must occupy an integer number of cache lines (64 bytes)");
-
-static void checkin_worker_thread(struct pthreadpool* threadpool) {
- #if PTHREADPOOL_USE_FUTEX
- if (atomic_fetch_sub_explicit(&threadpool->active_threads, 1, memory_order_relaxed) == 1) {
- atomic_store_explicit(&threadpool->has_active_threads, 0, memory_order_release);
- futex_wake_all(&threadpool->has_active_threads);
- }
- #else
- pthread_mutex_lock(&threadpool->completion_mutex);
- if (atomic_fetch_sub_explicit(&threadpool->active_threads, 1, memory_order_relaxed) == 1) {
- pthread_cond_signal(&threadpool->completion_condvar);
- }
- pthread_mutex_unlock(&threadpool->completion_mutex);
- #endif
-}
-
-static void wait_worker_threads(struct pthreadpool* threadpool) {
- /* Initial check */
- #if PTHREADPOOL_USE_FUTEX
- uint32_t has_active_threads = atomic_load_explicit(&threadpool->has_active_threads, memory_order_relaxed);
- if (has_active_threads == 0) {
- return;
- }
- #else
- size_t active_threads = atomic_load_explicit(&threadpool->active_threads, memory_order_relaxed);
- if (active_threads == 0) {
- return;
- }
- #endif
-
- /* Spin-wait */
- for (uint32_t i = PTHREADPOOL_SPIN_WAIT_ITERATIONS; i != 0; i--) {
- /* This fence serves as a sleep instruction */
- atomic_thread_fence(memory_order_acquire);
-
- #if PTHREADPOOL_USE_FUTEX
- has_active_threads = atomic_load_explicit(&threadpool->has_active_threads, memory_order_relaxed);
- if (has_active_threads == 0) {
- return;
- }
- #else
- active_threads = atomic_load_explicit(&threadpool->active_threads, memory_order_relaxed);
- if (active_threads == 0) {
- return;
- }
- #endif
- }
-
- /* Fall-back to mutex/futex wait */
- #if PTHREADPOOL_USE_FUTEX
- while ((has_active_threads = atomic_load(&threadpool->has_active_threads)) != 0) {
- futex_wait(&threadpool->has_active_threads, 1);
- }
- #else
- pthread_mutex_lock(&threadpool->completion_mutex);
- while (atomic_load_explicit(&threadpool->active_threads, memory_order_relaxed) != 0) {
- pthread_cond_wait(&threadpool->completion_condvar, &threadpool->completion_mutex);
- };
- pthread_mutex_unlock(&threadpool->completion_mutex);
- #endif
-}
-
-inline static bool atomic_decrement(atomic_size_t* value) {
- size_t actual_value = atomic_load_explicit(value, memory_order_relaxed);
- if (actual_value == 0) {
- return false;
- }
- while (!atomic_compare_exchange_weak_explicit(
- value, &actual_value, actual_value - 1, memory_order_relaxed, memory_order_relaxed))
- {
- if (actual_value == 0) {
- return false;
- }
- }
- return true;
-}
-
-inline static size_t modulo_decrement(uint32_t i, uint32_t n) {
- /* Wrap modulo n, if needed */
- if (i == 0) {
- i = n;
- }
- /* Decrement input variable */
- return i - 1;
-}
-
-static void thread_parallelize_1d(struct pthreadpool* threadpool, struct thread_info* thread) {
- const pthreadpool_task_1d_t task = (pthreadpool_task_1d_t) atomic_load_explicit(&threadpool->task, memory_order_relaxed);
- void *const argument = atomic_load_explicit(&threadpool->argument, memory_order_relaxed);
- /* Process thread's own range of items */
- size_t range_start = atomic_load_explicit(&thread->range_start, memory_order_relaxed);
- while (atomic_decrement(&thread->range_length)) {
- task(argument, range_start++);
- }
-
- /* There still may be other threads with work */
- const size_t thread_number = thread->thread_number;
- const size_t threads_count = threadpool->threads_count;
- for (size_t tid = modulo_decrement(thread_number, threads_count);
- tid != thread_number;
- tid = modulo_decrement(tid, threads_count))
- {
- struct thread_info* other_thread = &threadpool->threads[tid];
- while (atomic_decrement(&other_thread->range_length)) {
- const size_t item_id = atomic_fetch_sub_explicit(&other_thread->range_end, 1, memory_order_relaxed) - 1;
- task(argument, item_id);
- }
- }
- atomic_thread_fence(memory_order_release);
-}
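
The loop above is the work-stealing schedule of the old implementation: after draining its own range, a worker visits the other threads in decreasing thread-id order, wrapping around via modulo_decrement, and consumes their items from the tail (range_end) while owners consume from the head (range_start). A minimal sketch (not part of the patch) of just the victim-selection order:

    #include <inttypes.h>
    #include <stdio.h>

    static uint32_t modulo_decrement(uint32_t i, uint32_t n) {
        return (i == 0 ? n : i) - 1; /* wrap from 0 to n - 1 */
    }

    int main(void) {
        const uint32_t threads_count = 4;
        const uint32_t thread_number = 1;
        printf("thread %" PRIu32 " steals from:", thread_number);
        for (uint32_t tid = modulo_decrement(thread_number, threads_count);
             tid != thread_number;
             tid = modulo_decrement(tid, threads_count)) {
            printf(" %" PRIu32, tid);
        }
        printf("\n"); /* prints: thread 1 steals from: 0 3 2 */
        return 0;
    }
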
-
-static uint32_t wait_for_new_command(
- struct pthreadpool* threadpool,
- uint32_t last_command)
-{
- uint32_t command = atomic_load_explicit(&threadpool->command, memory_order_relaxed);
- if (command != last_command) {
- atomic_thread_fence(memory_order_acquire);
- return command;
- }
-
- /* Spin-wait loop */
- for (uint32_t i = PTHREADPOOL_SPIN_WAIT_ITERATIONS; i != 0; i--) {
- /* This fence serves as a sleep instruction */
- atomic_thread_fence(memory_order_acquire);
-
- command = atomic_load_explicit(&threadpool->command, memory_order_relaxed);
- if (command != last_command) {
- atomic_thread_fence(memory_order_acquire);
- return command;
- }
- }
-
- /* Spin-wait timed out, fall back to mutex/futex wait */
- #if PTHREADPOOL_USE_FUTEX
- do {
- futex_wait(&threadpool->command, last_command);
- command = atomic_load_explicit(&threadpool->command, memory_order_relaxed);
- } while (command == last_command);
- #else
- /* Lock the command mutex */
- pthread_mutex_lock(&threadpool->command_mutex);
- /* Read the command */
- while ((command = atomic_load_explicit(&threadpool->command, memory_order_relaxed)) == last_command) {
- /* Wait for new command */
- pthread_cond_wait(&threadpool->command_condvar, &threadpool->command_mutex);
- }
- /* Read a new command */
- pthread_mutex_unlock(&threadpool->command_mutex);
- #endif
- atomic_thread_fence(memory_order_acquire);
- return command;
-}
-
-static void* thread_main(void* arg) {
- struct thread_info* thread = (struct thread_info*) arg;
- struct pthreadpool* threadpool = ((struct pthreadpool*) (thread - thread->thread_number)) - 1;
- uint32_t last_command = threadpool_command_init;
- struct fpu_state saved_fpu_state = { 0 };
-
- /* Check in */
- checkin_worker_thread(threadpool);
-
- /* Monitor new commands and act accordingly */
- for (;;) {
- uint32_t command = wait_for_new_command(threadpool, last_command);
- const uint32_t flags = atomic_load_explicit(&threadpool->flags, memory_order_relaxed);
-
- /* Process command */
- switch (command & THREADPOOL_COMMAND_MASK) {
- case threadpool_command_compute_1d:
- {
- if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
- saved_fpu_state = get_fpu_state();
- disable_fpu_denormals();
- }
- thread_parallelize_1d(threadpool, thread);
- if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
- set_fpu_state(saved_fpu_state);
- }
- break;
- }
- case threadpool_command_shutdown:
- /* Exit immediately: the master thread is waiting on pthread_join */
- return NULL;
- case threadpool_command_init:
- /* To inhibit compiler warning */
- break;
- }
- /* Notify the master thread that we finished processing */
- checkin_worker_thread(threadpool);
- /* Update last command */
- last_command = command;
- };
-}
-
-static struct pthreadpool* pthreadpool_allocate(size_t threads_count) {
- const size_t threadpool_size = sizeof(struct pthreadpool) + threads_count * sizeof(struct thread_info);
- struct pthreadpool* threadpool = NULL;
- #if defined(__ANDROID__)
- /*
- * Android didn't get posix_memalign until API level 17 (Android 4.2).
- * Use (otherwise obsolete) memalign function on Android platform.
- */
- threadpool = memalign(PTHREADPOOL_CACHELINE_SIZE, threadpool_size);
- if (threadpool == NULL) {
- return NULL;
- }
- #else
- if (posix_memalign((void**) &threadpool, PTHREADPOOL_CACHELINE_SIZE, threadpool_size) != 0) {
- return NULL;
- }
- #endif
- memset(threadpool, 0, threadpool_size);
- return threadpool;
-}
-
-struct pthreadpool* pthreadpool_create(size_t threads_count) {
-#if defined(__native_client__)
- pthread_once(&nacl_init_guard, nacl_init);
-#endif
-
- if (threads_count == 0) {
- threads_count = (size_t) sysconf(_SC_NPROCESSORS_ONLN);
- }
- struct pthreadpool* threadpool = pthreadpool_allocate(threads_count);
- if (threadpool == NULL) {
- return NULL;
- }
- threadpool->threads_count = threads_count;
- for (size_t tid = 0; tid < threads_count; tid++) {
- threadpool->threads[tid].thread_number = tid;
- }
-
- /* Thread pool with a single thread computes everything on the caller thread. */
- if (threads_count > 1) {
- pthread_mutex_init(&threadpool->execution_mutex, NULL);
- #if !PTHREADPOOL_USE_FUTEX
- pthread_mutex_init(&threadpool->completion_mutex, NULL);
- pthread_cond_init(&threadpool->completion_condvar, NULL);
- pthread_mutex_init(&threadpool->command_mutex, NULL);
- pthread_cond_init(&threadpool->command_condvar, NULL);
- #endif
-
- #if PTHREADPOOL_USE_FUTEX
- atomic_store_explicit(&threadpool->has_active_threads, 1, memory_order_relaxed);
- #endif
- atomic_store_explicit(
- &threadpool->active_threads, threadpool->threads_count - 1 /* caller thread */, memory_order_release);
-
- /* Caller thread serves as worker #0. Thus, we create system threads starting with worker #1. */
- for (size_t tid = 1; tid < threads_count; tid++) {
- pthread_create(&threadpool->threads[tid].thread_object, NULL, &thread_main, &threadpool->threads[tid]);
- }
-
- /* Wait until all threads initialize */
- wait_worker_threads(threadpool);
- }
- return threadpool;
-}
-
-size_t pthreadpool_get_threads_count(struct pthreadpool* threadpool) {
- if (threadpool == NULL) {
- return 1;
- } else {
- return threadpool->threads_count;
- }
-}
-
-void pthreadpool_parallelize_1d(
- struct pthreadpool* threadpool,
- pthreadpool_task_1d_t task,
- void* argument,
- size_t range,
- uint32_t flags)
-{
- if (threadpool == NULL || threadpool->threads_count <= 1) {
- /* No thread pool used: execute task sequentially on the calling thread */
- struct fpu_state saved_fpu_state = { 0 };
- if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
- saved_fpu_state = get_fpu_state();
- disable_fpu_denormals();
- }
- for (size_t i = 0; i < range; i++) {
- task(argument, i);
- }
- if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
- set_fpu_state(saved_fpu_state);
- }
- } else {
- /* Protect the global threadpool structures */
- pthread_mutex_lock(&threadpool->execution_mutex);
-
- #if !PTHREADPOOL_USE_FUTEX
- /* Lock the command variables to ensure that threads don't start processing before they observe complete command with all arguments */
- pthread_mutex_lock(&threadpool->command_mutex);
- #endif
-
- /* Setup global arguments */
- atomic_store_explicit(&threadpool->task, task, memory_order_relaxed);
- atomic_store_explicit(&threadpool->argument, argument, memory_order_relaxed);
- atomic_store_explicit(&threadpool->flags, flags, memory_order_relaxed);
-
- /* Locking of completion_mutex not needed: readers are sleeping on command_condvar */
- atomic_store_explicit(
- &threadpool->active_threads, threadpool->threads_count - 1 /* caller thread */, memory_order_relaxed);
- #if PTHREADPOOL_USE_FUTEX
- atomic_store_explicit(&threadpool->has_active_threads, 1, memory_order_relaxed);
- #endif
-
- /* Spread the work between threads */
- for (size_t tid = 0; tid < threadpool->threads_count; tid++) {
- struct thread_info* thread = &threadpool->threads[tid];
- const size_t range_start = multiply_divide(range, tid, threadpool->threads_count);
- const size_t range_end = multiply_divide(range, tid + 1, threadpool->threads_count);
- atomic_store_explicit(&thread->range_start, range_start, memory_order_relaxed);
- atomic_store_explicit(&thread->range_end, range_end, memory_order_relaxed);
- atomic_store_explicit(&thread->range_length, range_end - range_start, memory_order_relaxed);
- }
-
- #if PTHREADPOOL_USE_FUTEX
- /*
-	 * Make new command parameters globally visible. Having this fence before updating the command is important: it
-	 * guarantees that if a worker thread observes the new command value, it also observes the updated command parameters.
- */
- atomic_thread_fence(memory_order_release);
- #endif
-
- /*
- * Update the threadpool command.
-	 * Importantly, do it after initializing command parameters (range, task, argument).
-	 * ~(threadpool->command | THREADPOOL_COMMAND_MASK) flips the bits not in the command mask
-	 * to ensure the unmasked command is different from the last command, because worker threads
- * monitor for change in the unmasked command.
- */
- const uint32_t old_command = atomic_load_explicit(&threadpool->command, memory_order_relaxed);
- const uint32_t new_command = ~(old_command | THREADPOOL_COMMAND_MASK) | threadpool_command_compute_1d;
-
- #if PTHREADPOOL_USE_FUTEX
- atomic_store_explicit(&threadpool->command, new_command, memory_order_release);
-
- /* Wake up the threads */
- futex_wake_all(&threadpool->command);
- #else
- atomic_store_explicit(&threadpool->command, new_command, memory_order_relaxed);
-
- /* Unlock the command variables before waking up the threads for better performance */
- pthread_mutex_unlock(&threadpool->command_mutex);
-
- /* Wake up the threads */
- pthread_cond_broadcast(&threadpool->command_condvar);
- #endif
-
- /* Save and modify FPU denormals control, if needed */
- struct fpu_state saved_fpu_state = { 0 };
- if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
- saved_fpu_state = get_fpu_state();
- disable_fpu_denormals();
- }
-
- /* Do computations as worker #0 */
- thread_parallelize_1d(threadpool, &threadpool->threads[0]);
-
- /* Restore FPU denormals control, if needed */
- if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
- set_fpu_state(saved_fpu_state);
- }
-
- /* Wait until the threads finish computation */
- wait_worker_threads(threadpool);
-
- /* Make changes by other threads visible to this thread */
- atomic_thread_fence(memory_order_acquire);
-
- /* Unprotect the global threadpool structures */
- pthread_mutex_unlock(&threadpool->execution_mutex);
- }
-}
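
The generation trick in the comment above is easy to check in isolation: the bits outside the command mask invert on every submission, so even two back-to-back commands of the same type produce different words for the workers to observe. A minimal, self-contained sketch, not part of this patch; the mask value 0x3FFFFFFF is an assumption mirroring threadpool-common.h:

#include <assert.h>
#include <stdint.h>

#define THREADPOOL_COMMAND_MASK UINT32_C(0x3FFFFFFF) /* assumed mask value */

static uint32_t next_command(uint32_t old_command, uint32_t command) {
	/* Flip the bits outside the mask, then install the new command type */
	return ~(old_command | THREADPOOL_COMMAND_MASK) | command;
}

int main(void) {
	uint32_t command = 0;
	for (int i = 0; i < 4; i++) {
		const uint32_t new_command = next_command(command, 2 /* e.g. compute_1d */);
		assert(new_command != command);                    /* workers always see a change */
		assert((new_command & THREADPOOL_COMMAND_MASK) == 2); /* command type preserved */
		command = new_command;
	}
	return 0;
}
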
-
-struct compute_1d_tile_1d_context {
- pthreadpool_task_1d_tile_1d_t task;
- void* argument;
- size_t range;
- size_t tile;
-};
-
-static void compute_1d_tile_1d(const struct compute_1d_tile_1d_context* context, size_t linear_index) {
- const size_t tile_index = linear_index;
- const size_t index = tile_index * context->tile;
- const size_t tile = min(context->tile, context->range - index);
- context->task(context->argument, index, tile);
-}
-
-void pthreadpool_parallelize_1d_tile_1d(
- pthreadpool_t threadpool,
- pthreadpool_task_1d_tile_1d_t task,
- void* argument,
- size_t range,
- size_t tile,
- uint32_t flags)
-{
- if (threadpool == NULL || threadpool->threads_count <= 1) {
- /* No thread pool used: execute task sequentially on the calling thread */
- struct fpu_state saved_fpu_state = { 0 };
- if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
- saved_fpu_state = get_fpu_state();
- disable_fpu_denormals();
- }
- for (size_t i = 0; i < range; i += tile) {
- task(argument, i, min(range - i, tile));
- }
- if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
- set_fpu_state(saved_fpu_state);
- }
- } else {
- /* Execute in parallel on the thread pool using linearized index */
- const size_t tile_range = divide_round_up(range, tile);
- struct compute_1d_tile_1d_context context = {
- .task = task,
- .argument = argument,
- .range = range,
- .tile = tile
- };
- pthreadpool_parallelize_1d(threadpool, (pthreadpool_task_1d_t) compute_1d_tile_1d, &context, tile_range, flags);
- }
-}
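
To make the tiling arithmetic concrete: with range = 10 and tile = 4, divide_round_up yields 3 tiles, and min() truncates the last one to the 2 remaining items. A standalone sketch of the same index math (min_size is a hypothetical stand-in for the library's min helper):

#include <stdio.h>
#include <stddef.h>

static size_t divide_round_up(size_t dividend, size_t divisor) {
	return dividend / divisor + (dividend % divisor != 0);
}

static size_t min_size(size_t a, size_t b) {
	return a < b ? a : b;
}

int main(void) {
	const size_t range = 10, tile = 4;
	const size_t tile_range = divide_round_up(range, tile); /* 3 tiles */
	for (size_t tile_index = 0; tile_index < tile_range; tile_index++) {
		const size_t index = tile_index * tile;
		printf("tile %zu: start=%zu length=%zu\n",
			tile_index, index, min_size(tile, range - index));
	}
	return 0; /* prints lengths 4, 4, 2 */
}
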
-
-struct compute_2d_context {
- pthreadpool_task_2d_t task;
- void* argument;
- struct fxdiv_divisor_size_t range_j;
-};
-
-static void compute_2d(const struct compute_2d_context* context, size_t linear_index) {
- const struct fxdiv_divisor_size_t range_j = context->range_j;
- const struct fxdiv_result_size_t index = fxdiv_divide_size_t(linear_index, range_j);
- context->task(context->argument, index.quotient, index.remainder);
-}
-
-void pthreadpool_parallelize_2d(
- struct pthreadpool* threadpool,
- pthreadpool_task_2d_t task,
- void* argument,
- size_t range_i,
- size_t range_j,
- uint32_t flags)
-{
- if (threadpool == NULL || threadpool->threads_count <= 1) {
- /* No thread pool used: execute task sequentially on the calling thread */
- struct fpu_state saved_fpu_state = { 0 };
- if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
- saved_fpu_state = get_fpu_state();
- disable_fpu_denormals();
- }
- for (size_t i = 0; i < range_i; i++) {
- for (size_t j = 0; j < range_j; j++) {
- task(argument, i, j);
- }
- }
- if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
- set_fpu_state(saved_fpu_state);
- }
- } else {
- /* Execute in parallel on the thread pool using linearized index */
- struct compute_2d_context context = {
- .task = task,
- .argument = argument,
- .range_j = fxdiv_init_size_t(range_j)
- };
- pthreadpool_parallelize_1d(threadpool, (pthreadpool_task_1d_t) compute_2d, &context, range_i * range_j, flags);
- }
-}
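
For illustration, the linearization used above round-trips exactly: fxdiv_divide_size_t returns the same quotient/remainder pair as plain division, just without a hardware divide per item. A small sketch with ordinary operators standing in for fxdiv:

#include <assert.h>
#include <stddef.h>

int main(void) {
	const size_t range_i = 3, range_j = 5;
	for (size_t linear_index = 0; linear_index < range_i * range_j; linear_index++) {
		const size_t i = linear_index / range_j; /* fxdiv quotient  */
		const size_t j = linear_index % range_j; /* fxdiv remainder */
		assert(i < range_i);
		assert(j < range_j);
		assert(linear_index == i * range_j + j); /* round-trips exactly */
	}
	return 0;
}
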
-
-struct compute_2d_tile_1d_context {
- pthreadpool_task_2d_tile_1d_t task;
- void* argument;
- struct fxdiv_divisor_size_t tile_range_j;
- size_t range_i;
- size_t range_j;
- size_t tile_j;
-};
-
-static void compute_2d_tile_1d(const struct compute_2d_tile_1d_context* context, size_t linear_index) {
- const struct fxdiv_divisor_size_t tile_range_j = context->tile_range_j;
- const struct fxdiv_result_size_t tile_index = fxdiv_divide_size_t(linear_index, tile_range_j);
- const size_t max_tile_j = context->tile_j;
- const size_t index_i = tile_index.quotient;
- const size_t index_j = tile_index.remainder * max_tile_j;
- const size_t tile_j = min(max_tile_j, context->range_j - index_j);
- context->task(context->argument, index_i, index_j, tile_j);
-}
-
-void pthreadpool_parallelize_2d_tile_1d(
- pthreadpool_t threadpool,
- pthreadpool_task_2d_tile_1d_t task,
- void* argument,
- size_t range_i,
- size_t range_j,
- size_t tile_j,
- uint32_t flags)
-{
- if (threadpool == NULL || threadpool->threads_count <= 1) {
- /* No thread pool used: execute task sequentially on the calling thread */
- struct fpu_state saved_fpu_state = { 0 };
- if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
- saved_fpu_state = get_fpu_state();
- disable_fpu_denormals();
- }
- for (size_t i = 0; i < range_i; i++) {
- for (size_t j = 0; j < range_j; j += tile_j) {
- task(argument, i, j, min(range_j - j, tile_j));
- }
- }
- if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
- set_fpu_state(saved_fpu_state);
- }
- } else {
- /* Execute in parallel on the thread pool using linearized index */
- const size_t tile_range_j = divide_round_up(range_j, tile_j);
- struct compute_2d_tile_1d_context context = {
- .task = task,
- .argument = argument,
- .tile_range_j = fxdiv_init_size_t(tile_range_j),
- .range_i = range_i,
- .range_j = range_j,
- .tile_j = tile_j
- };
- pthreadpool_parallelize_1d(threadpool, (pthreadpool_task_1d_t) compute_2d_tile_1d, &context, range_i * tile_range_j, flags);
- }
-}
-
-struct compute_2d_tile_2d_context {
- pthreadpool_task_2d_tile_2d_t task;
- void* argument;
- struct fxdiv_divisor_size_t tile_range_j;
- size_t range_i;
- size_t range_j;
- size_t tile_i;
- size_t tile_j;
-};
-
-static void compute_2d_tile_2d(const struct compute_2d_tile_2d_context* context, size_t linear_index) {
- const struct fxdiv_divisor_size_t tile_range_j = context->tile_range_j;
- const struct fxdiv_result_size_t tile_index = fxdiv_divide_size_t(linear_index, tile_range_j);
- const size_t max_tile_i = context->tile_i;
- const size_t max_tile_j = context->tile_j;
- const size_t index_i = tile_index.quotient * max_tile_i;
- const size_t index_j = tile_index.remainder * max_tile_j;
- const size_t tile_i = min(max_tile_i, context->range_i - index_i);
- const size_t tile_j = min(max_tile_j, context->range_j - index_j);
- context->task(context->argument, index_i, index_j, tile_i, tile_j);
-}
-
-void pthreadpool_parallelize_2d_tile_2d(
- pthreadpool_t threadpool,
- pthreadpool_task_2d_tile_2d_t task,
- void* argument,
- size_t range_i,
- size_t range_j,
- size_t tile_i,
- size_t tile_j,
- uint32_t flags)
-{
- if (threadpool == NULL || threadpool->threads_count <= 1) {
- /* No thread pool used: execute task sequentially on the calling thread */
- struct fpu_state saved_fpu_state = { 0 };
- if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
- saved_fpu_state = get_fpu_state();
- disable_fpu_denormals();
- }
- for (size_t i = 0; i < range_i; i += tile_i) {
- for (size_t j = 0; j < range_j; j += tile_j) {
- task(argument, i, j, min(range_i - i, tile_i), min(range_j - j, tile_j));
- }
- }
- if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
- set_fpu_state(saved_fpu_state);
- }
- } else {
- /* Execute in parallel on the thread pool using linearized index */
- const size_t tile_range_i = divide_round_up(range_i, tile_i);
- const size_t tile_range_j = divide_round_up(range_j, tile_j);
- struct compute_2d_tile_2d_context context = {
- .task = task,
- .argument = argument,
- .tile_range_j = fxdiv_init_size_t(tile_range_j),
- .range_i = range_i,
- .range_j = range_j,
- .tile_i = tile_i,
- .tile_j = tile_j
- };
- pthreadpool_parallelize_1d(threadpool, (pthreadpool_task_1d_t) compute_2d_tile_2d, &context, tile_range_i * tile_range_j, flags);
- }
-}
-
-struct compute_3d_tile_2d_context {
- pthreadpool_task_3d_tile_2d_t task;
- void* argument;
- struct fxdiv_divisor_size_t tile_range_j;
- struct fxdiv_divisor_size_t tile_range_k;
- size_t range_j;
- size_t range_k;
- size_t tile_j;
- size_t tile_k;
-};
-
-static void compute_3d_tile_2d(const struct compute_3d_tile_2d_context* context, size_t linear_index) {
- const struct fxdiv_divisor_size_t tile_range_k = context->tile_range_k;
- const struct fxdiv_result_size_t tile_index_ij_k = fxdiv_divide_size_t(linear_index, tile_range_k);
- const struct fxdiv_divisor_size_t tile_range_j = context->tile_range_j;
- const struct fxdiv_result_size_t tile_index_i_j = fxdiv_divide_size_t(tile_index_ij_k.quotient, tile_range_j);
- const size_t max_tile_j = context->tile_j;
- const size_t max_tile_k = context->tile_k;
- const size_t index_i = tile_index_i_j.quotient;
- const size_t index_j = tile_index_i_j.remainder * max_tile_j;
- const size_t index_k = tile_index_ij_k.remainder * max_tile_k;
- const size_t tile_j = min(max_tile_j, context->range_j - index_j);
- const size_t tile_k = min(max_tile_k, context->range_k - index_k);
- context->task(context->argument, index_i, index_j, index_k, tile_j, tile_k);
-}
-
-void pthreadpool_parallelize_3d_tile_2d(
- pthreadpool_t threadpool,
- pthreadpool_task_3d_tile_2d_t task,
- void* argument,
- size_t range_i,
- size_t range_j,
- size_t range_k,
- size_t tile_j,
- size_t tile_k,
- uint32_t flags)
-{
- if (threadpool == NULL || threadpool->threads_count <= 1) {
- /* No thread pool used: execute task sequentially on the calling thread */
- struct fpu_state saved_fpu_state = { 0 };
- if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
- saved_fpu_state = get_fpu_state();
- disable_fpu_denormals();
- }
- for (size_t i = 0; i < range_i; i++) {
- for (size_t j = 0; j < range_j; j += tile_j) {
- for (size_t k = 0; k < range_k; k += tile_k) {
- task(argument, i, j, k, min(range_j - j, tile_j), min(range_k - k, tile_k));
- }
- }
- }
- if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
- set_fpu_state(saved_fpu_state);
- }
- } else {
- /* Execute in parallel on the thread pool using linearized index */
- const size_t tile_range_j = divide_round_up(range_j, tile_j);
- const size_t tile_range_k = divide_round_up(range_k, tile_k);
- struct compute_3d_tile_2d_context context = {
- .task = task,
- .argument = argument,
- .tile_range_j = fxdiv_init_size_t(tile_range_j),
- .tile_range_k = fxdiv_init_size_t(tile_range_k),
- .range_j = range_j,
- .range_k = range_k,
- .tile_j = tile_j,
- .tile_k = tile_k
- };
- pthreadpool_parallelize_1d(threadpool,
- (pthreadpool_task_1d_t) compute_3d_tile_2d, &context,
- range_i * tile_range_j * tile_range_k, flags);
- }
-}
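
The 3D case decodes the linear index as a mixed-radix number, dividing out the innermost dimension first; each further dimension in the 4D-6D variants below adds one more division. A sketch of the decode with plain division in place of fxdiv:

#include <assert.h>
#include <stddef.h>

int main(void) {
	const size_t range_i = 2, tile_range_j = 3, tile_range_k = 5;
	const size_t total = range_i * tile_range_j * tile_range_k;
	for (size_t linear_index = 0; linear_index < total; linear_index++) {
		const size_t ij = linear_index / tile_range_k; /* strip the k digit */
		const size_t k  = linear_index % tile_range_k;
		const size_t i  = ij / tile_range_j;           /* strip the j digit */
		const size_t j  = ij % tile_range_j;
		assert(linear_index == (i * tile_range_j + j) * tile_range_k + k);
	}
	return 0;
}
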
-
-struct compute_4d_tile_2d_context {
- pthreadpool_task_4d_tile_2d_t task;
- void* argument;
- struct fxdiv_divisor_size_t tile_range_kl;
- struct fxdiv_divisor_size_t range_j;
- struct fxdiv_divisor_size_t tile_range_l;
- size_t range_k;
- size_t range_l;
- size_t tile_k;
- size_t tile_l;
-};
-
-static void compute_4d_tile_2d(const struct compute_4d_tile_2d_context* context, size_t linear_index) {
- const struct fxdiv_divisor_size_t tile_range_kl = context->tile_range_kl;
- const struct fxdiv_result_size_t tile_index_ij_kl = fxdiv_divide_size_t(linear_index, tile_range_kl);
- const struct fxdiv_divisor_size_t range_j = context->range_j;
- const struct fxdiv_result_size_t tile_index_i_j = fxdiv_divide_size_t(tile_index_ij_kl.quotient, range_j);
- const struct fxdiv_divisor_size_t tile_range_l = context->tile_range_l;
- const struct fxdiv_result_size_t tile_index_k_l = fxdiv_divide_size_t(tile_index_ij_kl.remainder, tile_range_l);
- const size_t max_tile_k = context->tile_k;
- const size_t max_tile_l = context->tile_l;
- const size_t index_i = tile_index_i_j.quotient;
- const size_t index_j = tile_index_i_j.remainder;
- const size_t index_k = tile_index_k_l.quotient * max_tile_k;
- const size_t index_l = tile_index_k_l.remainder * max_tile_l;
- const size_t tile_k = min(max_tile_k, context->range_k - index_k);
- const size_t tile_l = min(max_tile_l, context->range_l - index_l);
- context->task(context->argument, index_i, index_j, index_k, index_l, tile_k, tile_l);
-}
-
-void pthreadpool_parallelize_4d_tile_2d(
- pthreadpool_t threadpool,
- pthreadpool_task_4d_tile_2d_t task,
- void* argument,
- size_t range_i,
- size_t range_j,
- size_t range_k,
- size_t range_l,
- size_t tile_k,
- size_t tile_l,
- uint32_t flags)
-{
- if (threadpool == NULL || threadpool->threads_count <= 1) {
- /* No thread pool used: execute task sequentially on the calling thread */
- struct fpu_state saved_fpu_state = { 0 };
- if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
- saved_fpu_state = get_fpu_state();
- disable_fpu_denormals();
- }
- for (size_t i = 0; i < range_i; i++) {
- for (size_t j = 0; j < range_j; j++) {
- for (size_t k = 0; k < range_k; k += tile_k) {
- for (size_t l = 0; l < range_l; l += tile_l) {
- task(argument, i, j, k, l,
- min(range_k - k, tile_k), min(range_l - l, tile_l));
- }
- }
- }
- }
- if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
- set_fpu_state(saved_fpu_state);
- }
- } else {
- /* Execute in parallel on the thread pool using linearized index */
- const size_t tile_range_k = divide_round_up(range_k, tile_k);
- const size_t tile_range_l = divide_round_up(range_l, tile_l);
- struct compute_4d_tile_2d_context context = {
- .task = task,
- .argument = argument,
- .tile_range_kl = fxdiv_init_size_t(tile_range_k * tile_range_l),
- .range_j = fxdiv_init_size_t(range_j),
- .tile_range_l = fxdiv_init_size_t(tile_range_l),
- .range_k = range_k,
- .range_l = range_l,
- .tile_k = tile_k,
- .tile_l = tile_l
- };
- pthreadpool_parallelize_1d(threadpool,
- (pthreadpool_task_1d_t) compute_4d_tile_2d, &context,
- range_i * range_j * tile_range_k * tile_range_l, flags);
- }
-}
-
-struct compute_5d_tile_2d_context {
- pthreadpool_task_5d_tile_2d_t task;
- void* argument;
- struct fxdiv_divisor_size_t tile_range_lm;
- struct fxdiv_divisor_size_t range_k;
- struct fxdiv_divisor_size_t tile_range_m;
- struct fxdiv_divisor_size_t range_j;
- size_t range_l;
- size_t range_m;
- size_t tile_l;
- size_t tile_m;
-};
-
-static void compute_5d_tile_2d(const struct compute_5d_tile_2d_context* context, size_t linear_index) {
- const struct fxdiv_divisor_size_t tile_range_lm = context->tile_range_lm;
- const struct fxdiv_result_size_t tile_index_ijk_lm = fxdiv_divide_size_t(linear_index, tile_range_lm);
- const struct fxdiv_divisor_size_t range_k = context->range_k;
- const struct fxdiv_result_size_t tile_index_ij_k = fxdiv_divide_size_t(tile_index_ijk_lm.quotient, range_k);
- const struct fxdiv_divisor_size_t tile_range_m = context->tile_range_m;
- const struct fxdiv_result_size_t tile_index_l_m = fxdiv_divide_size_t(tile_index_ijk_lm.remainder, tile_range_m);
- const struct fxdiv_divisor_size_t range_j = context->range_j;
- const struct fxdiv_result_size_t tile_index_i_j = fxdiv_divide_size_t(tile_index_ij_k.quotient, range_j);
-
- const size_t max_tile_l = context->tile_l;
- const size_t max_tile_m = context->tile_m;
- const size_t index_i = tile_index_i_j.quotient;
- const size_t index_j = tile_index_i_j.remainder;
- const size_t index_k = tile_index_ij_k.remainder;
- const size_t index_l = tile_index_l_m.quotient * max_tile_l;
- const size_t index_m = tile_index_l_m.remainder * max_tile_m;
- const size_t tile_l = min(max_tile_l, context->range_l - index_l);
- const size_t tile_m = min(max_tile_m, context->range_m - index_m);
- context->task(context->argument, index_i, index_j, index_k, index_l, index_m, tile_l, tile_m);
-}
-
-void pthreadpool_parallelize_5d_tile_2d(
- pthreadpool_t threadpool,
- pthreadpool_task_5d_tile_2d_t task,
- void* argument,
- size_t range_i,
- size_t range_j,
- size_t range_k,
- size_t range_l,
- size_t range_m,
- size_t tile_l,
- size_t tile_m,
- uint32_t flags)
-{
- if (threadpool == NULL || threadpool->threads_count <= 1) {
- /* No thread pool used: execute task sequentially on the calling thread */
- struct fpu_state saved_fpu_state = { 0 };
- if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
- saved_fpu_state = get_fpu_state();
- disable_fpu_denormals();
- }
- for (size_t i = 0; i < range_i; i++) {
- for (size_t j = 0; j < range_j; j++) {
- for (size_t k = 0; k < range_k; k++) {
- for (size_t l = 0; l < range_l; l += tile_l) {
- for (size_t m = 0; m < range_m; m += tile_m) {
- task(argument, i, j, k, l, m,
- min(range_l - l, tile_l), min(range_m - m, tile_m));
- }
- }
- }
- }
- }
- if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
- set_fpu_state(saved_fpu_state);
- }
- } else {
- /* Execute in parallel on the thread pool using linearized index */
- const size_t tile_range_l = divide_round_up(range_l, tile_l);
- const size_t tile_range_m = divide_round_up(range_m, tile_m);
- struct compute_5d_tile_2d_context context = {
- .task = task,
- .argument = argument,
- .tile_range_lm = fxdiv_init_size_t(tile_range_l * tile_range_m),
- .range_k = fxdiv_init_size_t(range_k),
- .tile_range_m = fxdiv_init_size_t(tile_range_m),
- .range_j = fxdiv_init_size_t(range_j),
- .range_l = range_l,
- .range_m = range_m,
- .tile_l = tile_l,
- .tile_m = tile_m,
- };
- pthreadpool_parallelize_1d(threadpool,
- (pthreadpool_task_1d_t) compute_5d_tile_2d, &context,
- range_i * range_j * range_k * tile_range_l * tile_range_m, flags);
- }
-}
-
-struct compute_6d_tile_2d_context {
- pthreadpool_task_6d_tile_2d_t task;
- void* argument;
- struct fxdiv_divisor_size_t tile_range_lmn;
- struct fxdiv_divisor_size_t range_k;
- struct fxdiv_divisor_size_t tile_range_n;
- struct fxdiv_divisor_size_t range_j;
- struct fxdiv_divisor_size_t tile_range_m;
- size_t range_m;
- size_t range_n;
- size_t tile_m;
- size_t tile_n;
-};
-
-static void compute_6d_tile_2d(const struct compute_6d_tile_2d_context* context, size_t linear_index) {
- const struct fxdiv_divisor_size_t tile_range_lmn = context->tile_range_lmn;
- const struct fxdiv_result_size_t tile_index_ijk_lmn = fxdiv_divide_size_t(linear_index, tile_range_lmn);
- const struct fxdiv_divisor_size_t range_k = context->range_k;
- const struct fxdiv_result_size_t tile_index_ij_k = fxdiv_divide_size_t(tile_index_ijk_lmn.quotient, range_k);
- const struct fxdiv_divisor_size_t tile_range_n = context->tile_range_n;
- const struct fxdiv_result_size_t tile_index_lm_n = fxdiv_divide_size_t(tile_index_ijk_lmn.remainder, tile_range_n);
- const struct fxdiv_divisor_size_t range_j = context->range_j;
- const struct fxdiv_result_size_t tile_index_i_j = fxdiv_divide_size_t(tile_index_ij_k.quotient, range_j);
- const struct fxdiv_divisor_size_t tile_range_m = context->tile_range_m;
- const struct fxdiv_result_size_t tile_index_l_m = fxdiv_divide_size_t(tile_index_lm_n.quotient, tile_range_m);
-
- const size_t max_tile_m = context->tile_m;
- const size_t max_tile_n = context->tile_n;
- const size_t index_i = tile_index_i_j.quotient;
- const size_t index_j = tile_index_i_j.remainder;
- const size_t index_k = tile_index_ij_k.remainder;
- const size_t index_l = tile_index_l_m.quotient;
- const size_t index_m = tile_index_l_m.remainder * max_tile_m;
- const size_t index_n = tile_index_lm_n.remainder * max_tile_n;
- const size_t tile_m = min(max_tile_m, context->range_m - index_m);
- const size_t tile_n = min(max_tile_n, context->range_n - index_n);
- context->task(context->argument, index_i, index_j, index_k, index_l, index_m, index_n, tile_m, tile_n);
-}
-
-void pthreadpool_parallelize_6d_tile_2d(
- pthreadpool_t threadpool,
- pthreadpool_task_6d_tile_2d_t task,
- void* argument,
- size_t range_i,
- size_t range_j,
- size_t range_k,
- size_t range_l,
- size_t range_m,
- size_t range_n,
- size_t tile_m,
- size_t tile_n,
- uint32_t flags)
-{
- if (threadpool == NULL || threadpool->threads_count <= 1) {
- /* No thread pool used: execute task sequentially on the calling thread */
- struct fpu_state saved_fpu_state = { 0 };
- if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
- saved_fpu_state = get_fpu_state();
- disable_fpu_denormals();
- }
- for (size_t i = 0; i < range_i; i++) {
- for (size_t j = 0; j < range_j; j++) {
- for (size_t k = 0; k < range_k; k++) {
- for (size_t l = 0; l < range_l; l++) {
- for (size_t m = 0; m < range_m; m += tile_m) {
- for (size_t n = 0; n < range_n; n += tile_n) {
- task(argument, i, j, k, l, m, n,
- min(range_m - m, tile_m), min(range_n - n, tile_n));
- }
- }
- }
- }
- }
- }
- if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
- set_fpu_state(saved_fpu_state);
- }
- } else {
- /* Execute in parallel on the thread pool using linearized index */
- const size_t tile_range_m = divide_round_up(range_m, tile_m);
- const size_t tile_range_n = divide_round_up(range_n, tile_n);
- struct compute_6d_tile_2d_context context = {
- .task = task,
- .argument = argument,
- .tile_range_lmn = fxdiv_init_size_t(range_l * tile_range_m * tile_range_n),
- .range_k = fxdiv_init_size_t(range_k),
- .tile_range_n = fxdiv_init_size_t(tile_range_n),
- .range_j = fxdiv_init_size_t(range_j),
- .tile_range_m = fxdiv_init_size_t(tile_range_m),
- .range_m = range_m,
- .range_n = range_n,
- .tile_m = tile_m,
- .tile_n = tile_n,
- };
- pthreadpool_parallelize_1d(threadpool,
- (pthreadpool_task_1d_t) compute_6d_tile_2d, &context,
- range_i * range_j * range_k * range_l * tile_range_m * tile_range_n, flags);
- }
-}
-
-void pthreadpool_destroy(struct pthreadpool* threadpool) {
- if (threadpool != NULL) {
- if (threadpool->threads_count > 1) {
- #if PTHREADPOOL_USE_FUTEX
- atomic_store_explicit(
- &threadpool->active_threads, threadpool->threads_count - 1 /* caller thread */, memory_order_relaxed);
- atomic_store_explicit(&threadpool->has_active_threads, 1, memory_order_release);
-
- atomic_store_explicit(&threadpool->command, threadpool_command_shutdown, memory_order_release);
-
- /* Wake up worker threads */
- futex_wake_all(&threadpool->command);
- #else
-			/* Lock the command variable to ensure that threads don't shut down until both command and active_threads are updated */
- pthread_mutex_lock(&threadpool->command_mutex);
-
- /* Locking of completion_mutex not needed: readers are sleeping on command_condvar */
- atomic_store_explicit(
- &threadpool->active_threads, threadpool->threads_count - 1 /* caller thread */, memory_order_release);
-
- /* Update the threadpool command. */
- atomic_store_explicit(&threadpool->command, threadpool_command_shutdown, memory_order_release);
-
- /* Wake up worker threads */
- pthread_cond_broadcast(&threadpool->command_condvar);
-
- /* Commit the state changes and let workers start processing */
- pthread_mutex_unlock(&threadpool->command_mutex);
- #endif
-
- /* Wait until all threads return */
- for (size_t thread = 1; thread < threadpool->threads_count; thread++) {
- pthread_join(threadpool->threads[thread].thread_object, NULL);
- }
-
- /* Release resources */
- pthread_mutex_destroy(&threadpool->execution_mutex);
- #if !PTHREADPOOL_USE_FUTEX
- pthread_mutex_destroy(&threadpool->completion_mutex);
- pthread_cond_destroy(&threadpool->completion_condvar);
- pthread_mutex_destroy(&threadpool->command_mutex);
- pthread_cond_destroy(&threadpool->command_condvar);
- #endif
- }
- free(threadpool);
- }
-}
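
The ordering in the shutdown path above distills to a small release/acquire handshake: active_threads is published before the command, so any worker that acquires the shutdown command also observes the updated counter it must decrement on exit. A hypothetical sketch of that pairing (names are illustrative, not the library's):

#include <stdatomic.h>
#include <stddef.h>

struct pool_state {
	atomic_size_t active_threads;
	atomic_uint command;
};

static void announce_shutdown(struct pool_state* state, size_t workers) {
	atomic_store_explicit(&state->active_threads, workers, memory_order_relaxed);
	/* Release: orders the counter store before the command store */
	atomic_store_explicit(&state->command, 1 /* shutdown */, memory_order_release);
}

static int worker_poll(struct pool_state* state) {
	/* Acquire: pairs with the release store in announce_shutdown */
	if (atomic_load_explicit(&state->command, memory_order_acquire) == 1) {
		/* Guaranteed to observe the post-update counter */
		atomic_fetch_sub_explicit(&state->active_threads, 1, memory_order_acq_rel);
		return 1;
	}
	return 0;
}

int main(void) {
	struct pool_state state;
	atomic_init(&state.active_threads, 0);
	atomic_init(&state.command, 0);
	announce_shutdown(&state, 3);
	return worker_poll(&state) ? 0 : 1;
}
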
diff --git a/src/threadpool-shim.c b/src/threadpool-shim.c
deleted file mode 100644
index c8ef51d..0000000
--- a/src/threadpool-shim.c
+++ /dev/null
@@ -1,195 +0,0 @@
-/* Standard C headers */
-#include <stddef.h>
-
-/* Library header */
-#include <pthreadpool.h>
-
-static inline size_t min(size_t a, size_t b) {
- return a < b ? a : b;
-}
-
-struct pthreadpool* pthreadpool_create(size_t threads_count) {
- return NULL;
-}
-
-size_t pthreadpool_get_threads_count(struct pthreadpool* threadpool) {
- return 1;
-}
-
-void pthreadpool_parallelize_1d(
- struct pthreadpool* threadpool,
- pthreadpool_task_1d_t task,
- void* argument,
- size_t range,
- uint32_t flags)
-{
- for (size_t i = 0; i < range; i++) {
- task(argument, i);
- }
-}
-
-void pthreadpool_parallelize_1d_tile_1d(
- pthreadpool_t threadpool,
- pthreadpool_task_1d_tile_1d_t task,
- void* argument,
- size_t range,
- size_t tile,
- uint32_t flags)
-{
- for (size_t i = 0; i < range; i += tile) {
- task(argument, i, min(range - i, tile));
- }
-}
-
-void pthreadpool_parallelize_2d(
- struct pthreadpool* threadpool,
- pthreadpool_task_2d_t task,
- void* argument,
- size_t range_i,
- size_t range_j,
- uint32_t flags)
-{
- for (size_t i = 0; i < range_i; i++) {
- for (size_t j = 0; j < range_j; j++) {
- task(argument, i, j);
- }
- }
-}
-
-void pthreadpool_parallelize_2d_tile_1d(
- pthreadpool_t threadpool,
- pthreadpool_task_2d_tile_1d_t task,
- void* argument,
- size_t range_i,
- size_t range_j,
- size_t tile_j,
- uint32_t flags)
-{
- for (size_t i = 0; i < range_i; i++) {
- for (size_t j = 0; j < range_j; j += tile_j) {
- task(argument, i, j, min(range_j - j, tile_j));
- }
- }
-}
-
-void pthreadpool_parallelize_2d_tile_2d(
- pthreadpool_t threadpool,
- pthreadpool_task_2d_tile_2d_t task,
- void* argument,
- size_t range_i,
- size_t range_j,
- size_t tile_i,
- size_t tile_j,
- uint32_t flags)
-{
- for (size_t i = 0; i < range_i; i += tile_i) {
- for (size_t j = 0; j < range_j; j += tile_j) {
- task(argument, i, j, min(range_i - i, tile_i), min(range_j - j, tile_j));
- }
- }
-}
-
-void pthreadpool_parallelize_3d_tile_2d(
- pthreadpool_t threadpool,
- pthreadpool_task_3d_tile_2d_t task,
- void* argument,
- size_t range_i,
- size_t range_j,
- size_t range_k,
- size_t tile_j,
- size_t tile_k,
- uint32_t flags)
-{
- for (size_t i = 0; i < range_i; i++) {
- for (size_t j = 0; j < range_j; j += tile_j) {
- for (size_t k = 0; k < range_k; k += tile_k) {
- task(argument, i, j, k,
- min(range_j - j, tile_j), min(range_k - k, tile_k));
- }
- }
- }
-}
-
-void pthreadpool_parallelize_4d_tile_2d(
- pthreadpool_t threadpool,
- pthreadpool_task_4d_tile_2d_t task,
- void* argument,
- size_t range_i,
- size_t range_j,
- size_t range_k,
- size_t range_l,
- size_t tile_k,
- size_t tile_l,
- uint32_t flags)
-{
- for (size_t i = 0; i < range_i; i++) {
- for (size_t j = 0; j < range_j; j++) {
- for (size_t k = 0; k < range_k; k += tile_k) {
- for (size_t l = 0; l < range_l; l += tile_l) {
- task(argument, i, j, k, l,
- min(range_k - k, tile_k), min(range_l - l, tile_l));
- }
- }
- }
- }
-}
-
-void pthreadpool_parallelize_5d_tile_2d(
- pthreadpool_t threadpool,
- pthreadpool_task_5d_tile_2d_t task,
- void* argument,
- size_t range_i,
- size_t range_j,
- size_t range_k,
- size_t range_l,
- size_t range_m,
- size_t tile_l,
- size_t tile_m,
- uint32_t flags)
-{
- for (size_t i = 0; i < range_i; i++) {
- for (size_t j = 0; j < range_j; j++) {
- for (size_t k = 0; k < range_k; k++) {
- for (size_t l = 0; l < range_l; l += tile_l) {
- for (size_t m = 0; m < range_m; m += tile_m) {
- task(argument, i, j, k, l, m,
- min(range_l - l, tile_l), min(range_m - m, tile_m));
- }
- }
- }
- }
- }
-}
-
-void pthreadpool_parallelize_6d_tile_2d(
- pthreadpool_t threadpool,
- pthreadpool_task_6d_tile_2d_t task,
- void* argument,
- size_t range_i,
- size_t range_j,
- size_t range_k,
- size_t range_l,
- size_t range_m,
- size_t range_n,
- size_t tile_m,
- size_t tile_n,
- uint32_t flags)
-{
- for (size_t i = 0; i < range_i; i++) {
- for (size_t j = 0; j < range_j; j++) {
- for (size_t k = 0; k < range_k; k++) {
- for (size_t l = 0; l < range_l; l++) {
- for (size_t m = 0; m < range_m; m += tile_m) {
- for (size_t n = 0; n < range_n; n += tile_n) {
- task(argument, i, j, k, l, m, n,
- min(range_m - m, tile_m), min(range_n - n, tile_n));
- }
- }
- }
- }
- }
- }
-}
-
-void pthreadpool_destroy(struct pthreadpool* threadpool) {
-}
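
The shim makes the contract of the full implementations explicit: every entry point degrades to plain nested loops. A NULL threadpool behaves the same way, so callers can use one code path whether or not a pool was created. A minimal usage sketch against the public header:

#include <stdio.h>
#include <pthreadpool.h>

static void square(void* argument, size_t i) {
	int* data = (int*) argument;
	data[i] *= data[i];
}

int main(void) {
	int data[8] = {1, 2, 3, 4, 5, 6, 7, 8};
	/* 0 threads requests one thread per logical core */
	pthreadpool_t threadpool = pthreadpool_create(0);
	pthreadpool_parallelize_1d(threadpool, square, data, 8, 0 /* flags */);
	pthreadpool_destroy(threadpool); /* passing NULL would also be accepted */
	for (size_t i = 0; i < 8; i++) {
		printf("%d ", data[i]);
	}
	printf("\n");
	return 0;
}
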
diff --git a/src/threadpool-utils.h b/src/threadpool-utils.h
index 65c7fb0..91e2445 100644
--- a/src/threadpool-utils.h
+++ b/src/threadpool-utils.h
@@ -1,17 +1,25 @@
#pragma once
#include <stdint.h>
+#include <stddef.h>
-#if defined(__SSE__) || defined(__x86_64__)
-#include <xmmintrin.h>
+/* SSE-specific headers */
+#if defined(__SSE__) || defined(__x86_64__) || defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 1)
+ #include <xmmintrin.h>
#endif
+/* MSVC-specific headers */
+#if defined(_MSC_VER)
+ #include <intrin.h>
+#endif
+
+
struct fpu_state {
-#if defined(__SSE__) || defined(__x86_64__)
+#if defined(__SSE__) || defined(__x86_64__) || defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 1)
uint32_t mxcsr;
-#elif defined(__arm__) && defined(__ARM_FP) && (__ARM_FP != 0)
+#elif defined(__GNUC__) && defined(__arm__) && defined(__ARM_FP) && (__ARM_FP != 0) || defined(_MSC_VER) && defined(_M_ARM)
uint32_t fpscr;
-#elif defined(__aarch64__)
+#elif defined(__GNUC__) && defined(__aarch64__) || defined(_MSC_VER) && defined(_M_ARM64)
uint64_t fpcr;
#else
char unused;
@@ -20,37 +28,63 @@ struct fpu_state {
static inline struct fpu_state get_fpu_state() {
struct fpu_state state = { 0 };
-#if defined(__SSE__) || defined(__x86_64__)
+#if defined(__SSE__) || defined(__x86_64__) || defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 1)
state.mxcsr = (uint32_t) _mm_getcsr();
-#elif defined(__arm__) && defined(__ARM_FP) && (__ARM_FP != 0)
+#elif defined(_MSC_VER) && defined(_M_ARM)
+ state.fpscr = (uint32_t) _MoveFromCoprocessor(10, 7, 1, 0, 0);
+#elif defined(_MSC_VER) && defined(_M_ARM64)
+ state.fpcr = (uint64_t) _ReadStatusReg(0x5A20);
+#elif defined(__GNUC__) && defined(__arm__) && defined(__ARM_FP) && (__ARM_FP != 0)
__asm__ __volatile__("VMRS %[fpscr], fpscr" : [fpscr] "=r" (state.fpscr));
-#elif defined(__aarch64__)
+#elif defined(__GNUC__) && defined(__aarch64__)
__asm__ __volatile__("MRS %[fpcr], fpcr" : [fpcr] "=r" (state.fpcr));
#endif
return state;
}
static inline void set_fpu_state(const struct fpu_state state) {
-#if defined(__SSE__) || defined(__x86_64__)
+#if defined(__SSE__) || defined(__x86_64__) || defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 1)
_mm_setcsr((unsigned int) state.mxcsr);
-#elif defined(__arm__) && defined(__ARM_FP) && (__ARM_FP != 0)
+#elif defined(_MSC_VER) && defined(_M_ARM)
+ _MoveToCoprocessor((int) state.fpscr, 10, 7, 1, 0, 0);
+#elif defined(_MSC_VER) && defined(_M_ARM64)
+ _WriteStatusReg(0x5A20, (__int64) state.fpcr);
+#elif defined(__GNUC__) && defined(__arm__) && defined(__ARM_FP) && (__ARM_FP != 0)
__asm__ __volatile__("VMSR fpscr, %[fpscr]" : : [fpscr] "r" (state.fpscr));
-#elif defined(__aarch64__)
+#elif defined(__GNUC__) && defined(__aarch64__)
__asm__ __volatile__("MSR fpcr, %[fpcr]" : : [fpcr] "r" (state.fpcr));
#endif
}
static inline void disable_fpu_denormals() {
-#if defined(__SSE__) || defined(__x86_64__)
+#if defined(__SSE__) || defined(__x86_64__) || defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 1)
_mm_setcsr(_mm_getcsr() | 0x8040);
-#elif defined(__arm__) && defined(__ARM_FP) && (__ARM_FP != 0)
+#elif defined(_MSC_VER) && defined(_M_ARM)
+ int fpscr = _MoveFromCoprocessor(10, 7, 1, 0, 0);
+ fpscr |= 0x1000000;
+ _MoveToCoprocessor(fpscr, 10, 7, 1, 0, 0);
+#elif defined(_MSC_VER) && defined(_M_ARM64)
+ __int64 fpcr = _ReadStatusReg(0x5A20);
+ fpcr |= 0x1080000;
+ _WriteStatusReg(0x5A20, fpcr);
+#elif defined(__GNUC__) && defined(__arm__) && defined(__ARM_FP) && (__ARM_FP != 0)
uint32_t fpscr;
- __asm__ __volatile__(
- "VMRS %[fpscr], fpscr\n"
- "ORR %[fpscr], #0x1000000\n"
- "VMSR fpscr, %[fpscr]\n"
- : [fpscr] "=r" (fpscr));
-#elif defined(__aarch64__)
+ #if defined(__thumb__) && !defined(__thumb2__)
+ __asm__ __volatile__(
+ "VMRS %[fpscr], fpscr\n"
+ "ORRS %[fpscr], %[bitmask]\n"
+ "VMSR fpscr, %[fpscr]\n"
+ : [fpscr] "=l" (fpscr)
+ : [bitmask] "l" (0x1000000)
+ : "cc");
+ #else
+ __asm__ __volatile__(
+ "VMRS %[fpscr], fpscr\n"
+ "ORR %[fpscr], #0x1000000\n"
+ "VMSR fpscr, %[fpscr]\n"
+ : [fpscr] "=r" (fpscr));
+ #endif
+#elif defined(__GNUC__) && defined(__aarch64__)
uint64_t fpcr;
__asm__ __volatile__(
"MRS %[fpcr], fpcr\n"
@@ -60,3 +94,29 @@ static inline void disable_fpu_denormals() {
: [fpcr] "=r" (fpcr));
#endif
}
+
+static inline size_t modulo_decrement(size_t i, size_t n) {
+ /* Wrap modulo n, if needed */
+ if (i == 0) {
+ i = n;
+ }
+ /* Decrement input variable */
+ return i - 1;
+}
+
+static inline size_t divide_round_up(size_t dividend, size_t divisor) {
+ if (dividend % divisor == 0) {
+ return dividend / divisor;
+ } else {
+ return dividend / divisor + 1;
+ }
+}
+
+/* Windows headers define min and max macros; undefine them here */
+#ifdef min
+ #undef min
+#endif
+
+static inline size_t min(size_t a, size_t b) {
+ return a < b ? a : b;
+}
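
modulo_decrement is the building block for wrap-around ring traversal: starting from slot i in a ring of n slots, repeated calls visit every other slot exactly once, which is how a worker can scan its peers (the work-stealing use lives in portable-api.c, outside this hunk). A standalone sketch:

#include <stdio.h>
#include <stddef.h>

static size_t modulo_decrement(size_t i, size_t n) {
	if (i == 0) {
		i = n; /* wrap around */
	}
	return i - 1;
}

int main(void) {
	const size_t n = 4; /* ring size */
	size_t i = 1;       /* starting slot */
	for (size_t step = 0; step < n - 1; step++) {
		i = modulo_decrement(i, n);
		printf("%zu ", i); /* prints: 0 3 2 — every slot except the start */
	}
	printf("\n");
	return 0;
}
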
diff --git a/src/windows.c b/src/windows.c
new file mode 100644
index 0000000..c9b88f7
--- /dev/null
+++ b/src/windows.c
@@ -0,0 +1,364 @@
+/* Standard C headers */
+#include <assert.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+/* Configuration header */
+#include "threadpool-common.h"
+
+/* Windows headers */
+#include <windows.h>
+
+/* Public library header */
+#include <pthreadpool.h>
+
+/* Internal library headers */
+#include "threadpool-atomics.h"
+#include "threadpool-object.h"
+#include "threadpool-utils.h"
+
+
+static void checkin_worker_thread(struct pthreadpool* threadpool, uint32_t event_index) {
+ if (pthreadpool_decrement_fetch_release_size_t(&threadpool->active_threads) == 0) {
+ SetEvent(threadpool->completion_event[event_index]);
+ }
+}
+
+static void wait_worker_threads(struct pthreadpool* threadpool, uint32_t event_index) {
+ /* Initial check */
+ size_t active_threads = pthreadpool_load_acquire_size_t(&threadpool->active_threads);
+ if (active_threads == 0) {
+ return;
+ }
+
+ /* Spin-wait */
+ for (uint32_t i = PTHREADPOOL_SPIN_WAIT_ITERATIONS; i != 0; i--) {
+ pthreadpool_yield();
+
+ active_threads = pthreadpool_load_acquire_size_t(&threadpool->active_threads);
+ if (active_threads == 0) {
+ return;
+ }
+ }
+
+	/* Fall back to event wait */
+ const DWORD wait_status = WaitForSingleObject(threadpool->completion_event[event_index], INFINITE);
+ assert(wait_status == WAIT_OBJECT_0);
+ assert(pthreadpool_load_relaxed_size_t(&threadpool->active_threads) == 0);
+}
+
+static uint32_t wait_for_new_command(
+ struct pthreadpool* threadpool,
+ uint32_t last_command,
+ uint32_t last_flags)
+{
+ uint32_t command = pthreadpool_load_acquire_uint32_t(&threadpool->command);
+ if (command != last_command) {
+ return command;
+ }
+
+ if ((last_flags & PTHREADPOOL_FLAG_YIELD_WORKERS) == 0) {
+ /* Spin-wait loop */
+ for (uint32_t i = PTHREADPOOL_SPIN_WAIT_ITERATIONS; i != 0; i--) {
+ pthreadpool_yield();
+
+ command = pthreadpool_load_acquire_uint32_t(&threadpool->command);
+ if (command != last_command) {
+ return command;
+ }
+ }
+ }
+
+ /* Spin-wait disabled or timed out, fall back to event wait */
+ const uint32_t event_index = (last_command >> 31);
+ const DWORD wait_status = WaitForSingleObject(threadpool->command_event[event_index], INFINITE);
+ assert(wait_status == WAIT_OBJECT_0);
+
+ command = pthreadpool_load_relaxed_uint32_t(&threadpool->command);
+ assert(command != last_command);
+ return command;
+}
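
The event_index derivation relies on the command-update rule flipping the high bit of the command word on every submission, so command >> 31 alternates between 0 and 1 and selects the event pair for the current generation. A sketch verifying the alternation (the mask value is an assumption mirroring threadpool-common.h):

#include <assert.h>
#include <stdint.h>

int main(void) {
	const uint32_t mask = UINT32_C(0x3FFFFFFF); /* assumed THREADPOOL_COMMAND_MASK */
	uint32_t command = 0;
	for (int i = 0; i < 6; i++) {
		const uint32_t event_index = command >> 31;
		assert(event_index == (uint32_t) (i & 1)); /* 0, 1, 0, 1, ... */
		command = ~(command | mask) | 1; /* submit the next command */
	}
	return 0;
}
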
+
+static DWORD WINAPI thread_main(LPVOID arg) {
+ struct thread_info* thread = (struct thread_info*) arg;
+ struct pthreadpool* threadpool = thread->threadpool;
+ uint32_t last_command = threadpool_command_init;
+ struct fpu_state saved_fpu_state = { 0 };
+ uint32_t flags = 0;
+
+ /* Check in */
+ checkin_worker_thread(threadpool, 0);
+
+ /* Monitor new commands and act accordingly */
+ for (;;) {
+ uint32_t command = wait_for_new_command(threadpool, last_command, flags);
+ pthreadpool_fence_acquire();
+
+ flags = pthreadpool_load_relaxed_uint32_t(&threadpool->flags);
+
+ /* Process command */
+ switch (command & THREADPOOL_COMMAND_MASK) {
+ case threadpool_command_parallelize:
+ {
+ const thread_function_t thread_function =
+ (thread_function_t) pthreadpool_load_relaxed_void_p(&threadpool->thread_function);
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ saved_fpu_state = get_fpu_state();
+ disable_fpu_denormals();
+ }
+
+ thread_function(threadpool, thread);
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ set_fpu_state(saved_fpu_state);
+ }
+ break;
+ }
+ case threadpool_command_shutdown:
+				/* Exit immediately: the master thread is waiting in WaitForSingleObject */
+ return 0;
+ case threadpool_command_init:
+ /* To inhibit compiler warning */
+ break;
+ }
+ /* Notify the master thread that we finished processing */
+ const uint32_t event_index = command >> 31;
+ checkin_worker_thread(threadpool, event_index);
+ /* Update last command */
+ last_command = command;
+	}
+ return 0;
+}
+
+struct pthreadpool* pthreadpool_create(size_t threads_count) {
+ if (threads_count == 0) {
+ SYSTEM_INFO system_info;
+ ZeroMemory(&system_info, sizeof(system_info));
+ GetSystemInfo(&system_info);
+ threads_count = (size_t) system_info.dwNumberOfProcessors;
+ }
+
+ struct pthreadpool* threadpool = pthreadpool_allocate(threads_count);
+ if (threadpool == NULL) {
+ return NULL;
+ }
+ threadpool->threads_count = fxdiv_init_size_t(threads_count);
+ for (size_t tid = 0; tid < threads_count; tid++) {
+ threadpool->threads[tid].thread_number = tid;
+ threadpool->threads[tid].threadpool = threadpool;
+ }
+
+ /* Thread pool with a single thread computes everything on the caller thread. */
+ if (threads_count > 1) {
+ threadpool->execution_mutex = CreateMutexW(
+ NULL /* mutex attributes */,
+ FALSE /* initially owned */,
+ NULL /* name */);
+ for (size_t i = 0; i < 2; i++) {
+ threadpool->completion_event[i] = CreateEventW(
+ NULL /* event attributes */,
+ TRUE /* manual-reset event: yes */,
+ FALSE /* initial state: nonsignaled */,
+ NULL /* name */);
+ threadpool->command_event[i] = CreateEventW(
+ NULL /* event attributes */,
+ TRUE /* manual-reset event: yes */,
+ FALSE /* initial state: nonsignaled */,
+ NULL /* name */);
+ }
+
+ pthreadpool_store_relaxed_size_t(&threadpool->active_threads, threads_count - 1 /* caller thread */);
+
+ /* Caller thread serves as worker #0. Thus, we create system threads starting with worker #1. */
+ for (size_t tid = 1; tid < threads_count; tid++) {
+ threadpool->threads[tid].thread_handle = CreateThread(
+ NULL /* thread attributes */,
+ 0 /* stack size: default */,
+ &thread_main,
+ &threadpool->threads[tid],
+ 0 /* creation flags */,
+ NULL /* thread id */);
+ }
+
+ /* Wait until all threads initialize */
+ wait_worker_threads(threadpool, 0);
+ }
+ return threadpool;
+}
+
+PTHREADPOOL_INTERNAL void pthreadpool_parallelize(
+ struct pthreadpool* threadpool,
+ thread_function_t thread_function,
+ const void* params,
+ size_t params_size,
+ void* task,
+ void* context,
+ size_t linear_range,
+ uint32_t flags)
+{
+ assert(threadpool != NULL);
+ assert(thread_function != NULL);
+ assert(task != NULL);
+ assert(linear_range > 1);
+
+ /* Protect the global threadpool structures */
+ const DWORD wait_status = WaitForSingleObject(threadpool->execution_mutex, INFINITE);
+ assert(wait_status == WAIT_OBJECT_0);
+
+ /* Setup global arguments */
+ pthreadpool_store_relaxed_void_p(&threadpool->thread_function, (void*) thread_function);
+ pthreadpool_store_relaxed_void_p(&threadpool->task, task);
+ pthreadpool_store_relaxed_void_p(&threadpool->argument, context);
+ pthreadpool_store_relaxed_uint32_t(&threadpool->flags, flags);
+
+ const struct fxdiv_divisor_size_t threads_count = threadpool->threads_count;
+ pthreadpool_store_relaxed_size_t(&threadpool->active_threads, threads_count.value - 1 /* caller thread */);
+
+ if (params_size != 0) {
+ CopyMemory(&threadpool->params, params, params_size);
+ pthreadpool_fence_release();
+ }
+
+ /* Spread the work between threads */
+ const struct fxdiv_result_size_t range_params = fxdiv_divide_size_t(linear_range, threads_count);
+ size_t range_start = 0;
+ for (size_t tid = 0; tid < threads_count.value; tid++) {
+ struct thread_info* thread = &threadpool->threads[tid];
+ const size_t range_length = range_params.quotient + (size_t) (tid < range_params.remainder);
+ const size_t range_end = range_start + range_length;
+ pthreadpool_store_relaxed_size_t(&thread->range_start, range_start);
+ pthreadpool_store_relaxed_size_t(&thread->range_end, range_end);
+ pthreadpool_store_relaxed_size_t(&thread->range_length, range_length);
+
+ /* The next subrange starts where the previous ended */
+ range_start = range_end;
+ }
+
+ /*
+ * Update the threadpool command.
+	 * Importantly, do it after initializing command parameters (range, task, argument, flags).
+	 * ~(threadpool->command | THREADPOOL_COMMAND_MASK) flips the bits not in the command mask
+	 * to ensure the unmasked command is different from the last command, because worker threads
+	 * monitor for a change in the unmasked command.
+ */
+ const uint32_t old_command = pthreadpool_load_relaxed_uint32_t(&threadpool->command);
+ const uint32_t new_command = ~(old_command | THREADPOOL_COMMAND_MASK) | threadpool_command_parallelize;
+
+ /*
+ * Reset the command event for the next command.
+ * It is important to reset the event before writing out the new command, because as soon as the worker threads
+ * observe the new command, they may process it and switch to waiting on the next command event.
+ *
+ * Note: the event is different from the command event signalled in this update.
+ */
+ const uint32_t event_index = (old_command >> 31);
+ BOOL reset_event_status = ResetEvent(threadpool->command_event[event_index ^ 1]);
+ assert(reset_event_status != FALSE);
+
+ /*
+ * Store the command with release semantics to guarantee that if a worker thread observes
+ * the new command value, it also observes the updated command parameters.
+ *
+ * Note: release semantics is necessary, because the workers might be waiting in a spin-loop
+ * rather than on the event object.
+ */
+ pthreadpool_store_release_uint32_t(&threadpool->command, new_command);
+
+ /*
+ * Signal the event to wake up the threads.
+ * Event in use must be switched after every submitted command to avoid race conditions.
+ * Choose the event based on the high bit of the command, which is flipped on every update.
+ */
+ const BOOL set_event_status = SetEvent(threadpool->command_event[event_index]);
+ assert(set_event_status != FALSE);
+
+ /* Save and modify FPU denormals control, if needed */
+ struct fpu_state saved_fpu_state = { 0 };
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ saved_fpu_state = get_fpu_state();
+ disable_fpu_denormals();
+ }
+
+ /* Do computations as worker #0 */
+ thread_function(threadpool, &threadpool->threads[0]);
+
+ /* Restore FPU denormals control, if needed */
+ if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+ set_fpu_state(saved_fpu_state);
+ }
+
+ /*
+ * Wait until the threads finish computation
+ * Use the complementary event because it corresponds to the new command.
+ */
+ wait_worker_threads(threadpool, event_index ^ 1);
+
+ /*
+ * Reset the completion event for the next command.
+ * Note: the event is different from the one used for waiting in this update.
+ */
+ reset_event_status = ResetEvent(threadpool->completion_event[event_index]);
+ assert(reset_event_status != FALSE);
+
+ /* Make changes by other threads visible to this thread */
+ pthreadpool_fence_acquire();
+
+ /* Unprotect the global threadpool structures */
+ const BOOL release_mutex_status = ReleaseMutex(threadpool->execution_mutex);
+ assert(release_mutex_status != FALSE);
+}
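
The quotient/remainder split above is worth seeing with numbers: the first (linear_range mod threads_count) workers get one extra item, so subrange lengths differ by at most one and the subranges tile the range exactly. A sketch with plain division standing in for fxdiv:

#include <assert.h>
#include <stddef.h>

int main(void) {
	const size_t linear_range = 10, threads_count = 4;
	const size_t quotient  = linear_range / threads_count; /* 2 */
	const size_t remainder = linear_range % threads_count; /* 2 */
	size_t range_start = 0;
	for (size_t tid = 0; tid < threads_count; tid++) {
		/* lengths come out as 3, 3, 2, 2 */
		const size_t range_length = quotient + (size_t) (tid < remainder);
		range_start += range_length;
	}
	assert(range_start == linear_range); /* covers the range exactly */
	return 0;
}
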
+
+void pthreadpool_destroy(struct pthreadpool* threadpool) {
+ if (threadpool != NULL) {
+ const size_t threads_count = threadpool->threads_count.value;
+ if (threads_count > 1) {
+ pthreadpool_store_relaxed_size_t(&threadpool->active_threads, threads_count - 1 /* caller thread */);
+
+ /*
+ * Store the command with release semantics to guarantee that if a worker thread observes
+ * the new command value, it also observes the updated active_threads values.
+ */
+ const uint32_t old_command = pthreadpool_load_relaxed_uint32_t(&threadpool->command);
+ pthreadpool_store_release_uint32_t(&threadpool->command, threadpool_command_shutdown);
+
+ /*
+ * Signal the event to wake up the threads.
+ * Event in use must be switched after every submitted command to avoid race conditions.
+ * Choose the event based on the high bit of the command, which is flipped on every update.
+ */
+ const uint32_t event_index = (old_command >> 31);
+ const BOOL set_event_status = SetEvent(threadpool->command_event[event_index]);
+ assert(set_event_status != FALSE);
+
+ /* Wait until all threads return */
+ for (size_t tid = 1; tid < threads_count; tid++) {
+ const HANDLE thread_handle = threadpool->threads[tid].thread_handle;
+ if (thread_handle != NULL) {
+ const DWORD wait_status = WaitForSingleObject(thread_handle, INFINITE);
+ assert(wait_status == WAIT_OBJECT_0);
+
+ const BOOL close_status = CloseHandle(thread_handle);
+ assert(close_status != FALSE);
+ }
+ }
+
+ /* Release resources */
+ if (threadpool->execution_mutex != NULL) {
+ const BOOL close_status = CloseHandle(threadpool->execution_mutex);
+ assert(close_status != FALSE);
+ }
+ for (size_t i = 0; i < 2; i++) {
+ if (threadpool->command_event[i] != NULL) {
+ const BOOL close_status = CloseHandle(threadpool->command_event[i]);
+ assert(close_status != FALSE);
+ }
+ if (threadpool->completion_event[i] != NULL) {
+ const BOOL close_status = CloseHandle(threadpool->completion_event[i]);
+ assert(close_status != FALSE);
+ }
+ }
+ }
+ pthreadpool_deallocate(threadpool);
+ }
+}
diff --git a/test/pthreadpool.cc b/test/pthreadpool.cc
index 4faf3be..c9592ec 100644
--- a/test/pthreadpool.cc
+++ b/test/pthreadpool.cc
@@ -23,17 +23,44 @@ const size_t kParallelize2DTile2DRangeI = 53;
const size_t kParallelize2DTile2DRangeJ = 59;
const size_t kParallelize2DTile2DTileI = 5;
const size_t kParallelize2DTile2DTileJ = 7;
+const size_t kParallelize3DRangeI = 13;
+const size_t kParallelize3DRangeJ = 17;
+const size_t kParallelize3DRangeK = 19;
+const size_t kParallelize3DTile1DRangeI = 17;
+const size_t kParallelize3DTile1DRangeJ = 19;
+const size_t kParallelize3DTile1DRangeK = 23;
+const size_t kParallelize3DTile1DTileK = 5;
const size_t kParallelize3DTile2DRangeI = 19;
const size_t kParallelize3DTile2DRangeJ = 23;
const size_t kParallelize3DTile2DRangeK = 29;
const size_t kParallelize3DTile2DTileJ = 2;
const size_t kParallelize3DTile2DTileK = 3;
+const size_t kParallelize4DRangeI = 11;
+const size_t kParallelize4DRangeJ = 13;
+const size_t kParallelize4DRangeK = 17;
+const size_t kParallelize4DRangeL = 19;
+const size_t kParallelize4DTile1DRangeI = 13;
+const size_t kParallelize4DTile1DRangeJ = 17;
+const size_t kParallelize4DTile1DRangeK = 19;
+const size_t kParallelize4DTile1DRangeL = 23;
+const size_t kParallelize4DTile1DTileL = 5;
const size_t kParallelize4DTile2DRangeI = 17;
const size_t kParallelize4DTile2DRangeJ = 19;
const size_t kParallelize4DTile2DRangeK = 23;
const size_t kParallelize4DTile2DRangeL = 29;
const size_t kParallelize4DTile2DTileK = 2;
const size_t kParallelize4DTile2DTileL = 3;
+const size_t kParallelize5DRangeI = 7;
+const size_t kParallelize5DRangeJ = 11;
+const size_t kParallelize5DRangeK = 13;
+const size_t kParallelize5DRangeL = 17;
+const size_t kParallelize5DRangeM = 19;
+const size_t kParallelize5DTile1DRangeI = 11;
+const size_t kParallelize5DTile1DRangeJ = 13;
+const size_t kParallelize5DTile1DRangeK = 17;
+const size_t kParallelize5DTile1DRangeL = 19;
+const size_t kParallelize5DTile1DRangeM = 23;
+const size_t kParallelize5DTile1DTileM = 5;
const size_t kParallelize5DTile2DRangeI = 13;
const size_t kParallelize5DTile2DRangeJ = 17;
const size_t kParallelize5DTile2DRangeK = 19;
@@ -41,6 +68,19 @@ const size_t kParallelize5DTile2DRangeL = 23;
const size_t kParallelize5DTile2DRangeM = 29;
const size_t kParallelize5DTile2DTileL = 3;
const size_t kParallelize5DTile2DTileM = 2;
+const size_t kParallelize6DRangeI = 3;
+const size_t kParallelize6DRangeJ = 5;
+const size_t kParallelize6DRangeK = 7;
+const size_t kParallelize6DRangeL = 11;
+const size_t kParallelize6DRangeM = 13;
+const size_t kParallelize6DRangeN = 17;
+const size_t kParallelize6DTile1DRangeI = 5;
+const size_t kParallelize6DTile1DRangeJ = 7;
+const size_t kParallelize6DTile1DRangeK = 11;
+const size_t kParallelize6DTile1DRangeL = 13;
+const size_t kParallelize6DTile1DRangeM = 17;
+const size_t kParallelize6DTile1DRangeN = 19;
+const size_t kParallelize6DTile1DTileN = 5;
const size_t kParallelize6DTile2DRangeI = 7;
const size_t kParallelize6DTile2DRangeJ = 11;
const size_t kParallelize6DTile2DRangeK = 13;
@@ -54,6 +94,9 @@ const size_t kIncrementIterations = 101;
const size_t kIncrementIterations5D = 7;
const size_t kIncrementIterations6D = 3;
+const uint32_t kMaxUArchIndex = 0;
+const uint32_t kDefaultUArchIndex = 42;
+
TEST(CreateAndDestroy, NullThreadPool) {
pthreadpool* threadpool = nullptr;
@@ -274,6 +317,29 @@ TEST(Parallelize1D, MultiThreadPoolEachItemProcessedMultipleTimes) {
}
}
+static void IncrementSame1D(std::atomic_int* num_processed_items, size_t i) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+}
+
+TEST(Parallelize1D, MultiThreadPoolHighContention) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_1d_t>(IncrementSame1D),
+ static_cast<void*>(&num_processed_items),
+ kParallelize1DRange,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize1DRange);
+}
+
static void WorkImbalance1D(std::atomic_int* num_processed_items, size_t i) {
num_processed_items->fetch_add(1, std::memory_order_relaxed);
if (i == 0) {
@@ -303,6 +369,321 @@ TEST(Parallelize1D, MultiThreadPoolWorkStealing) {
EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize1DRange);
}
+static void ComputeNothing1DWithUArch(void*, uint32_t, size_t) {
+}
+
+TEST(Parallelize1DWithUArch, SingleThreadPoolCompletes) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_1d_with_uarch(threadpool.get(),
+ ComputeNothing1DWithUArch,
+ nullptr,
+ kDefaultUArchIndex,
+ kMaxUArchIndex,
+ kParallelize1DRange,
+ 0 /* flags */);
+}
+
+TEST(Parallelize1DWithUArch, MultiThreadPoolCompletes) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_1d_with_uarch(
+ threadpool.get(),
+ ComputeNothing1DWithUArch,
+ nullptr,
+ kDefaultUArchIndex,
+ kMaxUArchIndex,
+ kParallelize1DRange,
+ 0 /* flags */);
+}
+
+static void CheckUArch1DWithUArch(void*, uint32_t uarch_index, size_t) {
+ if (uarch_index != kDefaultUArchIndex) {
+ EXPECT_LE(uarch_index, kMaxUArchIndex);
+ }
+}
+
+TEST(Parallelize1DWithUArch, SingleThreadPoolUArchInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_1d_with_uarch(threadpool.get(),
+ CheckUArch1DWithUArch,
+ nullptr,
+ kDefaultUArchIndex,
+ kMaxUArchIndex,
+ kParallelize1DRange,
+ 0 /* flags */);
+}
+
+TEST(Parallelize1DWithUArch, MultiThreadPoolUArchInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_1d_with_uarch(
+ threadpool.get(),
+ CheckUArch1DWithUArch,
+ nullptr,
+ kDefaultUArchIndex,
+ kMaxUArchIndex,
+ kParallelize1DRange,
+ 0 /* flags */);
+}
+
+static void CheckBounds1DWithUArch(void*, uint32_t, size_t i) {
+ EXPECT_LT(i, kParallelize1DRange);
+}
+
+TEST(Parallelize1DWithUArch, SingleThreadPoolAllItemsInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_1d_with_uarch(
+ threadpool.get(),
+ CheckBounds1DWithUArch,
+ nullptr,
+ kDefaultUArchIndex,
+ kMaxUArchIndex,
+ kParallelize1DRange,
+ 0 /* flags */);
+}
+
+TEST(Parallelize1DWithUArch, MultiThreadPoolAllItemsInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_1d_with_uarch(
+ threadpool.get(),
+ CheckBounds1DWithUArch,
+ nullptr,
+ kDefaultUArchIndex,
+ kMaxUArchIndex,
+ kParallelize1DRange,
+ 0 /* flags */);
+}
+
+static void SetTrue1DWithUArch(std::atomic_bool* processed_indicators, uint32_t, size_t i) {
+ processed_indicators[i].store(true, std::memory_order_relaxed);
+}
+
+TEST(Parallelize1DWithUArch, SingleThreadPoolAllItemsProcessed) {
+ std::vector<std::atomic_bool> indicators(kParallelize1DRange);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_1d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_1d_with_id_t>(SetTrue1DWithUArch),
+ static_cast<void*>(indicators.data()),
+ kDefaultUArchIndex,
+ kMaxUArchIndex,
+ kParallelize1DRange,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize1DRange; i++) {
+ EXPECT_TRUE(indicators[i].load(std::memory_order_relaxed))
+ << "Element " << i << " not processed";
+ }
+}
+
+TEST(Parallelize1DWithUArch, MultiThreadPoolAllItemsProcessed) {
+ std::vector<std::atomic_bool> indicators(kParallelize1DRange);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_1d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_1d_with_id_t>(SetTrue1DWithUArch),
+ static_cast<void*>(indicators.data()),
+ kDefaultUArchIndex,
+ kMaxUArchIndex,
+ kParallelize1DRange,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize1DRange; i++) {
+ EXPECT_TRUE(indicators[i].load(std::memory_order_relaxed))
+ << "Element " << i << " not processed";
+ }
+}
+
+static void Increment1DWithUArch(std::atomic_int* processed_counters, uint32_t, size_t i) {
+ processed_counters[i].fetch_add(1, std::memory_order_relaxed);
+}
+
+TEST(Parallelize1DWithUArch, SingleThreadPoolEachItemProcessedOnce) {
+ std::vector<std::atomic_int> counters(kParallelize1DRange);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_1d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_1d_with_id_t>(Increment1DWithUArch),
+ static_cast<void*>(counters.data()),
+ kDefaultUArchIndex,
+ kMaxUArchIndex,
+ kParallelize1DRange,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize1DRange; i++) {
+ EXPECT_EQ(counters[i].load(std::memory_order_relaxed), 1)
+ << "Element " << i << " was processed " << counters[i].load(std::memory_order_relaxed) << " times (expected: 1)";
+ }
+}
+
+TEST(Parallelize1DWithUArch, MultiThreadPoolEachItemProcessedOnce) {
+ std::vector<std::atomic_int> counters(kParallelize1DRange);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_1d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_1d_with_id_t>(Increment1DWithUArch),
+ static_cast<void*>(counters.data()),
+ kDefaultUArchIndex,
+ kMaxUArchIndex,
+ kParallelize1DRange,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize1DRange; i++) {
+ EXPECT_EQ(counters[i].load(std::memory_order_relaxed), 1)
+ << "Element " << i << " was processed " << counters[i].load(std::memory_order_relaxed) << " times (expected: 1)";
+ }
+}
+
+TEST(Parallelize1DWithUArch, SingleThreadPoolEachItemProcessedMultipleTimes) {
+ std::vector<std::atomic_int> counters(kParallelize1DRange);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ for (size_t iteration = 0; iteration < kIncrementIterations; iteration++) {
+ pthreadpool_parallelize_1d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_1d_with_id_t>(Increment1DWithUArch),
+ static_cast<void*>(counters.data()),
+ kDefaultUArchIndex,
+ kMaxUArchIndex,
+ kParallelize1DRange,
+ 0 /* flags */);
+ }
+
+ for (size_t i = 0; i < kParallelize1DRange; i++) {
+ EXPECT_EQ(counters[i].load(std::memory_order_relaxed), kIncrementIterations)
+ << "Element " << i << " was processed " << counters[i].load(std::memory_order_relaxed) << " times "
+ << "(expected: " << kIncrementIterations << ")";
+ }
+}
+
+TEST(Parallelize1DWithUArch, MultiThreadPoolEachItemProcessedMultipleTimes) {
+ std::vector<std::atomic_int> counters(kParallelize1DRange);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ for (size_t iteration = 0; iteration < kIncrementIterations; iteration++) {
+ pthreadpool_parallelize_1d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_1d_with_id_t>(Increment1DWithUArch),
+ static_cast<void*>(counters.data()),
+ kDefaultUArchIndex,
+ kMaxUArchIndex,
+ kParallelize1DRange,
+ 0 /* flags */);
+ }
+
+ for (size_t i = 0; i < kParallelize1DRange; i++) {
+ EXPECT_EQ(counters[i].load(std::memory_order_relaxed), kIncrementIterations)
+ << "Element " << i << " was processed " << counters[i].load(std::memory_order_relaxed) << " times "
+ << "(expected: " << kIncrementIterations << ")";
+ }
+}
+
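+/*
+ * "HighContention" pattern: every task invocation increments one shared
+ * atomic counter, so all worker threads contend on the same location; the
+ * test passes if the final count still equals the size of the iteration range.
+ */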
+static void IncrementSame1DWithUArch(std::atomic_int* num_processed_items, uint32_t, size_t i) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+}
+
+TEST(Parallelize1DWithUArch, MultiThreadPoolHighContention) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_1d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_1d_with_id_t>(IncrementSame1DWithUArch),
+ static_cast<void*>(&num_processed_items),
+ kDefaultUArchIndex,
+ kMaxUArchIndex,
+ kParallelize1DRange,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize1DRange);
+}
+
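+/*
+ * "WorkStealing" pattern: the task for the first item spin-waits until the
+ * shared counter reaches the full range, so the run can only complete if the
+ * remaining items are picked up by the other threads in the meantime.
+ */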
+static void WorkImbalance1DWithUArch(std::atomic_int* num_processed_items, uint32_t, size_t i) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+ if (i == 0) {
+ /* Spin-wait until all items are computed */
+ while (num_processed_items->load(std::memory_order_relaxed) != kParallelize1DRange) {
+ std::atomic_thread_fence(std::memory_order_acquire);
+ }
+ }
+}
+
+TEST(Parallelize1DWithUArch, MultiThreadPoolWorkStealing) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_1d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_1d_with_id_t>(WorkImbalance1DWithUArch),
+ static_cast<void*>(&num_processed_items),
+ kDefaultUArchIndex,
+ kMaxUArchIndex,
+ kParallelize1DRange,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize1DRange);
+}
+
static void ComputeNothing1DTile1D(void*, size_t, size_t) {
}
@@ -545,6 +926,31 @@ TEST(Parallelize1DTile1D, MultiThreadPoolEachItemProcessedMultipleTimes) {
}
}
+static void IncrementSame1DTile1D(std::atomic_int* num_processed_items, size_t start_i, size_t tile_i) {
+ for (size_t i = start_i; i < start_i + tile_i; i++) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+ }
+}
+
+TEST(Parallelize1DTile1D, MultiThreadPoolHighContention) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_1d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_1d_tile_1d_t>(IncrementSame1DTile1D),
+ static_cast<void*>(&num_processed_items),
+ kParallelize1DTile1DRange, kParallelize1DTile1DTile,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize1DTile1DRange);
+}
+
static void WorkImbalance1DTile1D(std::atomic_int* num_processed_items, size_t start_i, size_t tile_i) {
num_processed_items->fetch_add(tile_i, std::memory_order_relaxed);
if (start_i == 0) {
@@ -801,6 +1207,29 @@ TEST(Parallelize2D, MultiThreadPoolEachItemProcessedMultipleTimes) {
}
}
+static void IncrementSame2D(std::atomic_int* num_processed_items, size_t i, size_t j) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+}
+
+TEST(Parallelize2D, MultiThreadPoolHighContention) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_2d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_2d_t>(IncrementSame2D),
+ static_cast<void*>(&num_processed_items),
+ kParallelize2DRangeI, kParallelize2DRangeJ,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize2DRangeI * kParallelize2DRangeJ);
+}
+
static void WorkImbalance2D(std::atomic_int* num_processed_items, size_t i, size_t j) {
num_processed_items->fetch_add(1, std::memory_order_relaxed);
if (i == 0 && j == 0) {
@@ -1097,6 +1526,31 @@ TEST(Parallelize2DTile1D, MultiThreadPoolEachItemProcessedMultipleTimes) {
}
}
+static void IncrementSame2DTile1D(std::atomic_int* num_processed_items, size_t i, size_t start_j, size_t tile_j) {
+ for (size_t j = start_j; j < start_j + tile_j; j++) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+ }
+}
+
+TEST(Parallelize2DTile1D, MultiThreadPoolHighContention) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_2d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_2d_tile_1d_t>(IncrementSame2DTile1D),
+ static_cast<void*>(&num_processed_items),
+ kParallelize2DTile1DRangeI, kParallelize2DTile1DRangeJ, kParallelize2DTile1DTileJ,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize2DTile1DRangeI * kParallelize2DTile1DRangeJ);
+}
+
static void WorkImbalance2DTile1D(std::atomic_int* num_processed_items, size_t i, size_t start_j, size_t tile_j) {
num_processed_items->fetch_add(tile_j, std::memory_order_relaxed);
if (i == 0 && start_j == 0) {
@@ -1415,6 +1869,34 @@ TEST(Parallelize2DTile2D, MultiThreadPoolEachItemProcessedMultipleTimes) {
}
}
+static void IncrementSame2DTile2D(std::atomic_int* num_processed_items, size_t start_i, size_t start_j, size_t tile_i, size_t tile_j) {
+ for (size_t i = start_i; i < start_i + tile_i; i++) {
+ for (size_t j = start_j; j < start_j + tile_j; j++) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+ }
+ }
+}
+
+TEST(Parallelize2DTile2D, MultiThreadPoolHighContention) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_2d_tile_2d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_2d_tile_2d_t>(IncrementSame2DTile2D),
+ static_cast<void*>(&num_processed_items),
+ kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ,
+ kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize2DTile2DRangeI * kParallelize2DTile2DRangeJ);
+}
+
static void WorkImbalance2DTile2D(std::atomic_int* num_processed_items, size_t start_i, size_t start_j, size_t tile_i, size_t tile_j) {
num_processed_items->fetch_add(tile_i * tile_j, std::memory_order_relaxed);
if (start_i == 0 && start_j == 0) {
@@ -1445,6 +1927,1045 @@ TEST(Parallelize2DTile2D, MultiThreadPoolWorkStealing) {
EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize2DTile2DRangeI * kParallelize2DTile2DRangeJ);
}
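+/*
+ * Parallelize2DTile2DWithUArch: same coverage as the 2D-tile-2D suite above,
+ * but through pthreadpool_parallelize_2d_tile_2d_with_uarch, whose task also
+ * receives a microarchitecture index that must either equal kDefaultUArchIndex
+ * or be at most kMaxUArchIndex.
+ */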
+static void ComputeNothing2DTile2DWithUArch(void*, uint32_t, size_t, size_t, size_t, size_t) {
+}
+
+TEST(Parallelize2DTile2DWithUArch, SingleThreadPoolCompletes) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+	pthreadpool_parallelize_2d_tile_2d_with_uarch(
+		threadpool.get(),
+ ComputeNothing2DTile2DWithUArch,
+ nullptr,
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ,
+ kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ,
+ 0 /* flags */);
+}
+
+TEST(Parallelize2DTile2DWithUArch, MultiThreadPoolCompletes) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_2d_tile_2d_with_uarch(
+ threadpool.get(),
+ ComputeNothing2DTile2DWithUArch,
+ nullptr,
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ,
+ kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ,
+ 0 /* flags */);
+}
+
+static void CheckUArch2DTile2DWithUArch(void*, uint32_t uarch_index, size_t, size_t, size_t, size_t) {
+ if (uarch_index != kDefaultUArchIndex) {
+ EXPECT_LE(uarch_index, kMaxUArchIndex);
+ }
+}
+
+TEST(Parallelize2DTile2DWithUArch, SingleThreadPoolUArchInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_2d_tile_2d_with_uarch(
+ threadpool.get(),
+ CheckUArch2DTile2DWithUArch,
+ nullptr,
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ,
+ kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ,
+ 0 /* flags */);
+}
+
+TEST(Parallelize2DTile2DWithUArch, MultiThreadPoolUArchInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_2d_tile_2d_with_uarch(
+ threadpool.get(),
+ CheckUArch2DTile2DWithUArch,
+ nullptr,
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ,
+ kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ,
+ 0 /* flags */);
+}
+
+static void CheckBounds2DTile2DWithUArch(void*, uint32_t, size_t start_i, size_t start_j, size_t tile_i, size_t tile_j) {
+ EXPECT_LT(start_i, kParallelize2DTile2DRangeI);
+ EXPECT_LT(start_j, kParallelize2DTile2DRangeJ);
+ EXPECT_LE(start_i + tile_i, kParallelize2DTile2DRangeI);
+ EXPECT_LE(start_j + tile_j, kParallelize2DTile2DRangeJ);
+}
+
+TEST(Parallelize2DTile2DWithUArch, SingleThreadPoolAllItemsInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_2d_tile_2d_with_uarch(
+ threadpool.get(),
+ CheckBounds2DTile2DWithUArch,
+ nullptr,
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ,
+ kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ,
+ 0 /* flags */);
+}
+
+TEST(Parallelize2DTile2DWithUArch, MultiThreadPoolAllItemsInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_2d_tile_2d_with_uarch(
+ threadpool.get(),
+ CheckBounds2DTile2DWithUArch,
+ nullptr,
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ,
+ kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ,
+ 0 /* flags */);
+}
+
+static void CheckTiling2DTile2DWithUArch(void*, uint32_t, size_t start_i, size_t start_j, size_t tile_i, size_t tile_j) {
+ EXPECT_GT(tile_i, 0);
+ EXPECT_LE(tile_i, kParallelize2DTile2DTileI);
+ EXPECT_EQ(start_i % kParallelize2DTile2DTileI, 0);
+ EXPECT_EQ(tile_i, std::min<size_t>(kParallelize2DTile2DTileI, kParallelize2DTile2DRangeI - start_i));
+
+ EXPECT_GT(tile_j, 0);
+ EXPECT_LE(tile_j, kParallelize2DTile2DTileJ);
+ EXPECT_EQ(start_j % kParallelize2DTile2DTileJ, 0);
+ EXPECT_EQ(tile_j, std::min<size_t>(kParallelize2DTile2DTileJ, kParallelize2DTile2DRangeJ - start_j));
+}
+
+TEST(Parallelize2DTile2DWithUArch, SingleThreadPoolUniformTiling) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_2d_tile_2d_with_uarch(
+ threadpool.get(),
+ CheckTiling2DTile2DWithUArch,
+ nullptr,
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ,
+ kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ,
+ 0 /* flags */);
+}
+
+TEST(Parallelize2DTile2DWithUArch, MultiThreadPoolUniformTiling) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_2d_tile_2d_with_uarch(
+ threadpool.get(),
+ CheckTiling2DTile2DWithUArch,
+ nullptr,
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ,
+ kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ,
+ 0 /* flags */);
+}
+
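+/*
+ * The indicator/counter tasks below flatten (i, j) into the row-major linear
+ * index i * kParallelize2DTile2DRangeJ + j, so a flat vector can track
+ * per-item state.
+ */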
+static void SetTrue2DTile2DWithUArch(std::atomic_bool* processed_indicators, uint32_t, size_t start_i, size_t start_j, size_t tile_i, size_t tile_j) {
+ for (size_t i = start_i; i < start_i + tile_i; i++) {
+ for (size_t j = start_j; j < start_j + tile_j; j++) {
+ const size_t linear_idx = i * kParallelize2DTile2DRangeJ + j;
+ processed_indicators[linear_idx].store(true, std::memory_order_relaxed);
+ }
+ }
+}
+
+TEST(Parallelize2DTile2DWithUArch, SingleThreadPoolAllItemsProcessed) {
+ std::vector<std::atomic_bool> indicators(kParallelize2DTile2DRangeI * kParallelize2DTile2DRangeJ);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_2d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_2d_tile_2d_with_id_t>(SetTrue2DTile2DWithUArch),
+ static_cast<void*>(indicators.data()),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ,
+ kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize2DTile2DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize2DTile2DRangeJ; j++) {
+ const size_t linear_idx = i * kParallelize2DTile2DRangeJ + j;
+ EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed))
+ << "Element (" << i << ", " << j << ") not processed";
+ }
+ }
+}
+
+TEST(Parallelize2DTile2DWithUArch, MultiThreadPoolAllItemsProcessed) {
+ std::vector<std::atomic_bool> indicators(kParallelize2DTile2DRangeI * kParallelize2DTile2DRangeJ);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_2d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_2d_tile_2d_with_id_t>(SetTrue2DTile2DWithUArch),
+ static_cast<void*>(indicators.data()),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ,
+ kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize2DTile2DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize2DTile2DRangeJ; j++) {
+ const size_t linear_idx = i * kParallelize2DTile2DRangeJ + j;
+ EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed))
+ << "Element (" << i << ", " << j << ") not processed";
+ }
+ }
+}
+
+static void Increment2DTile2DWithUArch(std::atomic_int* processed_counters, uint32_t, size_t start_i, size_t start_j, size_t tile_i, size_t tile_j) {
+ for (size_t i = start_i; i < start_i + tile_i; i++) {
+ for (size_t j = start_j; j < start_j + tile_j; j++) {
+ const size_t linear_idx = i * kParallelize2DTile2DRangeJ + j;
+ processed_counters[linear_idx].fetch_add(1, std::memory_order_relaxed);
+ }
+ }
+}
+
+TEST(Parallelize2DTile2DWithUArch, SingleThreadPoolEachItemProcessedOnce) {
+ std::vector<std::atomic_int> counters(kParallelize2DTile2DRangeI * kParallelize2DTile2DRangeJ);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_2d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_2d_tile_2d_with_id_t>(Increment2DTile2DWithUArch),
+ static_cast<void*>(counters.data()),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ,
+ kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize2DTile2DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize2DTile2DRangeJ; j++) {
+ const size_t linear_idx = i * kParallelize2DTile2DRangeJ + j;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1)
+ << "Element (" << i << ", " << j << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)";
+ }
+ }
+}
+
+TEST(Parallelize2DTile2DWithUArch, MultiThreadPoolEachItemProcessedOnce) {
+ std::vector<std::atomic_int> counters(kParallelize2DTile2DRangeI * kParallelize2DTile2DRangeJ);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_2d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_2d_tile_2d_with_id_t>(Increment2DTile2DWithUArch),
+ static_cast<void*>(counters.data()),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ,
+ kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize2DTile2DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize2DTile2DRangeJ; j++) {
+ const size_t linear_idx = i * kParallelize2DTile2DRangeJ + j;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1)
+ << "Element (" << i << ", " << j << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)";
+ }
+ }
+}
+
+TEST(Parallelize2DTile2DWithUArch, SingleThreadPoolEachItemProcessedMultipleTimes) {
+ std::vector<std::atomic_int> counters(kParallelize2DTile2DRangeI * kParallelize2DTile2DRangeJ);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ for (size_t iteration = 0; iteration < kIncrementIterations; iteration++) {
+ pthreadpool_parallelize_2d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_2d_tile_2d_with_id_t>(Increment2DTile2DWithUArch),
+ static_cast<void*>(counters.data()),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ,
+ kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ,
+ 0 /* flags */);
+ }
+
+ for (size_t i = 0; i < kParallelize2DTile2DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize2DTile2DRangeJ; j++) {
+ const size_t linear_idx = i * kParallelize2DTile2DRangeJ + j;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations)
+ << "Element (" << i << ", " << j << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times "
+ << "(expected: " << kIncrementIterations << ")";
+ }
+ }
+}
+
+TEST(Parallelize2DTile2DWithUArch, MultiThreadPoolEachItemProcessedMultipleTimes) {
+ std::vector<std::atomic_int> counters(kParallelize2DTile2DRangeI * kParallelize2DTile2DRangeJ);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ for (size_t iteration = 0; iteration < kIncrementIterations; iteration++) {
+ pthreadpool_parallelize_2d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_2d_tile_2d_with_id_t>(Increment2DTile2DWithUArch),
+ static_cast<void*>(counters.data()),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ,
+ kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ,
+ 0 /* flags */);
+ }
+
+ for (size_t i = 0; i < kParallelize2DTile2DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize2DTile2DRangeJ; j++) {
+ const size_t linear_idx = i * kParallelize2DTile2DRangeJ + j;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations)
+ << "Element (" << i << ", " << j << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times "
+ << "(expected: " << kIncrementIterations << ")";
+ }
+ }
+}
+
+static void IncrementSame2DTile2DWithUArch(std::atomic_int* num_processed_items, uint32_t, size_t start_i, size_t start_j, size_t tile_i, size_t tile_j) {
+ for (size_t i = start_i; i < start_i + tile_i; i++) {
+ for (size_t j = start_j; j < start_j + tile_j; j++) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+ }
+ }
+}
+
+TEST(Parallelize2DTile2DWithUArch, MultiThreadPoolHighContention) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_2d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_2d_tile_2d_with_id_t>(IncrementSame2DTile2DWithUArch),
+ static_cast<void*>(&num_processed_items),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ,
+ kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize2DTile2DRangeI * kParallelize2DTile2DRangeJ);
+}
+
+static void WorkImbalance2DTile2DWithUArch(std::atomic_int* num_processed_items, uint32_t, size_t start_i, size_t start_j, size_t tile_i, size_t tile_j) {
+ num_processed_items->fetch_add(tile_i * tile_j, std::memory_order_relaxed);
+ if (start_i == 0 && start_j == 0) {
+ /* Spin-wait until all items are computed */
+ while (num_processed_items->load(std::memory_order_relaxed) != kParallelize2DTile2DRangeI * kParallelize2DTile2DRangeJ) {
+ std::atomic_thread_fence(std::memory_order_acquire);
+ }
+ }
+}
+
+TEST(Parallelize2DTile2DWithUArch, MultiThreadPoolWorkStealing) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_2d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_2d_tile_2d_with_id_t>(WorkImbalance2DTile2DWithUArch),
+ static_cast<void*>(&num_processed_items),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ,
+ kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize2DTile2DRangeI * kParallelize2DTile2DRangeJ);
+}
+
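+/*
+ * Parallelize3D: untiled 3D iteration space; each task invocation receives a
+ * single (i, j, k) triple within the I x J x K range.
+ */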
+static void ComputeNothing3D(void*, size_t, size_t, size_t) {
+}
+
+TEST(Parallelize3D, SingleThreadPoolCompletes) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+	pthreadpool_parallelize_3d(
+		threadpool.get(),
+ ComputeNothing3D,
+ nullptr,
+ kParallelize3DRangeI, kParallelize3DRangeJ, kParallelize3DRangeK,
+ 0 /* flags */);
+}
+
+TEST(Parallelize3D, MultiThreadPoolCompletes) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_3d(
+ threadpool.get(),
+ ComputeNothing3D,
+ nullptr,
+ kParallelize3DRangeI, kParallelize3DRangeJ, kParallelize3DRangeK,
+ 0 /* flags */);
+}
+
+static void CheckBounds3D(void*, size_t i, size_t j, size_t k) {
+ EXPECT_LT(i, kParallelize3DRangeI);
+ EXPECT_LT(j, kParallelize3DRangeJ);
+ EXPECT_LT(k, kParallelize3DRangeK);
+}
+
+TEST(Parallelize3D, SingleThreadPoolAllItemsInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_3d(
+ threadpool.get(),
+ CheckBounds3D,
+ nullptr,
+ kParallelize3DRangeI, kParallelize3DRangeJ, kParallelize3DRangeK,
+ 0 /* flags */);
+}
+
+TEST(Parallelize3D, MultiThreadPoolAllItemsInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_3d(
+ threadpool.get(),
+ CheckBounds3D,
+ nullptr,
+ kParallelize3DRangeI, kParallelize3DRangeJ, kParallelize3DRangeK,
+ 0 /* flags */);
+}
+
+static void SetTrue3D(std::atomic_bool* processed_indicators, size_t i, size_t j, size_t k) {
+ const size_t linear_idx = (i * kParallelize3DRangeJ + j) * kParallelize3DRangeK + k;
+ processed_indicators[linear_idx].store(true, std::memory_order_relaxed);
+}
+
+TEST(Parallelize3D, SingleThreadPoolAllItemsProcessed) {
+ std::vector<std::atomic_bool> indicators(kParallelize3DRangeI * kParallelize3DRangeJ * kParallelize3DRangeK);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_3d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_t>(SetTrue3D),
+ static_cast<void*>(indicators.data()),
+ kParallelize3DRangeI, kParallelize3DRangeJ, kParallelize3DRangeK,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize3DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize3DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize3DRangeK; k++) {
+ const size_t linear_idx = (i * kParallelize3DRangeJ + j) * kParallelize3DRangeK + k;
+ EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed))
+ << "Element (" << i << ", " << j << ", " << k << ") not processed";
+ }
+ }
+ }
+}
+
+TEST(Parallelize3D, MultiThreadPoolAllItemsProcessed) {
+ std::vector<std::atomic_bool> indicators(kParallelize3DRangeI * kParallelize3DRangeJ * kParallelize3DRangeK);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_3d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_t>(SetTrue3D),
+ static_cast<void*>(indicators.data()),
+ kParallelize3DRangeI, kParallelize3DRangeJ, kParallelize3DRangeK,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize3DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize3DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize3DRangeK; k++) {
+ const size_t linear_idx = (i * kParallelize3DRangeJ + j) * kParallelize3DRangeK + k;
+ EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed))
+ << "Element (" << i << ", " << j << ", " << k << ") not processed";
+ }
+ }
+ }
+}
+
+static void Increment3D(std::atomic_int* processed_counters, size_t i, size_t j, size_t k) {
+ const size_t linear_idx = (i * kParallelize3DRangeJ + j) * kParallelize3DRangeK + k;
+ processed_counters[linear_idx].fetch_add(1, std::memory_order_relaxed);
+}
+
+TEST(Parallelize3D, SingleThreadPoolEachItemProcessedOnce) {
+ std::vector<std::atomic_int> counters(kParallelize3DRangeI * kParallelize3DRangeJ * kParallelize3DRangeK);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_3d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_t>(Increment3D),
+ static_cast<void*>(counters.data()),
+ kParallelize3DRangeI, kParallelize3DRangeJ, kParallelize3DRangeK,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize3DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize3DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize3DRangeK; k++) {
+ const size_t linear_idx = (i * kParallelize3DRangeJ + j) * kParallelize3DRangeK + k;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1)
+ << "Element (" << i << ", " << j << ", " << k << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)";
+ }
+ }
+ }
+}
+
+TEST(Parallelize3D, MultiThreadPoolEachItemProcessedOnce) {
+ std::vector<std::atomic_int> counters(kParallelize3DRangeI * kParallelize3DRangeJ * kParallelize3DRangeK);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_3d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_t>(Increment3D),
+ static_cast<void*>(counters.data()),
+ kParallelize3DRangeI, kParallelize3DRangeJ, kParallelize3DRangeK,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize3DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize3DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize3DRangeK; k++) {
+ const size_t linear_idx = (i * kParallelize3DRangeJ + j) * kParallelize3DRangeK + k;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1)
+ << "Element (" << i << ", " << j << ", " << k << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)";
+ }
+ }
+ }
+}
+
+TEST(Parallelize3D, SingleThreadPoolEachItemProcessedMultipleTimes) {
+ std::vector<std::atomic_int> counters(kParallelize3DRangeI * kParallelize3DRangeJ * kParallelize3DRangeK);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ for (size_t iteration = 0; iteration < kIncrementIterations; iteration++) {
+ pthreadpool_parallelize_3d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_t>(Increment3D),
+ static_cast<void*>(counters.data()),
+ kParallelize3DRangeI, kParallelize3DRangeJ, kParallelize3DRangeK,
+ 0 /* flags */);
+ }
+
+ for (size_t i = 0; i < kParallelize3DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize3DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize3DRangeK; k++) {
+ const size_t linear_idx = (i * kParallelize3DRangeJ + j) * kParallelize3DRangeK + k;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations)
+ << "Element (" << i << ", " << j << ", " << k << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times "
+ << "(expected: " << kIncrementIterations << ")";
+ }
+ }
+ }
+}
+
+TEST(Parallelize3D, MultiThreadPoolEachItemProcessedMultipleTimes) {
+ std::vector<std::atomic_int> counters(kParallelize3DRangeI * kParallelize3DRangeJ * kParallelize3DRangeK);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ for (size_t iteration = 0; iteration < kIncrementIterations; iteration++) {
+ pthreadpool_parallelize_3d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_t>(Increment3D),
+ static_cast<void*>(counters.data()),
+ kParallelize3DRangeI, kParallelize3DRangeJ, kParallelize3DRangeK,
+ 0 /* flags */);
+ }
+
+ for (size_t i = 0; i < kParallelize3DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize3DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize3DRangeK; k++) {
+ const size_t linear_idx = (i * kParallelize3DRangeJ + j) * kParallelize3DRangeK + k;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations)
+ << "Element (" << i << ", " << j << ", " << k << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times "
+ << "(expected: " << kIncrementIterations << ")";
+ }
+ }
+ }
+}
+
+static void IncrementSame3D(std::atomic_int* num_processed_items, size_t i, size_t j, size_t k) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+}
+
+TEST(Parallelize3D, MultiThreadPoolHighContention) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_3d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_t>(IncrementSame3D),
+ static_cast<void*>(&num_processed_items),
+ kParallelize3DRangeI, kParallelize3DRangeJ, kParallelize3DRangeK,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize3DRangeI * kParallelize3DRangeJ * kParallelize3DRangeK);
+}
+
+static void WorkImbalance3D(std::atomic_int* num_processed_items, size_t i, size_t j, size_t k) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+ if (i == 0 && j == 0 && k == 0) {
+ /* Spin-wait until all items are computed */
+ while (num_processed_items->load(std::memory_order_relaxed) != kParallelize3DRangeI * kParallelize3DRangeJ * kParallelize3DRangeK) {
+ std::atomic_thread_fence(std::memory_order_acquire);
+ }
+ }
+}
+
+TEST(Parallelize3D, MultiThreadPoolWorkStealing) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_3d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_t>(WorkImbalance3D),
+ static_cast<void*>(&num_processed_items),
+ kParallelize3DRangeI, kParallelize3DRangeJ, kParallelize3DRangeK,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize3DRangeI * kParallelize3DRangeJ * kParallelize3DRangeK);
+}
+
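+/*
+ * Parallelize3DTile1D: 3D iteration space tiled along the innermost K
+ * dimension only; the task receives (i, j, start_k, tile_k).
+ */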
+static void ComputeNothing3DTile1D(void*, size_t, size_t, size_t, size_t) {
+}
+
+TEST(Parallelize3DTile1D, SingleThreadPoolCompletes) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+	pthreadpool_parallelize_3d_tile_1d(
+		threadpool.get(),
+ ComputeNothing3DTile1D,
+ nullptr,
+ kParallelize3DTile1DRangeI, kParallelize3DTile1DRangeJ, kParallelize3DTile1DRangeK,
+ kParallelize3DTile1DTileK,
+ 0 /* flags */);
+}
+
+TEST(Parallelize3DTile1D, MultiThreadPoolCompletes) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_3d_tile_1d(
+ threadpool.get(),
+ ComputeNothing3DTile1D,
+ nullptr,
+ kParallelize3DTile1DRangeI, kParallelize3DTile1DRangeJ, kParallelize3DTile1DRangeK,
+ kParallelize3DTile1DTileK,
+ 0 /* flags */);
+}
+
+static void CheckBounds3DTile1D(void*, size_t i, size_t j, size_t start_k, size_t tile_k) {
+ EXPECT_LT(i, kParallelize3DTile1DRangeI);
+ EXPECT_LT(j, kParallelize3DTile1DRangeJ);
+ EXPECT_LT(start_k, kParallelize3DTile1DRangeK);
+ EXPECT_LE(start_k + tile_k, kParallelize3DTile1DRangeK);
+}
+
+TEST(Parallelize3DTile1D, SingleThreadPoolAllItemsInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_3d_tile_1d(
+ threadpool.get(),
+ CheckBounds3DTile1D,
+ nullptr,
+ kParallelize3DTile1DRangeI, kParallelize3DTile1DRangeJ, kParallelize3DTile1DRangeK,
+ kParallelize3DTile1DTileK,
+ 0 /* flags */);
+}
+
+TEST(Parallelize3DTile1D, MultiThreadPoolAllItemsInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_3d_tile_1d(
+ threadpool.get(),
+ CheckBounds3DTile1D,
+ nullptr,
+ kParallelize3DTile1DRangeI, kParallelize3DTile1DRangeJ, kParallelize3DTile1DRangeK,
+ kParallelize3DTile1DTileK,
+ 0 /* flags */);
+}
+
+static void CheckTiling3DTile1D(void*, size_t i, size_t j, size_t start_k, size_t tile_k) {
+ EXPECT_GT(tile_k, 0);
+ EXPECT_LE(tile_k, kParallelize3DTile1DTileK);
+ EXPECT_EQ(start_k % kParallelize3DTile1DTileK, 0);
+ EXPECT_EQ(tile_k, std::min<size_t>(kParallelize3DTile1DTileK, kParallelize3DTile1DRangeK - start_k));
+}
+
+TEST(Parallelize3DTile1D, SingleThreadPoolUniformTiling) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_3d_tile_1d(
+ threadpool.get(),
+ CheckTiling3DTile1D,
+ nullptr,
+ kParallelize3DTile1DRangeI, kParallelize3DTile1DRangeJ, kParallelize3DTile1DRangeK,
+ kParallelize3DTile1DTileK,
+ 0 /* flags */);
+}
+
+TEST(Parallelize3DTile1D, MultiThreadPoolUniformTiling) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_3d_tile_1d(
+ threadpool.get(),
+ CheckTiling3DTile1D,
+ nullptr,
+ kParallelize3DTile1DRangeI, kParallelize3DTile1DRangeJ, kParallelize3DTile1DRangeK,
+ kParallelize3DTile1DTileK,
+ 0 /* flags */);
+}
+
+static void SetTrue3DTile1D(std::atomic_bool* processed_indicators, size_t i, size_t j, size_t start_k, size_t tile_k) {
+ for (size_t k = start_k; k < start_k + tile_k; k++) {
+ const size_t linear_idx = (i * kParallelize3DTile1DRangeJ + j) * kParallelize3DTile1DRangeK + k;
+ processed_indicators[linear_idx].store(true, std::memory_order_relaxed);
+ }
+}
+
+TEST(Parallelize3DTile1D, SingleThreadPoolAllItemsProcessed) {
+ std::vector<std::atomic_bool> indicators(kParallelize3DTile1DRangeI * kParallelize3DTile1DRangeJ * kParallelize3DTile1DRangeK);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_3d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_tile_1d_t>(SetTrue3DTile1D),
+ static_cast<void*>(indicators.data()),
+ kParallelize3DTile1DRangeI, kParallelize3DTile1DRangeJ, kParallelize3DTile1DRangeK,
+ kParallelize3DTile1DTileK,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize3DTile1DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize3DTile1DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize3DTile1DRangeK; k++) {
+ const size_t linear_idx = (i * kParallelize3DTile1DRangeJ + j) * kParallelize3DTile1DRangeK + k;
+ EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed))
+ << "Element (" << i << ", " << j << ", " << k << ") not processed";
+ }
+ }
+ }
+}
+
+TEST(Parallelize3DTile1D, MultiThreadPoolAllItemsProcessed) {
+ std::vector<std::atomic_bool> indicators(kParallelize3DTile1DRangeI * kParallelize3DTile1DRangeJ * kParallelize3DTile1DRangeK);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_3d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_tile_1d_t>(SetTrue3DTile1D),
+ static_cast<void*>(indicators.data()),
+ kParallelize3DTile1DRangeI, kParallelize3DTile1DRangeJ, kParallelize3DTile1DRangeK,
+ kParallelize3DTile1DTileK,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize3DTile1DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize3DTile1DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize3DTile1DRangeK; k++) {
+ const size_t linear_idx = (i * kParallelize3DTile1DRangeJ + j) * kParallelize3DTile1DRangeK + k;
+ EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed))
+ << "Element (" << i << ", " << j << ", " << k << ") not processed";
+ }
+ }
+ }
+}
+
+static void Increment3DTile1D(std::atomic_int* processed_counters, size_t i, size_t j, size_t start_k, size_t tile_k) {
+ for (size_t k = start_k; k < start_k + tile_k; k++) {
+ const size_t linear_idx = (i * kParallelize3DTile1DRangeJ + j) * kParallelize3DTile1DRangeK + k;
+ processed_counters[linear_idx].fetch_add(1, std::memory_order_relaxed);
+ }
+}
+
+TEST(Parallelize3DTile1D, SingleThreadPoolEachItemProcessedOnce) {
+ std::vector<std::atomic_int> counters(kParallelize3DTile1DRangeI * kParallelize3DTile1DRangeJ * kParallelize3DTile1DRangeK);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_3d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_tile_1d_t>(Increment3DTile1D),
+ static_cast<void*>(counters.data()),
+ kParallelize3DTile1DRangeI, kParallelize3DTile1DRangeJ, kParallelize3DTile1DRangeK,
+ kParallelize3DTile1DTileK,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize3DTile1DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize3DTile1DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize3DTile1DRangeK; k++) {
+ const size_t linear_idx = (i * kParallelize3DTile1DRangeJ + j) * kParallelize3DTile1DRangeK + k;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1)
+ << "Element (" << i << ", " << j << ", " << k << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)";
+ }
+ }
+ }
+}
+
+TEST(Parallelize3DTile1D, MultiThreadPoolEachItemProcessedOnce) {
+ std::vector<std::atomic_int> counters(kParallelize3DTile1DRangeI * kParallelize3DTile1DRangeJ * kParallelize3DTile1DRangeK);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_3d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_tile_1d_t>(Increment3DTile1D),
+ static_cast<void*>(counters.data()),
+ kParallelize3DTile1DRangeI, kParallelize3DTile1DRangeJ, kParallelize3DTile1DRangeK,
+ kParallelize3DTile1DTileK,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize3DTile1DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize3DTile1DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize3DTile1DRangeK; k++) {
+ const size_t linear_idx = (i * kParallelize3DTile1DRangeJ + j) * kParallelize3DTile1DRangeK + k;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1)
+ << "Element (" << i << ", " << j << ", " << k << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)";
+ }
+ }
+ }
+}
+
+TEST(Parallelize3DTile1D, SingleThreadPoolEachItemProcessedMultipleTimes) {
+ std::vector<std::atomic_int> counters(kParallelize3DTile1DRangeI * kParallelize3DTile1DRangeJ * kParallelize3DTile1DRangeK);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ for (size_t iteration = 0; iteration < kIncrementIterations; iteration++) {
+ pthreadpool_parallelize_3d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_tile_1d_t>(Increment3DTile1D),
+ static_cast<void*>(counters.data()),
+ kParallelize3DTile1DRangeI, kParallelize3DTile1DRangeJ, kParallelize3DTile1DRangeK,
+ kParallelize3DTile1DTileK,
+ 0 /* flags */);
+ }
+
+ for (size_t i = 0; i < kParallelize3DTile1DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize3DTile1DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize3DTile1DRangeK; k++) {
+ const size_t linear_idx = (i * kParallelize3DTile1DRangeJ + j) * kParallelize3DTile1DRangeK + k;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations)
+ << "Element (" << i << ", " << j << ", " << k << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times "
+ << "(expected: " << kIncrementIterations << ")";
+ }
+ }
+ }
+}
+
+TEST(Parallelize3DTile1D, MultiThreadPoolEachItemProcessedMultipleTimes) {
+ std::vector<std::atomic_int> counters(kParallelize3DTile1DRangeI * kParallelize3DTile1DRangeJ * kParallelize3DTile1DRangeK);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ for (size_t iteration = 0; iteration < kIncrementIterations; iteration++) {
+ pthreadpool_parallelize_3d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_tile_1d_t>(Increment3DTile1D),
+ static_cast<void*>(counters.data()),
+ kParallelize3DTile1DRangeI, kParallelize3DTile1DRangeJ, kParallelize3DTile1DRangeK,
+ kParallelize3DTile1DTileK,
+ 0 /* flags */);
+ }
+
+ for (size_t i = 0; i < kParallelize3DTile1DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize3DTile1DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize3DTile1DRangeK; k++) {
+ const size_t linear_idx = (i * kParallelize3DTile1DRangeJ + j) * kParallelize3DTile1DRangeK + k;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations)
+ << "Element (" << i << ", " << j << ", " << k << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times "
+ << "(expected: " << kIncrementIterations << ")";
+ }
+ }
+ }
+}
+
+static void IncrementSame3DTile1D(std::atomic_int* num_processed_items, size_t i, size_t j, size_t start_k, size_t tile_k) {
+ for (size_t k = start_k; k < start_k + tile_k; k++) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+ }
+}
+
+TEST(Parallelize3DTile1D, MultiThreadPoolHighContention) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_3d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_tile_1d_t>(IncrementSame3DTile1D),
+ static_cast<void*>(&num_processed_items),
+ kParallelize3DTile1DRangeI, kParallelize3DTile1DRangeJ, kParallelize3DTile1DRangeK,
+ kParallelize3DTile1DTileK,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize3DTile1DRangeI * kParallelize3DTile1DRangeJ * kParallelize3DTile1DRangeK);
+}
+
+static void WorkImbalance3DTile1D(std::atomic_int* num_processed_items, size_t i, size_t j, size_t start_k, size_t tile_k) {
+ num_processed_items->fetch_add(tile_k, std::memory_order_relaxed);
+ if (i == 0 && j == 0 && start_k == 0) {
+ /* Spin-wait until all items are computed */
+ while (num_processed_items->load(std::memory_order_relaxed) != kParallelize3DTile1DRangeI * kParallelize3DTile1DRangeJ * kParallelize3DTile1DRangeK) {
+ std::atomic_thread_fence(std::memory_order_acquire);
+ }
+ }
+}
+
+TEST(Parallelize3DTile1D, MultiThreadPoolWorkStealing) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_3d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_tile_1d_t>(WorkImbalance3DTile1D),
+ static_cast<void*>(&num_processed_items),
+ kParallelize3DTile1DRangeI, kParallelize3DTile1DRangeJ, kParallelize3DTile1DRangeK,
+ kParallelize3DTile1DTileK,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize3DTile1DRangeI * kParallelize3DTile1DRangeJ * kParallelize3DTile1DRangeK);
+}
+
static void ComputeNothing3DTile2D(void*, size_t, size_t, size_t, size_t, size_t) {
}
@@ -1747,6 +3268,34 @@ TEST(Parallelize3DTile2D, MultiThreadPoolEachItemProcessedMultipleTimes) {
}
}
+static void IncrementSame3DTile2D(std::atomic_int* num_processed_items, size_t i, size_t start_j, size_t start_k, size_t tile_j, size_t tile_k) {
+ for (size_t j = start_j; j < start_j + tile_j; j++) {
+ for (size_t k = start_k; k < start_k + tile_k; k++) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+ }
+ }
+}
+
+TEST(Parallelize3DTile2D, MultiThreadPoolHighContention) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_3d_tile_2d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_tile_2d_t>(IncrementSame3DTile2D),
+ static_cast<void*>(&num_processed_items),
+ kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK,
+ kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize3DTile2DRangeI * kParallelize3DTile2DRangeJ * kParallelize3DTile2DRangeK);
+}
+
static void WorkImbalance3DTile2D(std::atomic_int* num_processed_items, size_t i, size_t start_j, size_t start_k, size_t tile_j, size_t tile_k) {
num_processed_items->fetch_add(tile_j * tile_k, std::memory_order_relaxed);
if (i == 0 && start_j == 0 && start_k == 0) {
@@ -1777,6 +3326,1084 @@ TEST(Parallelize3DTile2D, MultiThreadPoolWorkStealing) {
EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize3DTile2DRangeI * kParallelize3DTile2DRangeJ * kParallelize3DTile2DRangeK);
}
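+/*
+ * Parallelize3DTile2DWithUArch: the J and K dimensions are tiled and the task
+ * additionally receives a microarchitecture index, mirroring the
+ * 2D-tile-2D-with-uarch suite above.
+ */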
+static void ComputeNothing3DTile2DWithUArch(void*, uint32_t, size_t, size_t, size_t, size_t, size_t) {
+}
+
+TEST(Parallelize3DTile2DWithUArch, SingleThreadPoolCompletes) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+	pthreadpool_parallelize_3d_tile_2d_with_uarch(
+		threadpool.get(),
+ ComputeNothing3DTile2DWithUArch,
+ nullptr,
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK,
+ kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK,
+ 0 /* flags */);
+}
+
+TEST(Parallelize3DTile2DWithUArch, MultiThreadPoolCompletes) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_3d_tile_2d_with_uarch(
+ threadpool.get(),
+ ComputeNothing3DTile2DWithUArch,
+ nullptr,
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK,
+ kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK,
+ 0 /* flags */);
+}
+
+static void CheckUArch3DTile2DWithUArch(void*, uint32_t uarch_index, size_t, size_t, size_t, size_t, size_t) {
+ if (uarch_index != kDefaultUArchIndex) {
+ EXPECT_LE(uarch_index, kMaxUArchIndex);
+ }
+}
+
+TEST(Parallelize3DTile2DWithUArch, SingleThreadPoolUArchInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_3d_tile_2d_with_uarch(
+ threadpool.get(),
+ CheckUArch3DTile2DWithUArch,
+ nullptr,
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK,
+ kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK,
+ 0 /* flags */);
+}
+
+TEST(Parallelize3DTile2DWithUArch, MultiThreadPoolUArchInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_3d_tile_2d_with_uarch(
+ threadpool.get(),
+ CheckUArch3DTile2DWithUArch,
+ nullptr,
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK,
+ kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK,
+ 0 /* flags */);
+}
+
+static void CheckBounds3DTile2DWithUArch(void*, uint32_t, size_t i, size_t start_j, size_t start_k, size_t tile_j, size_t tile_k) {
+ EXPECT_LT(i, kParallelize3DTile2DRangeI);
+ EXPECT_LT(start_j, kParallelize3DTile2DRangeJ);
+ EXPECT_LT(start_k, kParallelize3DTile2DRangeK);
+ EXPECT_LE(start_j + tile_j, kParallelize3DTile2DRangeJ);
+ EXPECT_LE(start_k + tile_k, kParallelize3DTile2DRangeK);
+}
+
+TEST(Parallelize3DTile2DWithUArch, SingleThreadPoolAllItemsInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_3d_tile_2d_with_uarch(
+ threadpool.get(),
+ CheckBounds3DTile2DWithUArch,
+ nullptr,
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK,
+ kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK,
+ 0 /* flags */);
+}
+
+TEST(Parallelize3DTile2DWithUArch, MultiThreadPoolAllItemsInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_3d_tile_2d_with_uarch(
+ threadpool.get(),
+ CheckBounds3DTile2DWithUArch,
+ nullptr,
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK,
+ kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK,
+ 0 /* flags */);
+}
+
+static void CheckTiling3DTile2DWithUArch(void*, uint32_t, size_t i, size_t start_j, size_t start_k, size_t tile_j, size_t tile_k) {
+ EXPECT_GT(tile_j, 0);
+ EXPECT_LE(tile_j, kParallelize3DTile2DTileJ);
+ EXPECT_EQ(start_j % kParallelize3DTile2DTileJ, 0);
+ EXPECT_EQ(tile_j, std::min<size_t>(kParallelize3DTile2DTileJ, kParallelize3DTile2DRangeJ - start_j));
+
+ EXPECT_GT(tile_k, 0);
+ EXPECT_LE(tile_k, kParallelize3DTile2DTileK);
+ EXPECT_EQ(start_k % kParallelize3DTile2DTileK, 0);
+ EXPECT_EQ(tile_k, std::min<size_t>(kParallelize3DTile2DTileK, kParallelize3DTile2DRangeK - start_k));
+}
+
+TEST(Parallelize3DTile2DWithUArch, SingleThreadPoolUniformTiling) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_3d_tile_2d_with_uarch(
+ threadpool.get(),
+ CheckTiling3DTile2DWithUArch,
+ nullptr,
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK,
+ kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK,
+ 0 /* flags */);
+}
+
+TEST(Parallelize3DTile2DWithUArch, MultiThreadPoolUniformTiling) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_3d_tile_2d_with_uarch(
+ threadpool.get(),
+ CheckTiling3DTile2DWithUArch,
+ nullptr,
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK,
+ kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK,
+ 0 /* flags */);
+}
+
+static void SetTrue3DTile2DWithUArch(std::atomic_bool* processed_indicators, uint32_t, size_t i, size_t start_j, size_t start_k, size_t tile_j, size_t tile_k) {
+ for (size_t j = start_j; j < start_j + tile_j; j++) {
+ for (size_t k = start_k; k < start_k + tile_k; k++) {
+ const size_t linear_idx = (i * kParallelize3DTile2DRangeJ + j) * kParallelize3DTile2DRangeK + k;
+ processed_indicators[linear_idx].store(true, std::memory_order_relaxed);
+ }
+ }
+}
+
+TEST(Parallelize3DTile2DWithUArch, SingleThreadPoolAllItemsProcessed) {
+ std::vector<std::atomic_bool> indicators(kParallelize3DTile2DRangeI * kParallelize3DTile2DRangeJ * kParallelize3DTile2DRangeK);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_3d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_tile_2d_with_id_t>(SetTrue3DTile2DWithUArch),
+ static_cast<void*>(indicators.data()),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK,
+ kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize3DTile2DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize3DTile2DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize3DTile2DRangeK; k++) {
+ const size_t linear_idx = (i * kParallelize3DTile2DRangeJ + j) * kParallelize3DTile2DRangeK + k;
+ EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed))
+ << "Element (" << i << ", " << j << ", " << k << ") not processed";
+ }
+ }
+ }
+}
+
+TEST(Parallelize3DTile2DWithUArch, MultiThreadPoolAllItemsProcessed) {
+ std::vector<std::atomic_bool> indicators(kParallelize3DTile2DRangeI * kParallelize3DTile2DRangeJ * kParallelize3DTile2DRangeK);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_3d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_tile_2d_with_id_t>(SetTrue3DTile2DWithUArch),
+ static_cast<void*>(indicators.data()),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK,
+ kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize3DTile2DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize3DTile2DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize3DTile2DRangeK; k++) {
+ const size_t linear_idx = (i * kParallelize3DTile2DRangeJ + j) * kParallelize3DTile2DRangeK + k;
+ EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed))
+ << "Element (" << i << ", " << j << ", " << k << ") not processed";
+ }
+ }
+ }
+}
+
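+/* Increments a per-element counter for every element of each received tile, so the
+ * tests below can detect elements processed more than once (or not at all). */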
+static void Increment3DTile2DWithUArch(std::atomic_int* processed_counters, uint32_t, size_t i, size_t start_j, size_t start_k, size_t tile_j, size_t tile_k) {
+ for (size_t j = start_j; j < start_j + tile_j; j++) {
+ for (size_t k = start_k; k < start_k + tile_k; k++) {
+ const size_t linear_idx = (i * kParallelize3DTile2DRangeJ + j) * kParallelize3DTile2DRangeK + k;
+ processed_counters[linear_idx].fetch_add(1, std::memory_order_relaxed);
+ }
+ }
+}
+
+TEST(Parallelize3DTile2DWithUArch, SingleThreadPoolEachItemProcessedOnce) {
+ std::vector<std::atomic_int> counters(kParallelize3DTile2DRangeI * kParallelize3DTile2DRangeJ * kParallelize3DTile2DRangeK);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_3d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_tile_2d_with_id_t>(Increment3DTile2DWithUArch),
+ static_cast<void*>(counters.data()),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK,
+ kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize3DTile2DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize3DTile2DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize3DTile2DRangeK; k++) {
+ const size_t linear_idx = (i * kParallelize3DTile2DRangeJ + j) * kParallelize3DTile2DRangeK + k;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1)
+ << "Element (" << i << ", " << j << ", " << k << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)";
+ }
+ }
+ }
+}
+
+TEST(Parallelize3DTile2DWithUArch, MultiThreadPoolEachItemProcessedOnce) {
+ std::vector<std::atomic_int> counters(kParallelize3DTile2DRangeI * kParallelize3DTile2DRangeJ * kParallelize3DTile2DRangeK);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_3d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_tile_2d_with_id_t>(Increment3DTile2DWithUArch),
+ static_cast<void*>(counters.data()),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK,
+ kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize3DTile2DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize3DTile2DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize3DTile2DRangeK; k++) {
+ const size_t linear_idx = (i * kParallelize3DTile2DRangeJ + j) * kParallelize3DTile2DRangeK + k;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1)
+ << "Element (" << i << ", " << j << ", " << k << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)";
+ }
+ }
+ }
+}
+
+TEST(Parallelize3DTile2DWithUArch, SingleThreadPoolEachItemProcessedMultipleTimes) {
+ std::vector<std::atomic_int> counters(kParallelize3DTile2DRangeI * kParallelize3DTile2DRangeJ * kParallelize3DTile2DRangeK);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ for (size_t iteration = 0; iteration < kIncrementIterations; iteration++) {
+ pthreadpool_parallelize_3d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_tile_2d_with_id_t>(Increment3DTile2DWithUArch),
+ static_cast<void*>(counters.data()),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK,
+ kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK,
+ 0 /* flags */);
+ }
+
+ for (size_t i = 0; i < kParallelize3DTile2DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize3DTile2DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize3DTile2DRangeK; k++) {
+ const size_t linear_idx = (i * kParallelize3DTile2DRangeJ + j) * kParallelize3DTile2DRangeK + k;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations)
+ << "Element (" << i << ", " << j << ", " << k << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times "
+ << "(expected: " << kIncrementIterations << ")";
+ }
+ }
+ }
+}
+
+TEST(Parallelize3DTile2DWithUArch, MultiThreadPoolEachItemProcessedMultipleTimes) {
+ std::vector<std::atomic_int> counters(kParallelize3DTile2DRangeI * kParallelize3DTile2DRangeJ * kParallelize3DTile2DRangeK);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ for (size_t iteration = 0; iteration < kIncrementIterations; iteration++) {
+ pthreadpool_parallelize_3d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_tile_2d_with_id_t>(Increment3DTile2DWithUArch),
+ static_cast<void*>(counters.data()),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK,
+ kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK,
+ 0 /* flags */);
+ }
+
+ for (size_t i = 0; i < kParallelize3DTile2DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize3DTile2DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize3DTile2DRangeK; k++) {
+ const size_t linear_idx = (i * kParallelize3DTile2DRangeJ + j) * kParallelize3DTile2DRangeK + k;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations)
+ << "Element (" << i << ", " << j << ", " << k << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times "
+ << "(expected: " << kIncrementIterations << ")";
+ }
+ }
+ }
+}
+
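+/* Increments a single shared counter once per element, so all threads contend on the
+ * same atomic (used by the HighContention test below). */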
+static void IncrementSame3DTile2DWithUArch(std::atomic_int* num_processed_items, uint32_t, size_t i, size_t start_j, size_t start_k, size_t tile_j, size_t tile_k) {
+ for (size_t j = start_j; j < start_j + tile_j; j++) {
+ for (size_t k = start_k; k < start_k + tile_k; k++) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+ }
+ }
+}
+
+TEST(Parallelize3DTile2DWithUArch, MultiThreadPoolHighContention) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_3d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_tile_2d_with_id_t>(IncrementSame3DTile2DWithUArch),
+ static_cast<void*>(&num_processed_items),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK,
+ kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize3DTile2DRangeI * kParallelize3DTile2DRangeJ * kParallelize3DTile2DRangeK);
+}
+
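+/* The task that receives the first tile spin-waits until every other item is done, so
+ * the call can only complete if the remaining tiles are picked up by other threads. */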
+static void WorkImbalance3DTile2DWithUArch(std::atomic_int* num_processed_items, uint32_t, size_t i, size_t start_j, size_t start_k, size_t tile_j, size_t tile_k) {
+ num_processed_items->fetch_add(tile_j * tile_k, std::memory_order_relaxed);
+ if (i == 0 && start_j == 0 && start_k == 0) {
+ /* Spin-wait until all items are computed */
+ while (num_processed_items->load(std::memory_order_relaxed) != kParallelize3DTile2DRangeI * kParallelize3DTile2DRangeJ * kParallelize3DTile2DRangeK) {
+ std::atomic_thread_fence(std::memory_order_acquire);
+ }
+ }
+}
+
+TEST(Parallelize3DTile2DWithUArch, MultiThreadPoolWorkStealing) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_3d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_3d_tile_2d_with_id_t>(WorkImbalance3DTile2DWithUArch),
+ static_cast<void*>(&num_processed_items),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK,
+ kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize3DTile2DRangeI * kParallelize3DTile2DRangeJ * kParallelize3DTile2DRangeK);
+}
+
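+/* Tests for pthreadpool_parallelize_4d: a 4D iteration space processed one element at
+ * a time, without tiling. */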
+static void ComputeNothing4D(void*, size_t, size_t, size_t, size_t) {
+}
+
+TEST(Parallelize4D, SingleThreadPoolCompletes) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+	pthreadpool_parallelize_4d(
+		threadpool.get(),
+		ComputeNothing4D,
+		nullptr,
+		kParallelize4DRangeI, kParallelize4DRangeJ, kParallelize4DRangeK, kParallelize4DRangeL,
+		0 /* flags */);
+}
+
+TEST(Parallelize4D, MultiThreadPoolCompletes) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_4d(
+ threadpool.get(),
+ ComputeNothing4D,
+ nullptr,
+ kParallelize4DRangeI, kParallelize4DRangeJ, kParallelize4DRangeK, kParallelize4DRangeL,
+ 0 /* flags */);
+}
+
+static void CheckBounds4D(void*, size_t i, size_t j, size_t k, size_t l) {
+ EXPECT_LT(i, kParallelize4DRangeI);
+ EXPECT_LT(j, kParallelize4DRangeJ);
+ EXPECT_LT(k, kParallelize4DRangeK);
+ EXPECT_LT(l, kParallelize4DRangeL);
+}
+
+TEST(Parallelize4D, SingleThreadPoolAllItemsInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_4d(
+ threadpool.get(),
+ CheckBounds4D,
+ nullptr,
+ kParallelize4DRangeI, kParallelize4DRangeJ, kParallelize4DRangeK, kParallelize4DRangeL,
+ 0 /* flags */);
+}
+
+TEST(Parallelize4D, MultiThreadPoolAllItemsInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_4d(
+ threadpool.get(),
+ CheckBounds4D,
+ nullptr,
+ kParallelize4DRangeI, kParallelize4DRangeJ, kParallelize4DRangeK, kParallelize4DRangeL,
+ 0 /* flags */);
+}
+
+static void SetTrue4D(std::atomic_bool* processed_indicators, size_t i, size_t j, size_t k, size_t l) {
+ const size_t linear_idx = ((i * kParallelize4DRangeJ + j) * kParallelize4DRangeK + k) * kParallelize4DRangeL + l;
+ processed_indicators[linear_idx].store(true, std::memory_order_relaxed);
+}
+
+TEST(Parallelize4D, SingleThreadPoolAllItemsProcessed) {
+ std::vector<std::atomic_bool> indicators(kParallelize4DRangeI * kParallelize4DRangeJ * kParallelize4DRangeK * kParallelize4DRangeL);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_4d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_t>(SetTrue4D),
+ static_cast<void*>(indicators.data()),
+ kParallelize4DRangeI, kParallelize4DRangeJ, kParallelize4DRangeK, kParallelize4DRangeL,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize4DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize4DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize4DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize4DRangeL; l++) {
+ const size_t linear_idx = ((i * kParallelize4DRangeJ + j) * kParallelize4DRangeK + k) * kParallelize4DRangeL + l;
+ EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed))
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ") not processed";
+ }
+ }
+ }
+ }
+}
+
+TEST(Parallelize4D, MultiThreadPoolAllItemsProcessed) {
+ std::vector<std::atomic_bool> indicators(kParallelize4DRangeI * kParallelize4DRangeJ * kParallelize4DRangeK * kParallelize4DRangeL);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_4d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_t>(SetTrue4D),
+ static_cast<void*>(indicators.data()),
+ kParallelize4DRangeI, kParallelize4DRangeJ, kParallelize4DRangeK, kParallelize4DRangeL,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize4DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize4DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize4DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize4DRangeL; l++) {
+ const size_t linear_idx = ((i * kParallelize4DRangeJ + j) * kParallelize4DRangeK + k) * kParallelize4DRangeL + l;
+ EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed))
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ") not processed";
+ }
+ }
+ }
+ }
+}
+
+static void Increment4D(std::atomic_int* processed_counters, size_t i, size_t j, size_t k, size_t l) {
+ const size_t linear_idx = ((i * kParallelize4DRangeJ + j) * kParallelize4DRangeK + k) * kParallelize4DRangeL + l;
+ processed_counters[linear_idx].fetch_add(1, std::memory_order_relaxed);
+}
+
+TEST(Parallelize4D, SingleThreadPoolEachItemProcessedOnce) {
+ std::vector<std::atomic_int> counters(kParallelize4DRangeI * kParallelize4DRangeJ * kParallelize4DRangeK * kParallelize4DRangeL);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_4d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_t>(Increment4D),
+ static_cast<void*>(counters.data()),
+ kParallelize4DRangeI, kParallelize4DRangeJ, kParallelize4DRangeK, kParallelize4DRangeL,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize4DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize4DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize4DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize4DRangeL; l++) {
+ const size_t linear_idx = ((i * kParallelize4DRangeJ + j) * kParallelize4DRangeK + k) * kParallelize4DRangeL + l;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)";
+ }
+ }
+ }
+ }
+}
+
+TEST(Parallelize4D, MultiThreadPoolEachItemProcessedOnce) {
+ std::vector<std::atomic_int> counters(kParallelize4DRangeI * kParallelize4DRangeJ * kParallelize4DRangeK * kParallelize4DRangeL);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_4d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_t>(Increment4D),
+ static_cast<void*>(counters.data()),
+ kParallelize4DRangeI, kParallelize4DRangeJ, kParallelize4DRangeK, kParallelize4DRangeL,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize4DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize4DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize4DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize4DRangeL; l++) {
+ const size_t linear_idx = ((i * kParallelize4DRangeJ + j) * kParallelize4DRangeK + k) * kParallelize4DRangeL + l;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)";
+ }
+ }
+ }
+ }
+}
+
+TEST(Parallelize4D, SingleThreadPoolEachItemProcessedMultipleTimes) {
+ std::vector<std::atomic_int> counters(kParallelize4DRangeI * kParallelize4DRangeJ * kParallelize4DRangeK * kParallelize4DRangeL);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ for (size_t iteration = 0; iteration < kIncrementIterations; iteration++) {
+ pthreadpool_parallelize_4d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_t>(Increment4D),
+ static_cast<void*>(counters.data()),
+ kParallelize4DRangeI, kParallelize4DRangeJ, kParallelize4DRangeK, kParallelize4DRangeL,
+ 0 /* flags */);
+ }
+
+ for (size_t i = 0; i < kParallelize4DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize4DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize4DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize4DRangeL; l++) {
+ const size_t linear_idx = ((i * kParallelize4DRangeJ + j) * kParallelize4DRangeK + k) * kParallelize4DRangeL + l;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times "
+ << "(expected: " << kIncrementIterations << ")";
+ }
+ }
+ }
+ }
+}
+
+TEST(Parallelize4D, MultiThreadPoolEachItemProcessedMultipleTimes) {
+ std::vector<std::atomic_int> counters(kParallelize4DRangeI * kParallelize4DRangeJ * kParallelize4DRangeK * kParallelize4DRangeL);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ for (size_t iteration = 0; iteration < kIncrementIterations; iteration++) {
+ pthreadpool_parallelize_4d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_t>(Increment4D),
+ static_cast<void*>(counters.data()),
+ kParallelize4DRangeI, kParallelize4DRangeJ, kParallelize4DRangeK, kParallelize4DRangeL,
+ 0 /* flags */);
+ }
+
+ for (size_t i = 0; i < kParallelize4DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize4DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize4DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize4DRangeL; l++) {
+ const size_t linear_idx = ((i * kParallelize4DRangeJ + j) * kParallelize4DRangeK + k) * kParallelize4DRangeL + l;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times "
+ << "(expected: " << kIncrementIterations << ")";
+ }
+ }
+ }
+ }
+}
+
+static void IncrementSame4D(std::atomic_int* num_processed_items, size_t i, size_t j, size_t k, size_t l) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+}
+
+TEST(Parallelize4D, MultiThreadPoolHighContention) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_4d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_t>(IncrementSame4D),
+ static_cast<void*>(&num_processed_items),
+ kParallelize4DRangeI, kParallelize4DRangeJ, kParallelize4DRangeK, kParallelize4DRangeL,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize4DRangeI * kParallelize4DRangeJ * kParallelize4DRangeK * kParallelize4DRangeL);
+}
+
+static void WorkImbalance4D(std::atomic_int* num_processed_items, size_t i, size_t j, size_t k, size_t l) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+ if (i == 0 && j == 0 && k == 0 && l == 0) {
+ /* Spin-wait until all items are computed */
+ while (num_processed_items->load(std::memory_order_relaxed) != kParallelize4DRangeI * kParallelize4DRangeJ * kParallelize4DRangeK * kParallelize4DRangeL) {
+ std::atomic_thread_fence(std::memory_order_acquire);
+ }
+ }
+}
+
+TEST(Parallelize4D, MultiThreadPoolWorkStealing) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_4d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_t>(WorkImbalance4D),
+ static_cast<void*>(&num_processed_items),
+ kParallelize4DRangeI, kParallelize4DRangeJ, kParallelize4DRangeK, kParallelize4DRangeL,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize4DRangeI * kParallelize4DRangeJ * kParallelize4DRangeK * kParallelize4DRangeL);
+}
+
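+/* Tests for pthreadpool_parallelize_4d_tile_1d: a 4D iteration space tiled along the
+ * innermost (L) dimension. */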
+static void ComputeNothing4DTile1D(void*, size_t, size_t, size_t, size_t, size_t) {
+}
+
+TEST(Parallelize4DTile1D, SingleThreadPoolCompletes) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+	pthreadpool_parallelize_4d_tile_1d(
+		threadpool.get(),
+		ComputeNothing4DTile1D,
+		nullptr,
+		kParallelize4DTile1DRangeI, kParallelize4DTile1DRangeJ, kParallelize4DTile1DRangeK, kParallelize4DTile1DRangeL,
+		kParallelize4DTile1DTileL,
+		0 /* flags */);
+}
+
+TEST(Parallelize4DTile1D, MultiThreadPoolCompletes) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_4d_tile_1d(
+ threadpool.get(),
+ ComputeNothing4DTile1D,
+ nullptr,
+ kParallelize4DTile1DRangeI, kParallelize4DTile1DRangeJ, kParallelize4DTile1DRangeK, kParallelize4DTile1DRangeL,
+ kParallelize4DTile1DTileL,
+ 0 /* flags */);
+}
+
+static void CheckBounds4DTile1D(void*, size_t i, size_t j, size_t k, size_t start_l, size_t tile_l) {
+ EXPECT_LT(i, kParallelize4DTile1DRangeI);
+ EXPECT_LT(j, kParallelize4DTile1DRangeJ);
+ EXPECT_LT(k, kParallelize4DTile1DRangeK);
+ EXPECT_LT(start_l, kParallelize4DTile1DRangeL);
+ EXPECT_LE(start_l + tile_l, kParallelize4DTile1DRangeL);
+}
+
+TEST(Parallelize4DTile1D, SingleThreadPoolAllItemsInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_4d_tile_1d(
+ threadpool.get(),
+ CheckBounds4DTile1D,
+ nullptr,
+ kParallelize4DTile1DRangeI, kParallelize4DTile1DRangeJ, kParallelize4DTile1DRangeK, kParallelize4DTile1DRangeL,
+ kParallelize4DTile1DTileL,
+ 0 /* flags */);
+}
+
+TEST(Parallelize4DTile1D, MultiThreadPoolAllItemsInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_4d_tile_1d(
+ threadpool.get(),
+ CheckBounds4DTile1D,
+ nullptr,
+ kParallelize4DTile1DRangeI, kParallelize4DTile1DRangeJ, kParallelize4DTile1DRangeK, kParallelize4DTile1DRangeL,
+ kParallelize4DTile1DTileL,
+ 0 /* flags */);
+}
+
+static void CheckTiling4DTile1D(void*, size_t i, size_t j, size_t k, size_t start_l, size_t tile_l) {
+ EXPECT_GT(tile_l, 0);
+ EXPECT_LE(tile_l, kParallelize4DTile1DTileL);
+ EXPECT_EQ(start_l % kParallelize4DTile1DTileL, 0);
+ EXPECT_EQ(tile_l, std::min<size_t>(kParallelize4DTile1DTileL, kParallelize4DTile1DRangeL - start_l));
+}
+
+TEST(Parallelize4DTile1D, SingleThreadPoolUniformTiling) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_4d_tile_1d(
+ threadpool.get(),
+ CheckTiling4DTile1D,
+ nullptr,
+ kParallelize4DTile1DRangeI, kParallelize4DTile1DRangeJ, kParallelize4DTile1DRangeK, kParallelize4DTile1DRangeL,
+ kParallelize4DTile1DTileL,
+ 0 /* flags */);
+}
+
+TEST(Parallelize4DTile1D, MultiThreadPoolUniformTiling) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_4d_tile_1d(
+ threadpool.get(),
+ CheckTiling4DTile1D,
+ nullptr,
+ kParallelize4DTile1DRangeI, kParallelize4DTile1DRangeJ, kParallelize4DTile1DRangeK, kParallelize4DTile1DRangeL,
+ kParallelize4DTile1DTileL,
+ 0 /* flags */);
+}
+
+static void SetTrue4DTile1D(std::atomic_bool* processed_indicators, size_t i, size_t j, size_t k, size_t start_l, size_t tile_l) {
+ for (size_t l = start_l; l < start_l + tile_l; l++) {
+ const size_t linear_idx = ((i * kParallelize4DTile1DRangeJ + j) * kParallelize4DTile1DRangeK + k) * kParallelize4DTile1DRangeL + l;
+ processed_indicators[linear_idx].store(true, std::memory_order_relaxed);
+ }
+}
+
+TEST(Parallelize4DTile1D, SingleThreadPoolAllItemsProcessed) {
+ std::vector<std::atomic_bool> indicators(kParallelize4DTile1DRangeI * kParallelize4DTile1DRangeJ * kParallelize4DTile1DRangeK * kParallelize4DTile1DRangeL);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_4d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_tile_1d_t>(SetTrue4DTile1D),
+ static_cast<void*>(indicators.data()),
+ kParallelize4DTile1DRangeI, kParallelize4DTile1DRangeJ, kParallelize4DTile1DRangeK, kParallelize4DTile1DRangeL,
+ kParallelize4DTile1DTileL,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize4DTile1DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize4DTile1DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize4DTile1DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize4DTile1DRangeL; l++) {
+ const size_t linear_idx = ((i * kParallelize4DTile1DRangeJ + j) * kParallelize4DTile1DRangeK + k) * kParallelize4DTile1DRangeL + l;
+ EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed))
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ") not processed";
+ }
+ }
+ }
+ }
+}
+
+TEST(Parallelize4DTile1D, MultiThreadPoolAllItemsProcessed) {
+ std::vector<std::atomic_bool> indicators(kParallelize4DTile1DRangeI * kParallelize4DTile1DRangeJ * kParallelize4DTile1DRangeK * kParallelize4DTile1DRangeL);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_4d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_tile_1d_t>(SetTrue4DTile1D),
+ static_cast<void*>(indicators.data()),
+ kParallelize4DTile1DRangeI, kParallelize4DTile1DRangeJ, kParallelize4DTile1DRangeK, kParallelize4DTile1DRangeL,
+ kParallelize4DTile1DTileL,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize4DTile1DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize4DTile1DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize4DTile1DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize4DTile1DRangeL; l++) {
+ const size_t linear_idx = ((i * kParallelize4DTile1DRangeJ + j) * kParallelize4DTile1DRangeK + k) * kParallelize4DTile1DRangeL + l;
+ EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed))
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ") not processed";
+ }
+ }
+ }
+ }
+}
+
+static void Increment4DTile1D(std::atomic_int* processed_counters, size_t i, size_t j, size_t k, size_t start_l, size_t tile_l) {
+ for (size_t l = start_l; l < start_l + tile_l; l++) {
+ const size_t linear_idx = ((i * kParallelize4DTile1DRangeJ + j) * kParallelize4DTile1DRangeK + k) * kParallelize4DTile1DRangeL + l;
+ processed_counters[linear_idx].fetch_add(1, std::memory_order_relaxed);
+ }
+}
+
+TEST(Parallelize4DTile1D, SingleThreadPoolEachItemProcessedOnce) {
+ std::vector<std::atomic_int> counters(kParallelize4DTile1DRangeI * kParallelize4DTile1DRangeJ * kParallelize4DTile1DRangeK * kParallelize4DTile1DRangeL);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_4d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_tile_1d_t>(Increment4DTile1D),
+ static_cast<void*>(counters.data()),
+ kParallelize4DTile1DRangeI, kParallelize4DTile1DRangeJ, kParallelize4DTile1DRangeK, kParallelize4DTile1DRangeL,
+ kParallelize4DTile1DTileL,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize4DTile1DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize4DTile1DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize4DTile1DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize4DTile1DRangeL; l++) {
+ const size_t linear_idx = ((i * kParallelize4DTile1DRangeJ + j) * kParallelize4DTile1DRangeK + k) * kParallelize4DTile1DRangeL + l;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)";
+ }
+ }
+ }
+ }
+}
+
+TEST(Parallelize4DTile1D, MultiThreadPoolEachItemProcessedOnce) {
+ std::vector<std::atomic_int> counters(kParallelize4DTile1DRangeI * kParallelize4DTile1DRangeJ * kParallelize4DTile1DRangeK * kParallelize4DTile1DRangeL);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_4d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_tile_1d_t>(Increment4DTile1D),
+ static_cast<void*>(counters.data()),
+ kParallelize4DTile1DRangeI, kParallelize4DTile1DRangeJ, kParallelize4DTile1DRangeK, kParallelize4DTile1DRangeL,
+ kParallelize4DTile1DTileL,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize4DTile1DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize4DTile1DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize4DTile1DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize4DTile1DRangeL; l++) {
+ const size_t linear_idx = ((i * kParallelize4DTile1DRangeJ + j) * kParallelize4DTile1DRangeK + k) * kParallelize4DTile1DRangeL + l;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)";
+ }
+ }
+ }
+ }
+}
+
+TEST(Parallelize4DTile1D, SingleThreadPoolEachItemProcessedMultipleTimes) {
+ std::vector<std::atomic_int> counters(kParallelize4DTile1DRangeI * kParallelize4DTile1DRangeJ * kParallelize4DTile1DRangeK * kParallelize4DTile1DRangeL);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ for (size_t iteration = 0; iteration < kIncrementIterations; iteration++) {
+ pthreadpool_parallelize_4d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_tile_1d_t>(Increment4DTile1D),
+ static_cast<void*>(counters.data()),
+ kParallelize4DTile1DRangeI, kParallelize4DTile1DRangeJ, kParallelize4DTile1DRangeK, kParallelize4DTile1DRangeL,
+ kParallelize4DTile1DTileL,
+ 0 /* flags */);
+ }
+
+ for (size_t i = 0; i < kParallelize4DTile1DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize4DTile1DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize4DTile1DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize4DTile1DRangeL; l++) {
+ const size_t linear_idx = ((i * kParallelize4DTile1DRangeJ + j) * kParallelize4DTile1DRangeK + k) * kParallelize4DTile1DRangeL + l;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times "
+ << "(expected: " << kIncrementIterations << ")";
+ }
+ }
+ }
+ }
+}
+
+TEST(Parallelize4DTile1D, MultiThreadPoolEachItemProcessedMultipleTimes) {
+ std::vector<std::atomic_int> counters(kParallelize4DTile1DRangeI * kParallelize4DTile1DRangeJ * kParallelize4DTile1DRangeK * kParallelize4DTile1DRangeL);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ for (size_t iteration = 0; iteration < kIncrementIterations; iteration++) {
+ pthreadpool_parallelize_4d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_tile_1d_t>(Increment4DTile1D),
+ static_cast<void*>(counters.data()),
+ kParallelize4DTile1DRangeI, kParallelize4DTile1DRangeJ, kParallelize4DTile1DRangeK, kParallelize4DTile1DRangeL,
+ kParallelize4DTile1DTileL,
+ 0 /* flags */);
+ }
+
+ for (size_t i = 0; i < kParallelize4DTile1DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize4DTile1DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize4DTile1DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize4DTile1DRangeL; l++) {
+ const size_t linear_idx = ((i * kParallelize4DTile1DRangeJ + j) * kParallelize4DTile1DRangeK + k) * kParallelize4DTile1DRangeL + l;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times "
+ << "(expected: " << kIncrementIterations << ")";
+ }
+ }
+ }
+ }
+}
+
+static void IncrementSame4DTile1D(std::atomic_int* num_processed_items, size_t i, size_t j, size_t k, size_t start_l, size_t tile_l) {
+ for (size_t l = start_l; l < start_l + tile_l; l++) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+ }
+}
+
+TEST(Parallelize4DTile1D, MultiThreadPoolHighContention) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_4d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_tile_1d_t>(IncrementSame4DTile1D),
+ static_cast<void*>(&num_processed_items),
+ kParallelize4DTile1DRangeI, kParallelize4DTile1DRangeJ, kParallelize4DTile1DRangeK, kParallelize4DTile1DRangeL,
+ kParallelize4DTile1DTileL,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize4DTile1DRangeI * kParallelize4DTile1DRangeJ * kParallelize4DTile1DRangeK * kParallelize4DTile1DRangeL);
+}
+
+static void WorkImbalance4DTile1D(std::atomic_int* num_processed_items, size_t i, size_t j, size_t k, size_t start_l, size_t tile_l) {
+ num_processed_items->fetch_add(tile_l, std::memory_order_relaxed);
+ if (i == 0 && j == 0 && k == 0 && start_l == 0) {
+ /* Spin-wait until all items are computed */
+ while (num_processed_items->load(std::memory_order_relaxed) != kParallelize4DTile1DRangeI * kParallelize4DTile1DRangeJ * kParallelize4DTile1DRangeK * kParallelize4DTile1DRangeL) {
+ std::atomic_thread_fence(std::memory_order_acquire);
+ }
+ }
+}
+
+TEST(Parallelize4DTile1D, MultiThreadPoolWorkStealing) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_4d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_tile_1d_t>(WorkImbalance4DTile1D),
+ static_cast<void*>(&num_processed_items),
+ kParallelize4DTile1DRangeI, kParallelize4DTile1DRangeJ, kParallelize4DTile1DRangeK, kParallelize4DTile1DRangeL,
+ kParallelize4DTile1DTileL,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize4DTile1DRangeI * kParallelize4DTile1DRangeJ * kParallelize4DTile1DRangeK * kParallelize4DTile1DRangeL);
+}
+
static void ComputeNothing4DTile2D(void*, size_t, size_t, size_t, size_t, size_t, size_t) {
}
@@ -2092,6 +4719,34 @@ TEST(Parallelize4DTile2D, MultiThreadPoolEachItemProcessedMultipleTimes) {
}
}
+static void IncrementSame4DTile2D(std::atomic_int* num_processed_items, size_t i, size_t j, size_t start_k, size_t start_l, size_t tile_k, size_t tile_l) {
+ for (size_t k = start_k; k < start_k + tile_k; k++) {
+ for (size_t l = start_l; l < start_l + tile_l; l++) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+ }
+ }
+}
+
+TEST(Parallelize4DTile2D, MultiThreadPoolHighContention) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_4d_tile_2d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_tile_2d_t>(IncrementSame4DTile2D),
+ static_cast<void*>(&num_processed_items),
+ kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL,
+ kParallelize4DTile2DTileK, kParallelize4DTile2DTileL,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize4DTile2DRangeI * kParallelize4DTile2DRangeJ * kParallelize4DTile2DRangeK * kParallelize4DTile2DRangeL);
+}
+
static void WorkImbalance4DTile2D(std::atomic_int* num_processed_items, size_t i, size_t j, size_t start_k, size_t start_l, size_t tile_k, size_t tile_l) {
num_processed_items->fetch_add(tile_k * tile_l, std::memory_order_relaxed);
if (i == 0 && j == 0 && start_k == 0 && start_l == 0) {
@@ -2122,6 +4777,1123 @@ TEST(Parallelize4DTile2D, MultiThreadPoolWorkStealing) {
EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize4DTile2DRangeI * kParallelize4DTile2DRangeJ * kParallelize4DTile2DRangeK * kParallelize4DTile2DRangeL);
}
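+
+/* Tests for pthreadpool_parallelize_4d_tile_2d_with_uarch: a 4D iteration space tiled
+ * along the K and L dimensions, with a microarchitecture index passed to each task. */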
+static void ComputeNothing4DTile2DWithUArch(void*, uint32_t, size_t, size_t, size_t, size_t, size_t, size_t) {
+}
+
+TEST(Parallelize4DTile2DWithUArch, SingleThreadPoolCompletes) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+	pthreadpool_parallelize_4d_tile_2d_with_uarch(
+		threadpool.get(),
+		ComputeNothing4DTile2DWithUArch,
+		nullptr,
+		kDefaultUArchIndex, kMaxUArchIndex,
+		kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL,
+		kParallelize4DTile2DTileK, kParallelize4DTile2DTileL,
+		0 /* flags */);
+}
+
+TEST(Parallelize4DTile2DWithUArch, MultiThreadPoolCompletes) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_4d_tile_2d_with_uarch(
+ threadpool.get(),
+ ComputeNothing4DTile2DWithUArch,
+ nullptr,
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL,
+ kParallelize4DTile2DTileK, kParallelize4DTile2DTileL,
+ 0 /* flags */);
+}
+
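+/* The task may observe the default microarchitecture index as-is; any other reported
+ * index must not exceed kMaxUArchIndex. */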
+static void CheckUArch4DTile2DWithUArch(void*, uint32_t uarch_index, size_t, size_t, size_t, size_t, size_t, size_t) {
+ if (uarch_index != kDefaultUArchIndex) {
+ EXPECT_LE(uarch_index, kMaxUArchIndex);
+ }
+}
+
+TEST(Parallelize4DTile2DWithUArch, SingleThreadPoolUArchInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_4d_tile_2d_with_uarch(
+ threadpool.get(),
+ CheckUArch4DTile2DWithUArch,
+ nullptr,
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL,
+ kParallelize4DTile2DTileK, kParallelize4DTile2DTileL,
+ 0 /* flags */);
+}
+
+TEST(Parallelize4DTile2DWithUArch, MultiThreadPoolUArchInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_4d_tile_2d_with_uarch(
+ threadpool.get(),
+ CheckUArch4DTile2DWithUArch,
+ nullptr,
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL,
+ kParallelize4DTile2DTileK, kParallelize4DTile2DTileL,
+ 0 /* flags */);
+}
+
+static void CheckBounds4DTile2DWithUArch(void*, uint32_t, size_t i, size_t j, size_t start_k, size_t start_l, size_t tile_k, size_t tile_l) {
+ EXPECT_LT(i, kParallelize4DTile2DRangeI);
+ EXPECT_LT(j, kParallelize4DTile2DRangeJ);
+ EXPECT_LT(start_k, kParallelize4DTile2DRangeK);
+ EXPECT_LT(start_l, kParallelize4DTile2DRangeL);
+ EXPECT_LE(start_k + tile_k, kParallelize4DTile2DRangeK);
+ EXPECT_LE(start_l + tile_l, kParallelize4DTile2DRangeL);
+}
+
+TEST(Parallelize4DTile2DWithUArch, SingleThreadPoolAllItemsInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_4d_tile_2d_with_uarch(
+ threadpool.get(),
+ CheckBounds4DTile2DWithUArch,
+ nullptr,
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL,
+ kParallelize4DTile2DTileK, kParallelize4DTile2DTileL,
+ 0 /* flags */);
+}
+
+TEST(Parallelize4DTile2DWithUArch, MultiThreadPoolAllItemsInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_4d_tile_2d_with_uarch(
+ threadpool.get(),
+ CheckBounds4DTile2DWithUArch,
+ nullptr,
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL,
+ kParallelize4DTile2DTileK, kParallelize4DTile2DTileL,
+ 0 /* flags */);
+}
+
+static void CheckTiling4DTile2DWithUArch(void*, uint32_t, size_t i, size_t j, size_t start_k, size_t start_l, size_t tile_k, size_t tile_l) {
+ EXPECT_GT(tile_k, 0);
+ EXPECT_LE(tile_k, kParallelize4DTile2DTileK);
+ EXPECT_EQ(start_k % kParallelize4DTile2DTileK, 0);
+ EXPECT_EQ(tile_k, std::min<size_t>(kParallelize4DTile2DTileK, kParallelize4DTile2DRangeK - start_k));
+
+ EXPECT_GT(tile_l, 0);
+ EXPECT_LE(tile_l, kParallelize4DTile2DTileL);
+ EXPECT_EQ(start_l % kParallelize4DTile2DTileL, 0);
+ EXPECT_EQ(tile_l, std::min<size_t>(kParallelize4DTile2DTileL, kParallelize4DTile2DRangeL - start_l));
+}
+
+TEST(Parallelize4DTile2DWithUArch, SingleThreadPoolUniformTiling) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_4d_tile_2d_with_uarch(
+ threadpool.get(),
+ CheckTiling4DTile2DWithUArch,
+ nullptr,
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL,
+ kParallelize4DTile2DTileK, kParallelize4DTile2DTileL,
+ 0 /* flags */);
+}
+
+TEST(Parallelize4DTile2DWithUArch, MultiThreadPoolUniformTiling) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_4d_tile_2d_with_uarch(
+ threadpool.get(),
+ CheckTiling4DTile2DWithUArch,
+ nullptr,
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL,
+ kParallelize4DTile2DTileK, kParallelize4DTile2DTileL,
+ 0 /* flags */);
+}
+
+static void SetTrue4DTile2DWithUArch(std::atomic_bool* processed_indicators, uint32_t, size_t i, size_t j, size_t start_k, size_t start_l, size_t tile_k, size_t tile_l) {
+ for (size_t k = start_k; k < start_k + tile_k; k++) {
+ for (size_t l = start_l; l < start_l + tile_l; l++) {
+ const size_t linear_idx = ((i * kParallelize4DTile2DRangeJ + j) * kParallelize4DTile2DRangeK + k) * kParallelize4DTile2DRangeL + l;
+ processed_indicators[linear_idx].store(true, std::memory_order_relaxed);
+ }
+ }
+}
+
+TEST(Parallelize4DTile2DWithUArch, SingleThreadPoolAllItemsProcessed) {
+ std::vector<std::atomic_bool> indicators(kParallelize4DTile2DRangeI * kParallelize4DTile2DRangeJ * kParallelize4DTile2DRangeK * kParallelize4DTile2DRangeL);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_4d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_tile_2d_with_id_t>(SetTrue4DTile2DWithUArch),
+ static_cast<void*>(indicators.data()),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL,
+ kParallelize4DTile2DTileK, kParallelize4DTile2DTileL,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize4DTile2DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize4DTile2DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize4DTile2DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize4DTile2DRangeL; l++) {
+ const size_t linear_idx = ((i * kParallelize4DTile2DRangeJ + j) * kParallelize4DTile2DRangeK + k) * kParallelize4DTile2DRangeL + l;
+ EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed))
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ") not processed";
+ }
+ }
+ }
+ }
+}
+
+TEST(Parallelize4DTile2DWithUArch, MultiThreadPoolAllItemsProcessed) {
+ std::vector<std::atomic_bool> indicators(kParallelize4DTile2DRangeI * kParallelize4DTile2DRangeJ * kParallelize4DTile2DRangeK * kParallelize4DTile2DRangeL);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_4d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_tile_2d_with_id_t>(SetTrue4DTile2DWithUArch),
+ static_cast<void*>(indicators.data()),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL,
+ kParallelize4DTile2DTileK, kParallelize4DTile2DTileL,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize4DTile2DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize4DTile2DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize4DTile2DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize4DTile2DRangeL; l++) {
+ const size_t linear_idx = ((i * kParallelize4DTile2DRangeJ + j) * kParallelize4DTile2DRangeK + k) * kParallelize4DTile2DRangeL + l;
+ EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed))
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ") not processed";
+ }
+ }
+ }
+ }
+}
+
+static void Increment4DTile2DWithUArch(std::atomic_int* processed_counters, uint32_t, size_t i, size_t j, size_t start_k, size_t start_l, size_t tile_k, size_t tile_l) {
+ for (size_t k = start_k; k < start_k + tile_k; k++) {
+ for (size_t l = start_l; l < start_l + tile_l; l++) {
+ const size_t linear_idx = ((i * kParallelize4DTile2DRangeJ + j) * kParallelize4DTile2DRangeK + k) * kParallelize4DTile2DRangeL + l;
+ processed_counters[linear_idx].fetch_add(1, std::memory_order_relaxed);
+ }
+ }
+}
+
+TEST(Parallelize4DTile2DWithUArch, SingleThreadPoolEachItemProcessedOnce) {
+ std::vector<std::atomic_int> counters(kParallelize4DTile2DRangeI * kParallelize4DTile2DRangeJ * kParallelize4DTile2DRangeK * kParallelize4DTile2DRangeL);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_4d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_tile_2d_with_id_t>(Increment4DTile2DWithUArch),
+ static_cast<void*>(counters.data()),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL,
+ kParallelize4DTile2DTileK, kParallelize4DTile2DTileL,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize4DTile2DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize4DTile2DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize4DTile2DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize4DTile2DRangeL; l++) {
+ const size_t linear_idx = ((i * kParallelize4DTile2DRangeJ + j) * kParallelize4DTile2DRangeK + k) * kParallelize4DTile2DRangeL + l;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)";
+ }
+ }
+ }
+ }
+}
+
+TEST(Parallelize4DTile2DWithUArch, MultiThreadPoolEachItemProcessedOnce) {
+ std::vector<std::atomic_int> counters(kParallelize4DTile2DRangeI * kParallelize4DTile2DRangeJ * kParallelize4DTile2DRangeK * kParallelize4DTile2DRangeL);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_4d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_tile_2d_with_id_t>(Increment4DTile2DWithUArch),
+ static_cast<void*>(counters.data()),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL,
+ kParallelize4DTile2DTileK, kParallelize4DTile2DTileL,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize4DTile2DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize4DTile2DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize4DTile2DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize4DTile2DRangeL; l++) {
+ const size_t linear_idx = ((i * kParallelize4DTile2DRangeJ + j) * kParallelize4DTile2DRangeK + k) * kParallelize4DTile2DRangeL + l;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)";
+ }
+ }
+ }
+ }
+}
+
+TEST(Parallelize4DTile2DWithUArch, SingleThreadPoolEachItemProcessedMultipleTimes) {
+ std::vector<std::atomic_int> counters(kParallelize4DTile2DRangeI * kParallelize4DTile2DRangeJ * kParallelize4DTile2DRangeK * kParallelize4DTile2DRangeL);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ for (size_t iteration = 0; iteration < kIncrementIterations; iteration++) {
+ pthreadpool_parallelize_4d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_tile_2d_with_id_t>(Increment4DTile2DWithUArch),
+ static_cast<void*>(counters.data()),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL,
+ kParallelize4DTile2DTileK, kParallelize4DTile2DTileL,
+ 0 /* flags */);
+ }
+
+ for (size_t i = 0; i < kParallelize4DTile2DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize4DTile2DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize4DTile2DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize4DTile2DRangeL; l++) {
+ const size_t linear_idx = ((i * kParallelize4DTile2DRangeJ + j) * kParallelize4DTile2DRangeK + k) * kParallelize4DTile2DRangeL + l;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times "
+ << "(expected: " << kIncrementIterations << ")";
+ }
+ }
+ }
+ }
+}
+
+TEST(Parallelize4DTile2DWithUArch, MultiThreadPoolEachItemProcessedMultipleTimes) {
+ std::vector<std::atomic_int> counters(kParallelize4DTile2DRangeI * kParallelize4DTile2DRangeJ * kParallelize4DTile2DRangeK * kParallelize4DTile2DRangeL);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ for (size_t iteration = 0; iteration < kIncrementIterations; iteration++) {
+ pthreadpool_parallelize_4d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_tile_2d_with_id_t>(Increment4DTile2DWithUArch),
+ static_cast<void*>(counters.data()),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL,
+ kParallelize4DTile2DTileK, kParallelize4DTile2DTileL,
+ 0 /* flags */);
+ }
+
+ for (size_t i = 0; i < kParallelize4DTile2DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize4DTile2DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize4DTile2DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize4DTile2DRangeL; l++) {
+ const size_t linear_idx = ((i * kParallelize4DTile2DRangeJ + j) * kParallelize4DTile2DRangeK + k) * kParallelize4DTile2DRangeL + l;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times "
+ << "(expected: " << kIncrementIterations << ")";
+ }
+ }
+ }
+ }
+}
+
+static void IncrementSame4DTile2DWithUArch(std::atomic_int* num_processed_items, uint32_t, size_t i, size_t j, size_t start_k, size_t start_l, size_t tile_k, size_t tile_l) {
+ for (size_t k = start_k; k < start_k + tile_k; k++) {
+ for (size_t l = start_l; l < start_l + tile_l; l++) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+ }
+ }
+}
+
+TEST(Parallelize4DTile2DWithUArch, MultiThreadPoolHighContention) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_4d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_tile_2d_with_id_t>(IncrementSame4DTile2DWithUArch),
+ static_cast<void*>(&num_processed_items),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL,
+ kParallelize4DTile2DTileK, kParallelize4DTile2DTileL,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize4DTile2DRangeI * kParallelize4DTile2DRangeJ * kParallelize4DTile2DRangeK * kParallelize4DTile2DRangeL);
+}
+
+static void WorkImbalance4DTile2DWithUArch(std::atomic_int* num_processed_items, uint32_t, size_t i, size_t j, size_t start_k, size_t start_l, size_t tile_k, size_t tile_l) {
+ num_processed_items->fetch_add(tile_k * tile_l, std::memory_order_relaxed);
+ if (i == 0 && j == 0 && start_k == 0 && start_l == 0) {
+ /* Spin-wait until all items are computed */
+ while (num_processed_items->load(std::memory_order_relaxed) != kParallelize4DTile2DRangeI * kParallelize4DTile2DRangeJ * kParallelize4DTile2DRangeK * kParallelize4DTile2DRangeL) {
+ std::atomic_thread_fence(std::memory_order_acquire);
+ }
+ }
+}
+
+TEST(Parallelize4DTile2DWithUArch, MultiThreadPoolWorkStealing) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_4d_tile_2d_with_uarch(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_4d_tile_2d_with_id_t>(WorkImbalance4DTile2DWithUArch),
+ static_cast<void*>(&num_processed_items),
+ kDefaultUArchIndex, kMaxUArchIndex,
+ kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL,
+ kParallelize4DTile2DTileK, kParallelize4DTile2DTileL,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize4DTile2DRangeI * kParallelize4DTile2DRangeJ * kParallelize4DTile2DRangeK * kParallelize4DTile2DRangeL);
+}
+
+static void ComputeNothing5D(void*, size_t, size_t, size_t, size_t, size_t) {
+}
+
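+/* Note: pthreadpool_create(1) creates a single-threaded pool (work runs on
+ * the calling thread), while pthreadpool_create(0) sizes the pool to the
+ * number of logical processors; the MultiThreadPool tests below skip when
+ * only one processor is available. */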
+TEST(Parallelize5D, SingleThreadPoolCompletes) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_5d(threadpool.get(),
+ ComputeNothing5D,
+ nullptr,
+ kParallelize5DRangeI, kParallelize5DRangeJ, kParallelize5DRangeK, kParallelize5DRangeL, kParallelize5DRangeM,
+ 0 /* flags */);
+}
+
+TEST(Parallelize5D, MultiThreadPoolCompletes) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_5d(
+ threadpool.get(),
+ ComputeNothing5D,
+ nullptr,
+ kParallelize5DRangeI, kParallelize5DRangeJ, kParallelize5DRangeK, kParallelize5DRangeL, kParallelize5DRangeM,
+ 0 /* flags */);
+}
+
+static void CheckBounds5D(void*, size_t i, size_t j, size_t k, size_t l, size_t m) {
+ EXPECT_LT(i, kParallelize5DRangeI);
+ EXPECT_LT(j, kParallelize5DRangeJ);
+ EXPECT_LT(k, kParallelize5DRangeK);
+ EXPECT_LT(l, kParallelize5DRangeL);
+ EXPECT_LT(m, kParallelize5DRangeM);
+}
+
+TEST(Parallelize5D, SingleThreadPoolAllItemsInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_5d(
+ threadpool.get(),
+ CheckBounds5D,
+ nullptr,
+ kParallelize5DRangeI, kParallelize5DRangeJ, kParallelize5DRangeK, kParallelize5DRangeL, kParallelize5DRangeM,
+ 0 /* flags */);
+}
+
+TEST(Parallelize5D, MultiThreadPoolAllItemsInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_5d(
+ threadpool.get(),
+ CheckBounds5D,
+ nullptr,
+ kParallelize5DRangeI, kParallelize5DRangeJ, kParallelize5DRangeK, kParallelize5DRangeL, kParallelize5DRangeM,
+ 0 /* flags */);
+}
+
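+/* The helpers below flatten a 5D index (i, j, k, l, m) in row-major order,
+ * (((i * J + j) * K + k) * L + l) * M + m, and are reinterpret_cast to the
+ * generic pthreadpool task type, which passes the context back as void*. */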
+static void SetTrue5D(std::atomic_bool* processed_indicators, size_t i, size_t j, size_t k, size_t l, size_t m) {
+ const size_t linear_idx = (((i * kParallelize5DRangeJ + j) * kParallelize5DRangeK + k) * kParallelize5DRangeL + l) * kParallelize5DRangeM + m;
+ processed_indicators[linear_idx].store(true, std::memory_order_relaxed);
+}
+
+TEST(Parallelize5D, SingleThreadPoolAllItemsProcessed) {
+ std::vector<std::atomic_bool> indicators(kParallelize5DRangeI * kParallelize5DRangeJ * kParallelize5DRangeK * kParallelize5DRangeL * kParallelize5DRangeM);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_5d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_5d_t>(SetTrue5D),
+ static_cast<void*>(indicators.data()),
+ kParallelize5DRangeI, kParallelize5DRangeJ, kParallelize5DRangeK, kParallelize5DRangeL, kParallelize5DRangeM,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize5DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize5DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize5DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize5DRangeL; l++) {
+ for (size_t m = 0; m < kParallelize5DRangeM; m++) {
+ const size_t linear_idx = (((i * kParallelize5DRangeJ + j) * kParallelize5DRangeK + k) * kParallelize5DRangeL + l) * kParallelize5DRangeM + m;
+ EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed))
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ") not processed";
+ }
+ }
+ }
+ }
+ }
+}
+
+TEST(Parallelize5D, MultiThreadPoolAllItemsProcessed) {
+ std::vector<std::atomic_bool> indicators(kParallelize5DRangeI * kParallelize5DRangeJ * kParallelize5DRangeK * kParallelize5DRangeL * kParallelize5DRangeM);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_5d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_5d_t>(SetTrue5D),
+ static_cast<void*>(indicators.data()),
+ kParallelize5DRangeI, kParallelize5DRangeJ, kParallelize5DRangeK, kParallelize5DRangeL, kParallelize5DRangeM,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize5DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize5DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize5DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize5DRangeL; l++) {
+ for (size_t m = 0; m < kParallelize5DRangeM; m++) {
+ const size_t linear_idx = (((i * kParallelize5DRangeJ + j) * kParallelize5DRangeK + k) * kParallelize5DRangeL + l) * kParallelize5DRangeM + m;
+ EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed))
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ") not processed";
+ }
+ }
+ }
+ }
+ }
+}
+
+static void Increment5D(std::atomic_int* processed_counters, size_t i, size_t j, size_t k, size_t l, size_t m) {
+ const size_t linear_idx = (((i * kParallelize5DRangeJ + j) * kParallelize5DRangeK + k) * kParallelize5DRangeL + l) * kParallelize5DRangeM + m;
+ processed_counters[linear_idx].fetch_add(1, std::memory_order_relaxed);
+}
+
+TEST(Parallelize5D, SingleThreadPoolEachItemProcessedOnce) {
+ std::vector<std::atomic_int> counters(kParallelize5DRangeI * kParallelize5DRangeJ * kParallelize5DRangeK * kParallelize5DRangeL * kParallelize5DRangeM);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_5d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_5d_t>(Increment5D),
+ static_cast<void*>(counters.data()),
+ kParallelize5DRangeI, kParallelize5DRangeJ, kParallelize5DRangeK, kParallelize5DRangeL, kParallelize5DRangeM,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize5DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize5DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize5DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize5DRangeL; l++) {
+ for (size_t m = 0; m < kParallelize5DRangeM; m++) {
+ const size_t linear_idx = (((i * kParallelize5DRangeJ + j) * kParallelize5DRangeK + k) * kParallelize5DRangeL + l) * kParallelize5DRangeM + m;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)";
+ }
+ }
+ }
+ }
+ }
+}
+
+TEST(Parallelize5D, MultiThreadPoolEachItemProcessedOnce) {
+ std::vector<std::atomic_int> counters(kParallelize5DRangeI * kParallelize5DRangeJ * kParallelize5DRangeK * kParallelize5DRangeL * kParallelize5DRangeM);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_5d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_5d_t>(Increment5D),
+ static_cast<void*>(counters.data()),
+ kParallelize5DRangeI, kParallelize5DRangeJ, kParallelize5DRangeK, kParallelize5DRangeL, kParallelize5DRangeM,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize5DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize5DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize5DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize5DRangeL; l++) {
+ for (size_t m = 0; m < kParallelize5DRangeM; m++) {
+ const size_t linear_idx = (((i * kParallelize5DRangeJ + j) * kParallelize5DRangeK + k) * kParallelize5DRangeL + l) * kParallelize5DRangeM + m;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)";
+ }
+ }
+ }
+ }
+ }
+}
+
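+/* The EachItemProcessedMultipleTimes tests reuse one pool across
+ * kIncrementIterations5D parallelize calls to verify that repeated runs
+ * neither drop nor duplicate work items. */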
+TEST(Parallelize5D, SingleThreadPoolEachItemProcessedMultipleTimes) {
+ std::vector<std::atomic_int> counters(kParallelize5DRangeI * kParallelize5DRangeJ * kParallelize5DRangeK * kParallelize5DRangeL * kParallelize5DRangeM);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ for (size_t iteration = 0; iteration < kIncrementIterations5D; iteration++) {
+ pthreadpool_parallelize_5d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_5d_t>(Increment5D),
+ static_cast<void*>(counters.data()),
+ kParallelize5DRangeI, kParallelize5DRangeJ, kParallelize5DRangeK, kParallelize5DRangeL, kParallelize5DRangeM,
+ 0 /* flags */);
+ }
+
+ for (size_t i = 0; i < kParallelize5DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize5DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize5DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize5DRangeL; l++) {
+ for (size_t m = 0; m < kParallelize5DRangeM; m++) {
+ const size_t linear_idx = (((i * kParallelize5DRangeJ + j) * kParallelize5DRangeK + k) * kParallelize5DRangeL + l) * kParallelize5DRangeM + m;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations5D)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times "
+ << "(expected: " << kIncrementIterations5D << ")";
+ }
+ }
+ }
+ }
+ }
+}
+
+TEST(Parallelize5D, MultiThreadPoolEachItemProcessedMultipleTimes) {
+ std::vector<std::atomic_int> counters(kParallelize5DRangeI * kParallelize5DRangeJ * kParallelize5DRangeK * kParallelize5DRangeL * kParallelize5DRangeM);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ for (size_t iteration = 0; iteration < kIncrementIterations5D; iteration++) {
+ pthreadpool_parallelize_5d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_5d_t>(Increment5D),
+ static_cast<void*>(counters.data()),
+ kParallelize5DRangeI, kParallelize5DRangeJ, kParallelize5DRangeK, kParallelize5DRangeL, kParallelize5DRangeM,
+ 0 /* flags */);
+ }
+
+ for (size_t i = 0; i < kParallelize5DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize5DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize5DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize5DRangeL; l++) {
+ for (size_t m = 0; m < kParallelize5DRangeM; m++) {
+ const size_t linear_idx = (((i * kParallelize5DRangeJ + j) * kParallelize5DRangeK + k) * kParallelize5DRangeL + l) * kParallelize5DRangeM + m;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations5D)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times "
+ << "(expected: " << kIncrementIterations5D << ")";
+ }
+ }
+ }
+ }
+ }
+}
+
+static void IncrementSame5D(std::atomic_int* num_processed_items, size_t i, size_t j, size_t k, size_t l, size_t m) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+}
+
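+/* Every invocation increments the same shared counter, so this test stresses
+ * contention on a single cache line rather than per-item bookkeeping. */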
+TEST(Parallelize5D, MultiThreadPoolHighContention) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_5d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_5d_t>(IncrementSame5D),
+ static_cast<void*>(&num_processed_items),
+ kParallelize5DRangeI, kParallelize5DRangeJ, kParallelize5DRangeK, kParallelize5DRangeL, kParallelize5DRangeM,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize5DRangeI * kParallelize5DRangeJ * kParallelize5DRangeK * kParallelize5DRangeL * kParallelize5DRangeM);
+}
+
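+/* WorkImbalance5D pins the work item at the origin: it spin-waits until all
+ * other items are done, so the test below only terminates if the remaining
+ * items are executed by other threads in the pool. */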
+static void WorkImbalance5D(std::atomic_int* num_processed_items, size_t i, size_t j, size_t k, size_t l, size_t m) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+ if (i == 0 && j == 0 && k == 0 && l == 0 && m == 0) {
+ /* Spin-wait until all items are computed */
+ while (num_processed_items->load(std::memory_order_relaxed) != kParallelize5DRangeI * kParallelize5DRangeJ * kParallelize5DRangeK * kParallelize5DRangeL * kParallelize5DRangeM) {
+ std::atomic_thread_fence(std::memory_order_acquire);
+ }
+ }
+}
+
+TEST(Parallelize5D, MultiThreadPoolWorkStealing) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_5d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_5d_t>(WorkImbalance5D),
+ static_cast<void*>(&num_processed_items),
+ kParallelize5DRangeI, kParallelize5DRangeJ, kParallelize5DRangeK, kParallelize5DRangeL, kParallelize5DRangeM,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize5DRangeI * kParallelize5DRangeJ * kParallelize5DRangeK * kParallelize5DRangeL * kParallelize5DRangeM);
+}
+
+static void ComputeNothing5DTile1D(void*, size_t, size_t, size_t, size_t, size_t, size_t) {
+}
+
+TEST(Parallelize5DTile1D, SingleThreadPoolCompletes) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_5d_tile_1d(threadpool.get(),
+ ComputeNothing5DTile1D,
+ nullptr,
+ kParallelize5DTile1DRangeI, kParallelize5DTile1DRangeJ, kParallelize5DTile1DRangeK, kParallelize5DTile1DRangeL, kParallelize5DTile1DRangeM,
+ kParallelize5DTile1DTileM,
+ 0 /* flags */);
+}
+
+TEST(Parallelize5DTile1D, MultiThreadPoolCompletes) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_5d_tile_1d(
+ threadpool.get(),
+ ComputeNothing5DTile1D,
+ nullptr,
+ kParallelize5DTile1DRangeI, kParallelize5DTile1DRangeJ, kParallelize5DTile1DRangeK, kParallelize5DTile1DRangeL, kParallelize5DTile1DRangeM,
+ kParallelize5DTile1DTileM,
+ 0 /* flags */);
+}
+
+static void CheckBounds5DTile1D(void*, size_t i, size_t j, size_t k, size_t l, size_t start_m, size_t tile_m) {
+ EXPECT_LT(i, kParallelize5DTile1DRangeI);
+ EXPECT_LT(j, kParallelize5DTile1DRangeJ);
+ EXPECT_LT(k, kParallelize5DTile1DRangeK);
+ EXPECT_LT(l, kParallelize5DTile1DRangeL);
+ EXPECT_LT(start_m, kParallelize5DTile1DRangeM);
+ EXPECT_LE(start_m + tile_m, kParallelize5DTile1DRangeM);
+}
+
+TEST(Parallelize5DTile1D, SingleThreadPoolAllItemsInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_5d_tile_1d(
+ threadpool.get(),
+ CheckBounds5DTile1D,
+ nullptr,
+ kParallelize5DTile1DRangeI, kParallelize5DTile1DRangeJ, kParallelize5DTile1DRangeK, kParallelize5DTile1DRangeL, kParallelize5DTile1DRangeM,
+ kParallelize5DTile1DTileM,
+ 0 /* flags */);
+}
+
+TEST(Parallelize5DTile1D, MultiThreadPoolAllItemsInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_5d_tile_1d(
+ threadpool.get(),
+ CheckBounds5DTile1D,
+ nullptr,
+ kParallelize5DTile1DRangeI, kParallelize5DTile1DRangeJ, kParallelize5DTile1DRangeK, kParallelize5DTile1DRangeL, kParallelize5DTile1DRangeM,
+ kParallelize5DTile1DTileM,
+ 0 /* flags */);
+}
+
+static void CheckTiling5DTile1D(void*, size_t i, size_t j, size_t k, size_t l, size_t start_m, size_t tile_m) {
+ EXPECT_GT(tile_m, 0);
+ EXPECT_LE(tile_m, kParallelize5DTile1DTileM);
+ EXPECT_EQ(start_m % kParallelize5DTile1DTileM, 0);
+ EXPECT_EQ(tile_m, std::min<size_t>(kParallelize5DTile1DTileM, kParallelize5DTile1DRangeM - start_m));
+}
+
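+/* Tiling invariant: each tile starts at a multiple of the tile size and spans
+ * min(tile, range - start) items, so only the final tile may be partial. */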
+TEST(Parallelize5DTile1D, SingleThreadPoolUniformTiling) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_5d_tile_1d(
+ threadpool.get(),
+ CheckTiling5DTile1D,
+ nullptr,
+ kParallelize5DTile1DRangeI, kParallelize5DTile1DRangeJ, kParallelize5DTile1DRangeK, kParallelize5DTile1DRangeL, kParallelize5DTile1DRangeM,
+ kParallelize5DTile1DTileM,
+ 0 /* flags */);
+}
+
+TEST(Parallelize5DTile1D, MultiThreadPoolUniformTiling) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_5d_tile_1d(
+ threadpool.get(),
+ CheckTiling5DTile1D,
+ nullptr,
+ kParallelize5DTile1DRangeI, kParallelize5DTile1DRangeJ, kParallelize5DTile1DRangeK, kParallelize5DTile1DRangeL, kParallelize5DTile1DRangeM,
+ kParallelize5DTile1DTileM,
+ 0 /* flags */);
+}
+
+static void SetTrue5DTile1D(std::atomic_bool* processed_indicators, size_t i, size_t j, size_t k, size_t l, size_t start_m, size_t tile_m) {
+ for (size_t m = start_m; m < start_m + tile_m; m++) {
+ const size_t linear_idx = (((i * kParallelize5DTile1DRangeJ + j) * kParallelize5DTile1DRangeK + k) * kParallelize5DTile1DRangeL + l) * kParallelize5DTile1DRangeM + m;
+ processed_indicators[linear_idx].store(true, std::memory_order_relaxed);
+ }
+}
+
+TEST(Parallelize5DTile1D, SingleThreadPoolAllItemsProcessed) {
+ std::vector<std::atomic_bool> indicators(kParallelize5DTile1DRangeI * kParallelize5DTile1DRangeJ * kParallelize5DTile1DRangeK * kParallelize5DTile1DRangeL * kParallelize5DTile1DRangeM);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_5d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_5d_tile_1d_t>(SetTrue5DTile1D),
+ static_cast<void*>(indicators.data()),
+ kParallelize5DTile1DRangeI, kParallelize5DTile1DRangeJ, kParallelize5DTile1DRangeK, kParallelize5DTile1DRangeL, kParallelize5DTile1DRangeM,
+ kParallelize5DTile1DTileM,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize5DTile1DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize5DTile1DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize5DTile1DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize5DTile1DRangeL; l++) {
+ for (size_t m = 0; m < kParallelize5DTile1DRangeM; m++) {
+ const size_t linear_idx = (((i * kParallelize5DTile1DRangeJ + j) * kParallelize5DTile1DRangeK + k) * kParallelize5DTile1DRangeL + l) * kParallelize5DTile1DRangeM + m;
+ EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed))
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ") not processed";
+ }
+ }
+ }
+ }
+ }
+}
+
+TEST(Parallelize5DTile1D, MultiThreadPoolAllItemsProcessed) {
+ std::vector<std::atomic_bool> indicators(kParallelize5DTile1DRangeI * kParallelize5DTile1DRangeJ * kParallelize5DTile1DRangeK * kParallelize5DTile1DRangeL * kParallelize5DTile1DRangeM);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_5d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_5d_tile_1d_t>(SetTrue5DTile1D),
+ static_cast<void*>(indicators.data()),
+ kParallelize5DTile1DRangeI, kParallelize5DTile1DRangeJ, kParallelize5DTile1DRangeK, kParallelize5DTile1DRangeL, kParallelize5DTile1DRangeM,
+ kParallelize5DTile1DTileM,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize5DTile1DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize5DTile1DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize5DTile1DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize5DTile1DRangeL; l++) {
+ for (size_t m = 0; m < kParallelize5DTile1DRangeM; m++) {
+ const size_t linear_idx = (((i * kParallelize5DTile1DRangeJ + j) * kParallelize5DTile1DRangeK + k) * kParallelize5DTile1DRangeL + l) * kParallelize5DTile1DRangeM + m;
+ EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed))
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ") not processed";
+ }
+ }
+ }
+ }
+ }
+}
+
+static void Increment5DTile1D(std::atomic_int* processed_counters, size_t i, size_t j, size_t k, size_t l, size_t start_m, size_t tile_m) {
+ for (size_t m = start_m; m < start_m + tile_m; m++) {
+ const size_t linear_idx = (((i * kParallelize5DTile1DRangeJ + j) * kParallelize5DTile1DRangeK + k) * kParallelize5DTile1DRangeL + l) * kParallelize5DTile1DRangeM + m;
+ processed_counters[linear_idx].fetch_add(1, std::memory_order_relaxed);
+ }
+}
+
+TEST(Parallelize5DTile1D, SingleThreadPoolEachItemProcessedOnce) {
+ std::vector<std::atomic_int> counters(kParallelize5DTile1DRangeI * kParallelize5DTile1DRangeJ * kParallelize5DTile1DRangeK * kParallelize5DTile1DRangeL * kParallelize5DTile1DRangeM);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_5d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_5d_tile_1d_t>(Increment5DTile1D),
+ static_cast<void*>(counters.data()),
+ kParallelize5DTile1DRangeI, kParallelize5DTile1DRangeJ, kParallelize5DTile1DRangeK, kParallelize5DTile1DRangeL, kParallelize5DTile1DRangeM,
+ kParallelize5DTile1DTileM,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize5DTile1DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize5DTile1DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize5DTile1DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize5DTile1DRangeL; l++) {
+ for (size_t m = 0; m < kParallelize5DTile1DRangeM; m++) {
+ const size_t linear_idx = (((i * kParallelize5DTile1DRangeJ + j) * kParallelize5DTile1DRangeK + k) * kParallelize5DTile1DRangeL + l) * kParallelize5DTile1DRangeM + m;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)";
+ }
+ }
+ }
+ }
+ }
+}
+
+TEST(Parallelize5DTile1D, MultiThreadPoolEachItemProcessedOnce) {
+ std::vector<std::atomic_int> counters(kParallelize5DTile1DRangeI * kParallelize5DTile1DRangeJ * kParallelize5DTile1DRangeK * kParallelize5DTile1DRangeL * kParallelize5DTile1DRangeM);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_5d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_5d_tile_1d_t>(Increment5DTile1D),
+ static_cast<void*>(counters.data()),
+ kParallelize5DTile1DRangeI, kParallelize5DTile1DRangeJ, kParallelize5DTile1DRangeK, kParallelize5DTile1DRangeL, kParallelize5DTile1DRangeM,
+ kParallelize5DTile1DTileM,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize5DTile1DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize5DTile1DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize5DTile1DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize5DTile1DRangeL; l++) {
+ for (size_t m = 0; m < kParallelize5DTile1DRangeM; m++) {
+ const size_t linear_idx = (((i * kParallelize5DTile1DRangeJ + j) * kParallelize5DTile1DRangeK + k) * kParallelize5DTile1DRangeL + l) * kParallelize5DTile1DRangeM + m;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)";
+ }
+ }
+ }
+ }
+ }
+}
+
+TEST(Parallelize5DTile1D, SingleThreadPoolEachItemProcessedMultipleTimes) {
+ std::vector<std::atomic_int> counters(kParallelize5DTile1DRangeI * kParallelize5DTile1DRangeJ * kParallelize5DTile1DRangeK * kParallelize5DTile1DRangeL * kParallelize5DTile1DRangeM);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ for (size_t iteration = 0; iteration < kIncrementIterations5D; iteration++) {
+ pthreadpool_parallelize_5d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_5d_tile_1d_t>(Increment5DTile1D),
+ static_cast<void*>(counters.data()),
+ kParallelize5DTile1DRangeI, kParallelize5DTile1DRangeJ, kParallelize5DTile1DRangeK, kParallelize5DTile1DRangeL, kParallelize5DTile1DRangeM,
+ kParallelize5DTile1DTileM,
+ 0 /* flags */);
+ }
+
+ for (size_t i = 0; i < kParallelize5DTile1DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize5DTile1DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize5DTile1DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize5DTile1DRangeL; l++) {
+ for (size_t m = 0; m < kParallelize5DTile1DRangeM; m++) {
+ const size_t linear_idx = (((i * kParallelize5DTile1DRangeJ + j) * kParallelize5DTile1DRangeK + k) * kParallelize5DTile1DRangeL + l) * kParallelize5DTile1DRangeM + m;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations5D)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times "
+ << "(expected: " << kIncrementIterations5D << ")";
+ }
+ }
+ }
+ }
+ }
+}
+
+TEST(Parallelize5DTile1D, MultiThreadPoolEachItemProcessedMultipleTimes) {
+ std::vector<std::atomic_int> counters(kParallelize5DTile1DRangeI * kParallelize5DTile1DRangeJ * kParallelize5DTile1DRangeK * kParallelize5DTile1DRangeL * kParallelize5DTile1DRangeM);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ for (size_t iteration = 0; iteration < kIncrementIterations5D; iteration++) {
+ pthreadpool_parallelize_5d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_5d_tile_1d_t>(Increment5DTile1D),
+ static_cast<void*>(counters.data()),
+ kParallelize5DTile1DRangeI, kParallelize5DTile1DRangeJ, kParallelize5DTile1DRangeK, kParallelize5DTile1DRangeL, kParallelize5DTile1DRangeM,
+ kParallelize5DTile1DTileM,
+ 0 /* flags */);
+ }
+
+ for (size_t i = 0; i < kParallelize5DTile1DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize5DTile1DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize5DTile1DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize5DTile1DRangeL; l++) {
+ for (size_t m = 0; m < kParallelize5DTile1DRangeM; m++) {
+ const size_t linear_idx = (((i * kParallelize5DTile1DRangeJ + j) * kParallelize5DTile1DRangeK + k) * kParallelize5DTile1DRangeL + l) * kParallelize5DTile1DRangeM + m;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations5D)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times "
+ << "(expected: " << kIncrementIterations5D << ")";
+ }
+ }
+ }
+ }
+ }
+}
+
+static void IncrementSame5DTile1D(std::atomic_int* num_processed_items, size_t i, size_t j, size_t k, size_t l, size_t start_m, size_t tile_m) {
+ for (size_t m = start_m; m < start_m + tile_m; m++) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+ }
+}
+
+TEST(Parallelize5DTile1D, MultiThreadPoolHighContention) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_5d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_5d_tile_1d_t>(IncrementSame5DTile1D),
+ static_cast<void*>(&num_processed_items),
+ kParallelize5DTile1DRangeI, kParallelize5DTile1DRangeJ, kParallelize5DTile1DRangeK, kParallelize5DTile1DRangeL, kParallelize5DTile1DRangeM,
+ kParallelize5DTile1DTileM,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize5DTile1DRangeI * kParallelize5DTile1DRangeJ * kParallelize5DTile1DRangeK * kParallelize5DTile1DRangeL * kParallelize5DTile1DRangeM);
+}
+
+static void WorkImbalance5DTile1D(std::atomic_int* num_processed_items, size_t i, size_t j, size_t k, size_t l, size_t start_m, size_t tile_m) {
+ num_processed_items->fetch_add(tile_m, std::memory_order_relaxed);
+ if (i == 0 && j == 0 && k == 0 && l == 0 && start_m == 0) {
+ /* Spin-wait until all items are computed */
+ while (num_processed_items->load(std::memory_order_relaxed) != kParallelize5DTile1DRangeI * kParallelize5DTile1DRangeJ * kParallelize5DTile1DRangeK * kParallelize5DTile1DRangeL * kParallelize5DTile1DRangeM) {
+ std::atomic_thread_fence(std::memory_order_acquire);
+ }
+ }
+}
+
+TEST(Parallelize5DTile1D, MultiThreadPoolWorkStealing) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_5d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_5d_tile_1d_t>(WorkImbalance5DTile1D),
+ static_cast<void*>(&num_processed_items),
+ kParallelize5DTile1DRangeI, kParallelize5DTile1DRangeJ, kParallelize5DTile1DRangeK, kParallelize5DTile1DRangeL, kParallelize5DTile1DRangeM,
+ kParallelize5DTile1DTileM,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize5DTile1DRangeI * kParallelize5DTile1DRangeJ * kParallelize5DTile1DRangeK * kParallelize5DTile1DRangeL * kParallelize5DTile1DRangeM);
+}
+
static void ComputeNothing5DTile2D(void*, size_t, size_t, size_t, size_t, size_t, size_t, size_t) {
}
@@ -2450,6 +6222,34 @@ TEST(Parallelize5DTile2D, MultiThreadPoolEachItemProcessedMultipleTimes) {
}
}
+static void IncrementSame5DTile2D(std::atomic_int* num_processed_items, size_t i, size_t j, size_t k, size_t start_l, size_t start_m, size_t tile_l, size_t tile_m) {
+ for (size_t l = start_l; l < start_l + tile_l; l++) {
+ for (size_t m = start_m; m < start_m + tile_m; m++) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+ }
+ }
+}
+
+TEST(Parallelize5DTile2D, MultiThreadPoolHighContention) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_5d_tile_2d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_5d_tile_2d_t>(IncrementSame5DTile2D),
+ static_cast<void*>(&num_processed_items),
+ kParallelize5DTile2DRangeI, kParallelize5DTile2DRangeJ, kParallelize5DTile2DRangeK, kParallelize5DTile2DRangeL, kParallelize5DTile2DRangeM,
+ kParallelize5DTile2DTileL, kParallelize5DTile2DTileM,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize5DTile2DRangeI * kParallelize5DTile2DRangeJ * kParallelize5DTile2DRangeK * kParallelize5DTile2DRangeL * kParallelize5DTile2DRangeM);
+}
+
static void WorkImbalance5DTile2D(std::atomic_int* num_processed_items, size_t i, size_t j, size_t k, size_t start_l, size_t start_m, size_t tile_l, size_t tile_m) {
num_processed_items->fetch_add(tile_l * tile_m, std::memory_order_relaxed);
if (i == 0 && j == 0 && k == 0 && start_l == 0 && start_m == 0) {
@@ -2480,6 +6280,724 @@ TEST(Parallelize5DTile2D, MultiThreadPoolWorkStealing) {
EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize5DTile2DRangeI * kParallelize5DTile2DRangeJ * kParallelize5DTile2DRangeK * kParallelize5DTile2DRangeL * kParallelize5DTile2DRangeM);
}
+static void ComputeNothing6D(void*, size_t, size_t, size_t, size_t, size_t, size_t) {
+}
+
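+/* The 6D tests mirror the 5D suite above with an extra innermost dimension n. */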
+TEST(Parallelize6D, SingleThreadPoolCompletes) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_6d(threadpool.get(),
+ ComputeNothing6D,
+ nullptr,
+ kParallelize6DRangeI, kParallelize6DRangeJ, kParallelize6DRangeK, kParallelize6DRangeL, kParallelize6DRangeM, kParallelize6DRangeN,
+ 0 /* flags */);
+}
+
+TEST(Parallelize6D, MultiThreadPoolCompletes) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_6d(
+ threadpool.get(),
+ ComputeNothing6D,
+ nullptr,
+ kParallelize6DRangeI, kParallelize6DRangeJ, kParallelize6DRangeK, kParallelize6DRangeL, kParallelize6DRangeM, kParallelize6DRangeN,
+ 0 /* flags */);
+}
+
+static void CheckBounds6D(void*, size_t i, size_t j, size_t k, size_t l, size_t m, size_t n) {
+ EXPECT_LT(i, kParallelize6DRangeI);
+ EXPECT_LT(j, kParallelize6DRangeJ);
+ EXPECT_LT(k, kParallelize6DRangeK);
+ EXPECT_LT(l, kParallelize6DRangeL);
+ EXPECT_LT(m, kParallelize6DRangeM);
+ EXPECT_LT(n, kParallelize6DRangeN);
+}
+
+TEST(Parallelize6D, SingleThreadPoolAllItemsInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_6d(
+ threadpool.get(),
+ CheckBounds6D,
+ nullptr,
+ kParallelize6DRangeI, kParallelize6DRangeJ, kParallelize6DRangeK, kParallelize6DRangeL, kParallelize6DRangeM, kParallelize6DRangeN,
+ 0 /* flags */);
+}
+
+TEST(Parallelize6D, MultiThreadPoolAllItemsInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_6d(
+ threadpool.get(),
+ CheckBounds6D,
+ nullptr,
+ kParallelize6DRangeI, kParallelize6DRangeJ, kParallelize6DRangeK, kParallelize6DRangeL, kParallelize6DRangeM, kParallelize6DRangeN,
+ 0 /* flags */);
+}
+
+static void SetTrue6D(std::atomic_bool* processed_indicators, size_t i, size_t j, size_t k, size_t l, size_t m, size_t n) {
+ const size_t linear_idx = ((((i * kParallelize6DRangeJ + j) * kParallelize6DRangeK + k) * kParallelize6DRangeL + l) * kParallelize6DRangeM + m) * kParallelize6DRangeN + n;
+ processed_indicators[linear_idx].store(true, std::memory_order_relaxed);
+}
+
+TEST(Parallelize6D, SingleThreadPoolAllItemsProcessed) {
+ std::vector<std::atomic_bool> indicators(kParallelize6DRangeI * kParallelize6DRangeJ * kParallelize6DRangeK * kParallelize6DRangeL * kParallelize6DRangeM * kParallelize6DRangeN);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_6d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_6d_t>(SetTrue6D),
+ static_cast<void*>(indicators.data()),
+ kParallelize6DRangeI, kParallelize6DRangeJ, kParallelize6DRangeK, kParallelize6DRangeL, kParallelize6DRangeM, kParallelize6DRangeN,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize6DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize6DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize6DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize6DRangeL; l++) {
+ for (size_t m = 0; m < kParallelize6DRangeM; m++) {
+ for (size_t n = 0; n < kParallelize6DRangeN; n++) {
+ const size_t linear_idx = ((((i * kParallelize6DRangeJ + j) * kParallelize6DRangeK + k) * kParallelize6DRangeL + l) * kParallelize6DRangeM + m) * kParallelize6DRangeN + n;
+ EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed))
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ", " << n << ") not processed";
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+TEST(Parallelize6D, MultiThreadPoolAllItemsProcessed) {
+ std::vector<std::atomic_bool> indicators(kParallelize6DRangeI * kParallelize6DRangeJ * kParallelize6DRangeK * kParallelize6DRangeL * kParallelize6DRangeM * kParallelize6DRangeN);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_6d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_6d_t>(SetTrue6D),
+ static_cast<void*>(indicators.data()),
+ kParallelize6DRangeI, kParallelize6DRangeJ, kParallelize6DRangeK, kParallelize6DRangeL, kParallelize6DRangeM, kParallelize6DRangeN,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize6DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize6DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize6DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize6DRangeL; l++) {
+ for (size_t m = 0; m < kParallelize6DRangeM; m++) {
+ for (size_t n = 0; n < kParallelize6DRangeN; n++) {
+ const size_t linear_idx = ((((i * kParallelize6DRangeJ + j) * kParallelize6DRangeK + k) * kParallelize6DRangeL + l) * kParallelize6DRangeM + m) * kParallelize6DRangeN + n;
+ EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed))
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ", " << n << ") not processed";
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+static void Increment6D(std::atomic_int* processed_counters, size_t i, size_t j, size_t k, size_t l, size_t m, size_t n) {
+ const size_t linear_idx = ((((i * kParallelize6DRangeJ + j) * kParallelize6DRangeK + k) * kParallelize6DRangeL + l) * kParallelize6DRangeM + m) * kParallelize6DRangeN + n;
+ processed_counters[linear_idx].fetch_add(1, std::memory_order_relaxed);
+}
+
+TEST(Parallelize6D, SingleThreadPoolEachItemProcessedOnce) {
+ std::vector<std::atomic_int> counters(kParallelize6DRangeI * kParallelize6DRangeJ * kParallelize6DRangeK * kParallelize6DRangeL * kParallelize6DRangeM * kParallelize6DRangeN);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_6d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_6d_t>(Increment6D),
+ static_cast<void*>(counters.data()),
+ kParallelize6DRangeI, kParallelize6DRangeJ, kParallelize6DRangeK, kParallelize6DRangeL, kParallelize6DRangeM, kParallelize6DRangeN,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize6DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize6DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize6DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize6DRangeL; l++) {
+ for (size_t m = 0; m < kParallelize6DRangeM; m++) {
+ for (size_t n = 0; n < kParallelize6DRangeN; n++) {
+ const size_t linear_idx = ((((i * kParallelize6DRangeJ + j) * kParallelize6DRangeK + k) * kParallelize6DRangeL + l) * kParallelize6DRangeM + m) * kParallelize6DRangeN + n;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ", " << n << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)";
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+TEST(Parallelize6D, MultiThreadPoolEachItemProcessedOnce) {
+ std::vector<std::atomic_int> counters(kParallelize6DRangeI * kParallelize6DRangeJ * kParallelize6DRangeK * kParallelize6DRangeL * kParallelize6DRangeM * kParallelize6DRangeN);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_6d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_6d_t>(Increment6D),
+ static_cast<void*>(counters.data()),
+ kParallelize6DRangeI, kParallelize6DRangeJ, kParallelize6DRangeK, kParallelize6DRangeL, kParallelize6DRangeM, kParallelize6DRangeN,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize6DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize6DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize6DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize6DRangeL; l++) {
+ for (size_t m = 0; m < kParallelize6DRangeM; m++) {
+ for (size_t n = 0; n < kParallelize6DRangeN; n++) {
+ const size_t linear_idx = ((((i * kParallelize6DRangeJ + j) * kParallelize6DRangeK + k) * kParallelize6DRangeL + l) * kParallelize6DRangeM + m) * kParallelize6DRangeN + n;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ", " << n << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)";
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+TEST(Parallelize6D, SingleThreadPoolEachItemProcessedMultipleTimes) {
+ std::vector<std::atomic_int> counters(kParallelize6DRangeI * kParallelize6DRangeJ * kParallelize6DRangeK * kParallelize6DRangeL * kParallelize6DRangeM * kParallelize6DRangeN);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ for (size_t iteration = 0; iteration < kIncrementIterations6D; iteration++) {
+ pthreadpool_parallelize_6d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_6d_t>(Increment6D),
+ static_cast<void*>(counters.data()),
+ kParallelize6DRangeI, kParallelize6DRangeJ, kParallelize6DRangeK, kParallelize6DRangeL, kParallelize6DRangeM, kParallelize6DRangeN,
+ 0 /* flags */);
+ }
+
+ for (size_t i = 0; i < kParallelize6DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize6DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize6DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize6DRangeL; l++) {
+ for (size_t m = 0; m < kParallelize6DRangeM; m++) {
+ for (size_t n = 0; n < kParallelize6DRangeN; n++) {
+ const size_t linear_idx = ((((i * kParallelize6DRangeJ + j) * kParallelize6DRangeK + k) * kParallelize6DRangeL + l) * kParallelize6DRangeM + m) * kParallelize6DRangeN + n;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations6D)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ", " << n << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times "
+ << "(expected: " << kIncrementIterations6D << ")";
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+TEST(Parallelize6D, MultiThreadPoolEachItemProcessedMultipleTimes) {
+ std::vector<std::atomic_int> counters(kParallelize6DRangeI * kParallelize6DRangeJ * kParallelize6DRangeK * kParallelize6DRangeL * kParallelize6DRangeM * kParallelize6DRangeN);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ for (size_t iteration = 0; iteration < kIncrementIterations6D; iteration++) {
+ pthreadpool_parallelize_6d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_6d_t>(Increment6D),
+ static_cast<void*>(counters.data()),
+ kParallelize6DRangeI, kParallelize6DRangeJ, kParallelize6DRangeK, kParallelize6DRangeL, kParallelize6DRangeM, kParallelize6DRangeN,
+ 0 /* flags */);
+ }
+
+ for (size_t i = 0; i < kParallelize6DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize6DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize6DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize6DRangeL; l++) {
+ for (size_t m = 0; m < kParallelize6DRangeM; m++) {
+ for (size_t n = 0; n < kParallelize6DRangeN; n++) {
+ const size_t linear_idx = ((((i * kParallelize6DRangeJ + j) * kParallelize6DRangeK + k) * kParallelize6DRangeL + l) * kParallelize6DRangeM + m) * kParallelize6DRangeN + n;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations6D)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ", " << n << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times "
+ << "(expected: " << kIncrementIterations6D << ")";
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+static void IncrementSame6D(std::atomic_int* num_processed_items, size_t i, size_t j, size_t k, size_t l, size_t m, size_t n) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+}
+
+TEST(Parallelize6D, MultiThreadPoolHighContention) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_6d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_6d_t>(IncrementSame6D),
+ static_cast<void*>(&num_processed_items),
+ kParallelize6DRangeI, kParallelize6DRangeJ, kParallelize6DRangeK, kParallelize6DRangeL, kParallelize6DRangeM, kParallelize6DRangeN,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize6DRangeI * kParallelize6DRangeJ * kParallelize6DRangeK * kParallelize6DRangeL * kParallelize6DRangeM * kParallelize6DRangeN);
+}
+
+static void WorkImbalance6D(std::atomic_int* num_processed_items, size_t i, size_t j, size_t k, size_t l, size_t m, size_t n) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+ if (i == 0 && j == 0 && k == 0 && l == 0 && m == 0 && n == 0) {
+ /* Spin-wait until all items are computed */
+ while (num_processed_items->load(std::memory_order_relaxed) != kParallelize6DRangeI * kParallelize6DRangeJ * kParallelize6DRangeK * kParallelize6DRangeL * kParallelize6DRangeM * kParallelize6DRangeN) {
+ std::atomic_thread_fence(std::memory_order_acquire);
+ }
+ }
+}
+
+TEST(Parallelize6D, MultiThreadPoolWorkStealing) {
+ std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_6d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_6d_t>(WorkImbalance6D),
+ static_cast<void*>(&num_processed_items),
+ kParallelize6DRangeI, kParallelize6DRangeJ, kParallelize6DRangeK, kParallelize6DRangeL, kParallelize6DRangeM, kParallelize6DRangeN,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize6DRangeI * kParallelize6DRangeJ * kParallelize6DRangeK * kParallelize6DRangeL * kParallelize6DRangeM * kParallelize6DRangeN);
+}
+
+static void ComputeNothing6DTile1D(void*, size_t, size_t, size_t, size_t, size_t, size_t, size_t) {
+}
+
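+/* In the 6D-tile-1D variant only the innermost dimension n is tiled: tasks
+ * receive (i, j, k, l, m, start_n, tile_n). */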
+TEST(Parallelize6DTile1D, SingleThreadPoolCompletes) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_6d_tile_1d(threadpool.get(),
+ ComputeNothing6DTile1D,
+ nullptr,
+ kParallelize6DTile1DRangeI, kParallelize6DTile1DRangeJ, kParallelize6DTile1DRangeK, kParallelize6DTile1DRangeL, kParallelize6DTile1DRangeM, kParallelize6DTile1DRangeN,
+ kParallelize6DTile1DTileN,
+ 0 /* flags */);
+}
+
+TEST(Parallelize6DTile1D, MultiThreadPoolCompletes) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_6d_tile_1d(
+ threadpool.get(),
+ ComputeNothing6DTile1D,
+ nullptr,
+ kParallelize6DTile1DRangeI, kParallelize6DTile1DRangeJ, kParallelize6DTile1DRangeK, kParallelize6DTile1DRangeL, kParallelize6DTile1DRangeM, kParallelize6DTile1DRangeN,
+ kParallelize6DTile1DTileN,
+ 0 /* flags */);
+}
+
+static void CheckBounds6DTile1D(void*, size_t i, size_t j, size_t k, size_t l, size_t m, size_t start_n, size_t tile_n) {
+ EXPECT_LT(i, kParallelize6DTile1DRangeI);
+ EXPECT_LT(j, kParallelize6DTile1DRangeJ);
+ EXPECT_LT(k, kParallelize6DTile1DRangeK);
+ EXPECT_LT(l, kParallelize6DTile1DRangeL);
+ EXPECT_LT(m, kParallelize6DTile1DRangeM);
+ EXPECT_LT(start_n, kParallelize6DTile1DRangeN);
+ EXPECT_LE(start_n + tile_n, kParallelize6DTile1DRangeN);
+}
+
+TEST(Parallelize6DTile1D, SingleThreadPoolAllItemsInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_6d_tile_1d(
+ threadpool.get(),
+ CheckBounds6DTile1D,
+ nullptr,
+ kParallelize6DTile1DRangeI, kParallelize6DTile1DRangeJ, kParallelize6DTile1DRangeK, kParallelize6DTile1DRangeL, kParallelize6DTile1DRangeM, kParallelize6DTile1DRangeN,
+ kParallelize6DTile1DTileN,
+ 0 /* flags */);
+}
+
+TEST(Parallelize6DTile1D, MultiThreadPoolAllItemsInBounds) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_6d_tile_1d(
+ threadpool.get(),
+ CheckBounds6DTile1D,
+ nullptr,
+ kParallelize6DTile1DRangeI, kParallelize6DTile1DRangeJ, kParallelize6DTile1DRangeK, kParallelize6DTile1DRangeL, kParallelize6DTile1DRangeM, kParallelize6DTile1DRangeN,
+ kParallelize6DTile1DTileN,
+ 0 /* flags */);
+}
+
+static void CheckTiling6DTile1D(void*, size_t i, size_t j, size_t k, size_t l, size_t m, size_t start_n, size_t tile_n) {
+ EXPECT_GT(tile_n, 0);
+ EXPECT_LE(tile_n, kParallelize6DTile1DTileN);
+ EXPECT_EQ(start_n % kParallelize6DTile1DTileN, 0);
+ EXPECT_EQ(tile_n, std::min<size_t>(kParallelize6DTile1DTileN, kParallelize6DTile1DRangeN - start_n));
+}
+
+TEST(Parallelize6DTile1D, SingleThreadPoolUniformTiling) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_6d_tile_1d(
+ threadpool.get(),
+ CheckTiling6DTile1D,
+ nullptr,
+ kParallelize6DTile1DRangeI, kParallelize6DTile1DRangeJ, kParallelize6DTile1DRangeK, kParallelize6DTile1DRangeL, kParallelize6DTile1DRangeM, kParallelize6DTile1DRangeN,
+ kParallelize6DTile1DTileN,
+ 0 /* flags */);
+}
+
+TEST(Parallelize6DTile1D, MultiThreadPoolUniformTiling) {
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_6d_tile_1d(
+ threadpool.get(),
+ CheckTiling6DTile1D,
+ nullptr,
+ kParallelize6DTile1DRangeI, kParallelize6DTile1DRangeJ, kParallelize6DTile1DRangeK, kParallelize6DTile1DRangeL, kParallelize6DTile1DRangeM, kParallelize6DTile1DRangeN,
+ kParallelize6DTile1DTileN,
+ 0 /* flags */);
+}
+
+static void SetTrue6DTile1D(std::atomic_bool* processed_indicators, size_t i, size_t j, size_t k, size_t l, size_t m, size_t start_n, size_t tile_n) {
+ for (size_t n = start_n; n < start_n + tile_n; n++) {
+ const size_t linear_idx = ((((i * kParallelize6DTile1DRangeJ + j) * kParallelize6DTile1DRangeK + k) * kParallelize6DTile1DRangeL + l) * kParallelize6DTile1DRangeM + m) * kParallelize6DTile1DRangeN + n;
+ processed_indicators[linear_idx].store(true, std::memory_order_relaxed);
+ }
+}
+
+TEST(Parallelize6DTile1D, SingleThreadPoolAllItemsProcessed) {
+ std::vector<std::atomic_bool> indicators(kParallelize6DTile1DRangeI * kParallelize6DTile1DRangeJ * kParallelize6DTile1DRangeK * kParallelize6DTile1DRangeL * kParallelize6DTile1DRangeM * kParallelize6DTile1DRangeN);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_6d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_6d_tile_1d_t>(SetTrue6DTile1D),
+ static_cast<void*>(indicators.data()),
+ kParallelize6DTile1DRangeI, kParallelize6DTile1DRangeJ, kParallelize6DTile1DRangeK, kParallelize6DTile1DRangeL, kParallelize6DTile1DRangeM, kParallelize6DTile1DRangeN,
+ kParallelize6DTile1DTileN,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize6DTile1DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize6DTile1DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize6DTile1DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize6DTile1DRangeL; l++) {
+ for (size_t m = 0; m < kParallelize6DTile1DRangeM; m++) {
+ for (size_t n = 0; n < kParallelize6DTile1DRangeN; n++) {
+ const size_t linear_idx = ((((i * kParallelize6DTile1DRangeJ + j) * kParallelize6DTile1DRangeK + k) * kParallelize6DTile1DRangeL + l) * kParallelize6DTile1DRangeM + m) * kParallelize6DTile1DRangeN + n;
+ EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed))
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ", " << n << ") not processed";
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+TEST(Parallelize6DTile1D, MultiThreadPoolAllItemsProcessed) {
+ std::vector<std::atomic_bool> indicators(kParallelize6DTile1DRangeI * kParallelize6DTile1DRangeJ * kParallelize6DTile1DRangeK * kParallelize6DTile1DRangeL * kParallelize6DTile1DRangeM * kParallelize6DTile1DRangeN);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_6d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_6d_tile_1d_t>(SetTrue6DTile1D),
+ static_cast<void*>(indicators.data()),
+ kParallelize6DTile1DRangeI, kParallelize6DTile1DRangeJ, kParallelize6DTile1DRangeK, kParallelize6DTile1DRangeL, kParallelize6DTile1DRangeM, kParallelize6DTile1DRangeN,
+ kParallelize6DTile1DTileN,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize6DTile1DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize6DTile1DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize6DTile1DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize6DTile1DRangeL; l++) {
+ for (size_t m = 0; m < kParallelize6DTile1DRangeM; m++) {
+ for (size_t n = 0; n < kParallelize6DTile1DRangeN; n++) {
+ const size_t linear_idx = ((((i * kParallelize6DTile1DRangeJ + j) * kParallelize6DTile1DRangeK + k) * kParallelize6DTile1DRangeL + l) * kParallelize6DTile1DRangeM + m) * kParallelize6DTile1DRangeN + n;
+ EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed))
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ", " << n << ") not processed";
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+static void Increment6DTile1D(std::atomic_int* processed_counters, size_t i, size_t j, size_t k, size_t l, size_t m, size_t start_n, size_t tile_n) {
+ for (size_t n = start_n; n < start_n + tile_n; n++) {
+ const size_t linear_idx = ((((i * kParallelize6DTile1DRangeJ + j) * kParallelize6DTile1DRangeK + k) * kParallelize6DTile1DRangeL + l) * kParallelize6DTile1DRangeM + m) * kParallelize6DTile1DRangeN + n;
+ processed_counters[linear_idx].fetch_add(1, std::memory_order_relaxed);
+ }
+}
+
+TEST(Parallelize6DTile1D, SingleThreadPoolEachItemProcessedOnce) {
+ std::vector<std::atomic_int> counters(kParallelize6DTile1DRangeI * kParallelize6DTile1DRangeJ * kParallelize6DTile1DRangeK * kParallelize6DTile1DRangeL * kParallelize6DTile1DRangeM * kParallelize6DTile1DRangeN);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ pthreadpool_parallelize_6d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_6d_tile_1d_t>(Increment6DTile1D),
+ static_cast<void*>(counters.data()),
+ kParallelize6DTile1DRangeI, kParallelize6DTile1DRangeJ, kParallelize6DTile1DRangeK, kParallelize6DTile1DRangeL, kParallelize6DTile1DRangeM, kParallelize6DTile1DRangeN,
+ kParallelize6DTile1DTileN,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize6DTile1DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize6DTile1DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize6DTile1DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize6DTile1DRangeL; l++) {
+ for (size_t m = 0; m < kParallelize6DTile1DRangeM; m++) {
+ for (size_t n = 0; n < kParallelize6DTile1DRangeN; n++) {
+ const size_t linear_idx = ((((i * kParallelize6DTile1DRangeJ + j) * kParallelize6DTile1DRangeK + k) * kParallelize6DTile1DRangeL + l) * kParallelize6DTile1DRangeM + m) * kParallelize6DTile1DRangeN + n;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ", " << n << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)";
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+TEST(Parallelize6DTile1D, MultiThreadPoolEachItemProcessedOnce) {
+ std::vector<std::atomic_int> counters(kParallelize6DTile1DRangeI * kParallelize6DTile1DRangeJ * kParallelize6DTile1DRangeK * kParallelize6DTile1DRangeL * kParallelize6DTile1DRangeM * kParallelize6DTile1DRangeN);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_6d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_6d_tile_1d_t>(Increment6DTile1D),
+ static_cast<void*>(counters.data()),
+ kParallelize6DTile1DRangeI, kParallelize6DTile1DRangeJ, kParallelize6DTile1DRangeK, kParallelize6DTile1DRangeL, kParallelize6DTile1DRangeM, kParallelize6DTile1DRangeN,
+ kParallelize6DTile1DTileN,
+ 0 /* flags */);
+
+ for (size_t i = 0; i < kParallelize6DTile1DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize6DTile1DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize6DTile1DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize6DTile1DRangeL; l++) {
+ for (size_t m = 0; m < kParallelize6DTile1DRangeM; m++) {
+ for (size_t n = 0; n < kParallelize6DTile1DRangeN; n++) {
+ const size_t linear_idx = ((((i * kParallelize6DTile1DRangeJ + j) * kParallelize6DTile1DRangeK + k) * kParallelize6DTile1DRangeL + l) * kParallelize6DTile1DRangeM + m) * kParallelize6DTile1DRangeN + n;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ", " << n << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)";
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+TEST(Parallelize6DTile1D, SingleThreadPoolEachItemProcessedMultipleTimes) {
+ std::vector<std::atomic_int> counters(kParallelize6DTile1DRangeI * kParallelize6DTile1DRangeJ * kParallelize6DTile1DRangeK * kParallelize6DTile1DRangeL * kParallelize6DTile1DRangeM * kParallelize6DTile1DRangeN);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ for (size_t iteration = 0; iteration < kIncrementIterations6D; iteration++) {
+ pthreadpool_parallelize_6d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_6d_tile_1d_t>(Increment6DTile1D),
+ static_cast<void*>(counters.data()),
+ kParallelize6DTile1DRangeI, kParallelize6DTile1DRangeJ, kParallelize6DTile1DRangeK, kParallelize6DTile1DRangeL, kParallelize6DTile1DRangeM, kParallelize6DTile1DRangeN,
+ kParallelize6DTile1DTileN,
+ 0 /* flags */);
+ }
+
+ for (size_t i = 0; i < kParallelize6DTile1DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize6DTile1DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize6DTile1DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize6DTile1DRangeL; l++) {
+ for (size_t m = 0; m < kParallelize6DTile1DRangeM; m++) {
+ for (size_t n = 0; n < kParallelize6DTile1DRangeN; n++) {
+ const size_t linear_idx = ((((i * kParallelize6DTile1DRangeJ + j) * kParallelize6DTile1DRangeK + k) * kParallelize6DTile1DRangeL + l) * kParallelize6DTile1DRangeM + m) * kParallelize6DTile1DRangeN + n;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations6D)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ", " << n << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times "
+ << "(expected: " << kIncrementIterations6D << ")";
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+TEST(Parallelize6DTile1D, MultiThreadPoolEachItemProcessedMultipleTimes) {
+ std::vector<std::atomic_int> counters(kParallelize6DTile1DRangeI * kParallelize6DTile1DRangeJ * kParallelize6DTile1DRangeK * kParallelize6DTile1DRangeL * kParallelize6DTile1DRangeM * kParallelize6DTile1DRangeN);
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ for (size_t iteration = 0; iteration < kIncrementIterations6D; iteration++) {
+ pthreadpool_parallelize_6d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_6d_tile_1d_t>(Increment6DTile1D),
+ static_cast<void*>(counters.data()),
+ kParallelize6DTile1DRangeI, kParallelize6DTile1DRangeJ, kParallelize6DTile1DRangeK, kParallelize6DTile1DRangeL, kParallelize6DTile1DRangeM, kParallelize6DTile1DRangeN,
+ kParallelize6DTile1DTileN,
+ 0 /* flags */);
+ }
+
+ for (size_t i = 0; i < kParallelize6DTile1DRangeI; i++) {
+ for (size_t j = 0; j < kParallelize6DTile1DRangeJ; j++) {
+ for (size_t k = 0; k < kParallelize6DTile1DRangeK; k++) {
+ for (size_t l = 0; l < kParallelize6DTile1DRangeL; l++) {
+ for (size_t m = 0; m < kParallelize6DTile1DRangeM; m++) {
+ for (size_t n = 0; n < kParallelize6DTile1DRangeN; n++) {
+ const size_t linear_idx = ((((i * kParallelize6DTile1DRangeJ + j) * kParallelize6DTile1DRangeK + k) * kParallelize6DTile1DRangeL + l) * kParallelize6DTile1DRangeM + m) * kParallelize6DTile1DRangeN + n;
+ EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations6D)
+ << "Element (" << i << ", " << j << ", " << k << ", " << l << ", " << m << ", " << n << ") was processed "
+ << counters[linear_idx].load(std::memory_order_relaxed) << " times "
+ << "(expected: " << kIncrementIterations6D << ")";
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
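+/* High-contention task: every invocation increments a single shared atomic counter, once per element of the tile, instead of using per-item counters. */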
+static void IncrementSame6DTile1D(std::atomic_int* num_processed_items, size_t i, size_t j, size_t k, size_t l, size_t m, size_t start_n, size_t tile_n) {
+ for (size_t n = start_n; n < start_n + tile_n; n++) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+ }
+}
+
+TEST(Parallelize6DTile1D, MultiThreadPoolHighContention) {
+	std::atomic_int num_processed_items{0};
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_6d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_6d_tile_1d_t>(IncrementSame6DTile1D),
+ static_cast<void*>(&num_processed_items),
+ kParallelize6DTile1DRangeI, kParallelize6DTile1DRangeJ, kParallelize6DTile1DRangeK, kParallelize6DTile1DRangeL, kParallelize6DTile1DRangeM, kParallelize6DTile1DRangeN,
+ kParallelize6DTile1DTileN,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize6DTile1DRangeI * kParallelize6DTile1DRangeJ * kParallelize6DTile1DRangeK * kParallelize6DTile1DRangeL * kParallelize6DTile1DRangeM * kParallelize6DTile1DRangeN);
+}
+
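+/* Work-imbalance task: the tile containing item (0, 0, 0, 0, 0, 0) spin-waits until all other items have been processed, forcing the remaining tiles onto the other threads. */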
+static void WorkImbalance6DTile1D(std::atomic_int* num_processed_items, size_t i, size_t j, size_t k, size_t l, size_t m, size_t start_n, size_t tile_n) {
+ num_processed_items->fetch_add(tile_n, std::memory_order_relaxed);
+ if (i == 0 && j == 0 && k == 0 && l == 0 && m == 0 && start_n == 0) {
+ /* Spin-wait until all items are computed */
+ while (num_processed_items->load(std::memory_order_relaxed) != kParallelize6DTile1DRangeI * kParallelize6DTile1DRangeJ * kParallelize6DTile1DRangeK * kParallelize6DTile1DRangeL * kParallelize6DTile1DRangeM * kParallelize6DTile1DRangeN) {
+ std::atomic_thread_fence(std::memory_order_acquire);
+ }
+ }
+}
+
+TEST(Parallelize6DTile1D, MultiThreadPoolWorkStealing) {
+	std::atomic_int num_processed_items{0};
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_6d_tile_1d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_6d_tile_1d_t>(WorkImbalance6DTile1D),
+ static_cast<void*>(&num_processed_items),
+ kParallelize6DTile1DRangeI, kParallelize6DTile1DRangeJ, kParallelize6DTile1DRangeK, kParallelize6DTile1DRangeL, kParallelize6DTile1DRangeM, kParallelize6DTile1DRangeN,
+ kParallelize6DTile1DTileN,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize6DTile1DRangeI * kParallelize6DTile1DRangeJ * kParallelize6DTile1DRangeK * kParallelize6DTile1DRangeL * kParallelize6DTile1DRangeM * kParallelize6DTile1DRangeN);
+}
+
static void ComputeNothing6DTile2D(void*, size_t, size_t, size_t, size_t, size_t, size_t, size_t, size_t) {
}
@@ -2821,6 +7339,35 @@ TEST(Parallelize6DTile2D, MultiThreadPoolEachItemProcessedMultipleTimes) {
}
}
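+/* High-contention task for the 2D-tile case: increments one shared atomic counter once per element of the M-by-N tile. */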
+static void IncrementSame6DTile2D(std::atomic_int* num_processed_items, size_t i, size_t j, size_t k, size_t l, size_t start_m, size_t start_n, size_t tile_m, size_t tile_n) {
+ for (size_t m = start_m; m < start_m + tile_m; m++) {
+ for (size_t n = start_n; n < start_n + tile_n; n++) {
+ num_processed_items->fetch_add(1, std::memory_order_relaxed);
+ }
+ }
+}
+
+TEST(Parallelize6DTile2D, MultiThreadPoolHighContention) {
+	std::atomic_int num_processed_items{0};
+
+ auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+ ASSERT_TRUE(threadpool.get());
+
+ if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+ GTEST_SKIP();
+ }
+
+ pthreadpool_parallelize_6d_tile_2d(
+ threadpool.get(),
+ reinterpret_cast<pthreadpool_task_6d_tile_2d_t>(IncrementSame6DTile2D),
+ static_cast<void*>(&num_processed_items),
+ kParallelize6DTile2DRangeI, kParallelize6DTile2DRangeJ, kParallelize6DTile2DRangeK, kParallelize6DTile2DRangeL, kParallelize6DTile2DRangeM, kParallelize6DTile2DRangeN,
+ kParallelize6DTile2DTileM, kParallelize6DTile2DTileN,
+ 0 /* flags */);
+ EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize6DTile2DRangeI * kParallelize6DTile2DRangeJ * kParallelize6DTile2DRangeK * kParallelize6DTile2DRangeL * kParallelize6DTile2DRangeM * kParallelize6DTile2DRangeN);
+}
+
static void WorkImbalance6DTile2D(std::atomic_int* num_processed_items, size_t i, size_t j, size_t k, size_t l, size_t start_m, size_t start_n, size_t tile_m, size_t tile_n) {
num_processed_items->fetch_add(tile_m * tile_n, std::memory_order_relaxed);
if (i == 0 && j == 0 && k == 0 && l == 0 && start_m == 0 && start_n == 0) {