From 62733f2a2ac445d1b8372fb23281207b470eb0a2 Mon Sep 17 00:00:00 2001 From: Daniele <57776841+daniandtheweb@users.noreply.github.com> Date: Sun, 9 Feb 2025 23:30:13 +0000 Subject: [PATCH 01/12] vulkan: improve im2col performance --- .../ggml-vulkan/vulkan-shaders/im2col.comp | 50 ++++++++++++------- 1 file changed, 32 insertions(+), 18 deletions(-) diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/im2col.comp b/ggml/src/ggml-vulkan/vulkan-shaders/im2col.comp index 122b1e93fb4..3d6370ea472 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/im2col.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/im2col.comp @@ -40,6 +40,20 @@ void main() { const uint batch = gl_GlobalInvocationID.z / p.IC; const uint ic = gl_GlobalInvocationID.z % p.IC; + const uint src_base = ic * p.offset_delta + batch * p.batch_offset; + const uint dst_base = ((batch * p.OH + oh) * p.OW) * p.CHW + ic * (p.KW * p.KH); + const int oh_s1 = int(oh) * p.s1; + const uint ksize = p.OW * (p.KH > 1 ? p.KW : 1); + + const uint base_linear_idx = gidx * NUM_ITER; + + const uint max_ky = ksize / p.OW; + + uint current_kx = base_linear_idx / ksize; + const uint rem = base_linear_idx - (current_kx * ksize); + uint current_ky = rem / p.OW; + uint current_ix = rem % p.OW; + A_TYPE values[NUM_ITER]; uint offset_dst[NUM_ITER]; [[unroll]] for (uint idx = 0; idx < NUM_ITER; ++idx) { @@ -48,36 +62,36 @@ void main() { [[unroll]] for (uint idx = 0; idx < NUM_ITER; ++idx) { - const uint i = gidx * NUM_ITER + idx; + const uint linear_idx = base_linear_idx + idx; - const uint ksize = p.OW * (p.KH > 1 ? 
p.KW : 1); - const uint kx = i / ksize; - const uint kd = kx * ksize; - const uint ky = (i - kd) / p.OW; - const uint ix = i % p.OW; + if (linear_idx >= p.pelements) { + continue; + } - const uint iiw = ix * p.s0 + kx * p.d0 - p.p0; - const uint iih = oh * p.s1 + ky * p.d1 - p.p1; + const int iiw = int(current_ix) * p.s0 + int(current_kx) * p.d0 - p.p0; + const int iih = oh_s1 + int(current_ky) * p.d1 - p.p1; - offset_dst[idx] = - ((batch * p.OH + oh) * p.OW + ix) * p.CHW + - (ic * (p.KW * p.KH) + ky * p.KW + kx); + offset_dst[idx] = dst_base + current_ix * p.CHW + current_ky * p.KW + current_kx; - if (i >= p.pelements) { - continue; + const bool valid = (iih >= 0 && iih < int(p.IH)) && (iiw >= 0 && iiw < int(p.IW)); + if (valid) { + values[idx] = data_a[src_base + uint(iih) * p.IW + uint(iiw)]; } - if (iih < p.IH && iiw < p.IW) { - const uint offset_src = ic * p.offset_delta + batch * p.batch_offset; - values[idx] = data_a[offset_src + iih * p.IW + iiw]; + if (++current_ix == p.OW) { + current_ix = 0; + if (++current_ky == max_ky) { + current_ky = 0; + current_kx++; + } } } [[unroll]] for (uint idx = 0; idx < NUM_ITER; ++idx) { - const uint i = gidx * NUM_ITER + idx; + const uint linear_idx = base_linear_idx + idx; - if (i >= p.pelements) { + if (linear_idx >= p.pelements) { continue; } From 35f63698b8397356a1e7a0fbd68030a2cf134218 Mon Sep 17 00:00:00 2001 From: Daniele <57776841+daniandtheweb@users.noreply.github.com> Date: Fri, 14 Feb 2025 17:30:27 +0100 Subject: [PATCH 02/12] Force subgroup 32 on RX 5700 and subgroup 64 for im2col --- ggml/src/ggml-vulkan/ggml-vulkan.cpp | 39 +++++++++++++++++++++++++--- 1 file changed, 35 insertions(+), 4 deletions(-) diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index d32ba4efbc9..3262f5f4da4 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -1543,11 +1543,18 @@ static void ggml_vk_load_shaders(vk_device& device) { 
device->pipeline_matmul_id_f32 = std::make_shared(); } + vk::PhysicalDeviceProperties2 props2; + device->physical_device.getProperties2(&props2); + std::string device_name = props2.properties.deviceName.data(); std::vector> compiles; auto const &ggml_vk_create_pipeline = [&](vk_device& device, vk_pipeline& pipeline, const std::string &name, size_t spv_size, const void* spv_data, const std::string &entrypoint, uint32_t parameter_count, uint32_t push_constant_size, std::array wg_denoms, const std::vector& specialization_constants, uint32_t align, bool disable_robustness = false, bool require_full_subgroups = false, uint32_t required_subgroup_size = 0) { + if (required_subgroup_size == 0) { + required_subgroup_size = (device_name.find("RX 5700") != std::string::npos) ? 32 : required_subgroup_size; + } + if (!pipeline) { pipeline = std::make_shared(); pipeline->name = name; @@ -1573,6 +1580,20 @@ static void ggml_vk_load_shaders(vk_device& device) { parameter_count, wg_denoms, specialization_constants, disable_robustness, require_full_subgroups, required_subgroup_size)); }; + // New lambda for pipelines with subgroup size 64. 
+ auto const &ggml_vk_create_pipeline_64 = [&](vk_device& device, vk_pipeline& pipeline, + const std::string &name, size_t spv_size, const void* spv_data, + const std::string &entrypoint, uint32_t parameter_count, + uint32_t push_constant_size, std::array wg_denoms, + const std::vector& specialization_constants, uint32_t align, + bool disable_robustness = false, bool require_full_subgroups = false) + { + ggml_vk_create_pipeline(device, pipeline, name, spv_size, spv_data, entrypoint, + parameter_count, push_constant_size, wg_denoms, + specialization_constants, align, disable_robustness, + require_full_subgroups, 64); + }; + #if defined(VK_NV_cooperative_matrix2) && defined(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT) if (device->coopmat2) { @@ -2151,11 +2172,21 @@ static void ggml_vk_load_shaders(vk_device& device) { ggml_vk_create_pipeline(device, device->pipeline_sum_rows_f32, "sum_rows_f32", sum_rows_f32_len, sum_rows_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, { device->subgroup_size }, 1); - ggml_vk_create_pipeline(device, device->pipeline_im2col_f32, "im2col_f32", im2col_f32_len, im2col_f32_data, "main", 2, sizeof(vk_op_im2col_push_constants), {512, 1, 1}, { device->subgroup_size }, 1, true); - if (device->float_controls_rte_fp16) { - ggml_vk_create_pipeline(device, device->pipeline_im2col_f32_f16, "im2col_f32_f16", im2col_f32_f16_rte_len, im2col_f32_f16_rte_data, "main", 2, sizeof(vk_op_im2col_push_constants), {512, 1, 1}, { device->subgroup_size }, 1, true); + // Workaround needed to speedup im2col on RX 5700 + if (device_name.find("RX 5700") != std::string::npos) { + ggml_vk_create_pipeline_64(device, device->pipeline_im2col_f32, "im2col_f32", im2col_f32_len, im2col_f32_data, "main", 2, sizeof(vk_op_im2col_push_constants), {512, 1, 1}, { device->subgroup_size }, 1, true); + if (device->float_controls_rte_fp16) { + ggml_vk_create_pipeline_64(device, device->pipeline_im2col_f32_f16, "im2col_f32_f16", im2col_f32_f16_rte_len, im2col_f32_f16_rte_data, 
"main", 2, sizeof(vk_op_im2col_push_constants), {512, 1, 1}, { device->subgroup_size }, 1, true); + } else { + ggml_vk_create_pipeline_64(device, device->pipeline_im2col_f32_f16, "im2col_f32_f16", im2col_f32_f16_len, im2col_f32_f16_data, "main", 2, sizeof(vk_op_im2col_push_constants), {512, 1, 1}, { device->subgroup_size }, 1, true); + } } else { - ggml_vk_create_pipeline(device, device->pipeline_im2col_f32_f16, "im2col_f32_f16", im2col_f32_f16_len, im2col_f32_f16_data, "main", 2, sizeof(vk_op_im2col_push_constants), {512, 1, 1}, { device->subgroup_size }, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_im2col_f32, "im2col_f32", im2col_f32_len, im2col_f32_data, "main", 2, sizeof(vk_op_im2col_push_constants), {512, 1, 1}, { device->subgroup_size }, 1, true); + if (device->float_controls_rte_fp16) { + ggml_vk_create_pipeline(device, device->pipeline_im2col_f32_f16, "im2col_f32_f16", im2col_f32_f16_rte_len, im2col_f32_f16_rte_data, "main", 2, sizeof(vk_op_im2col_push_constants), {512, 1, 1}, { device->subgroup_size }, 1, true); + } else { + ggml_vk_create_pipeline(device, device->pipeline_im2col_f32_f16, "im2col_f32_f16", im2col_f32_f16_len, im2col_f32_f16_data, "main", 2, sizeof(vk_op_im2col_push_constants), {512, 1, 1}, { device->subgroup_size }, 1, true); + } } ggml_vk_create_pipeline(device, device->pipeline_timestep_embedding_f32, "timestep_embedding_f32", timestep_embedding_f32_len, timestep_embedding_f32_data, "main", 2, sizeof(vk_op_timestep_embedding_push_constants), {256, 1, 1}, {}, 1); From 293edefd46a845a3e851996b20e94ae74ebd7328 Mon Sep 17 00:00:00 2001 From: Daniele <57776841+daniandtheweb@users.noreply.github.com> Date: Fri, 14 Feb 2025 17:36:46 +0100 Subject: [PATCH 03/12] Fixed uint --- ggml/src/ggml-vulkan/vulkan-shaders/im2col.comp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/im2col.comp b/ggml/src/ggml-vulkan/vulkan-shaders/im2col.comp index 3d6370ea472..639298ce65b 100644 
--- a/ggml/src/ggml-vulkan/vulkan-shaders/im2col.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/im2col.comp @@ -68,8 +68,8 @@ void main() { continue; } - const int iiw = int(current_ix) * p.s0 + int(current_kx) * p.d0 - p.p0; - const int iih = oh_s1 + int(current_ky) * p.d1 - p.p1; + const uint iiw = int(current_ix) * p.s0 + int(current_kx) * p.d0 - p.p0; + const uint iih = oh_s1 + int(current_ky) * p.d1 - p.p1; offset_dst[idx] = dst_base + current_ix * p.CHW + current_ky * p.KW + current_kx; From 14ea4fadbcd65ea4c18cf014b1893e7f48ab3a51 Mon Sep 17 00:00:00 2001 From: Daniele <57776841+daniandtheweb@users.noreply.github.com> Date: Fri, 14 Feb 2025 22:17:40 +0100 Subject: [PATCH 04/12] Helper function to set subgroup size --- ggml/src/ggml-vulkan/ggml-vulkan.cpp | 64 ++++++++++++++++------------ 1 file changed, 36 insertions(+), 28 deletions(-) diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index 3262f5f4da4..ad28acb6c18 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -1423,6 +1423,36 @@ static bool ggml_vk_matmul_shmem_support(const vk_device& device, const std::vec return supported; } +// Define a configuration map per GPU. +// Outer key: GPU identifier (e.g. "RX 5700"). +// Inner map: key is pipeline name; value is the subgroup size. +static std::unordered_map> gpu_pipeline_config = { + {"RX 5700", { + {"im2col_f32", 64}, + {"im2col_f32_f16", 64} + }} +}; + +// Helper function defined at namespace scope. 
+static uint32_t get_subgroup_size(const std::string &pipeline_name, const std::string &device_name) { + std::string foundKey; + for (const auto &entry : gpu_pipeline_config) { + if (device_name.find(entry.first) != std::string::npos) { + foundKey = entry.first; + break; + } + } + if (!foundKey.empty()) { + auto &pipelineMap = gpu_pipeline_config[foundKey]; + auto pipIt = pipelineMap.find(pipeline_name); + if (pipIt != pipelineMap.end() && pipIt->second != 0) { + return pipIt->second; + } + } + // If not defined, return 0. + return 0; +} + static void ggml_vk_load_shaders(vk_device& device) { VK_LOG_DEBUG("ggml_vk_load_shaders(" << device->name << ")"); @@ -1546,11 +1576,13 @@ static void ggml_vk_load_shaders(vk_device& device) { vk::PhysicalDeviceProperties2 props2; device->physical_device.getProperties2(&props2); std::string device_name = props2.properties.deviceName.data(); + std::vector> compiles; auto const &ggml_vk_create_pipeline = [&](vk_device& device, vk_pipeline& pipeline, const std::string &name, size_t spv_size, const void* spv_data, const std::string &entrypoint, uint32_t parameter_count, uint32_t push_constant_size, std::array wg_denoms, const std::vector& specialization_constants, uint32_t align, bool disable_robustness = false, bool require_full_subgroups = false, uint32_t required_subgroup_size = 0) { + required_subgroup_size = get_subgroup_size(name, device_name); if (required_subgroup_size == 0) { required_subgroup_size = (device_name.find("RX 5700") != std::string::npos) ? 32 : required_subgroup_size; } @@ -1580,20 +1612,6 @@ static void ggml_vk_load_shaders(vk_device& device) { parameter_count, wg_denoms, specialization_constants, disable_robustness, require_full_subgroups, required_subgroup_size)); }; - // New lambda for pipelines with subgroup size 64. 
- auto const &ggml_vk_create_pipeline_64 = [&](vk_device& device, vk_pipeline& pipeline, - const std::string &name, size_t spv_size, const void* spv_data, - const std::string &entrypoint, uint32_t parameter_count, - uint32_t push_constant_size, std::array wg_denoms, - const std::vector& specialization_constants, uint32_t align, - bool disable_robustness = false, bool require_full_subgroups = false) - { - ggml_vk_create_pipeline(device, pipeline, name, spv_size, spv_data, entrypoint, - parameter_count, push_constant_size, wg_denoms, - specialization_constants, align, disable_robustness, - require_full_subgroups, 64); - }; - #if defined(VK_NV_cooperative_matrix2) && defined(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT) if (device->coopmat2) { @@ -2172,21 +2190,11 @@ static void ggml_vk_load_shaders(vk_device& device) { ggml_vk_create_pipeline(device, device->pipeline_sum_rows_f32, "sum_rows_f32", sum_rows_f32_len, sum_rows_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, { device->subgroup_size }, 1); - // Workaround needed to speedup im2col on RX 5700 - if (device_name.find("RX 5700") != std::string::npos) { - ggml_vk_create_pipeline_64(device, device->pipeline_im2col_f32, "im2col_f32", im2col_f32_len, im2col_f32_data, "main", 2, sizeof(vk_op_im2col_push_constants), {512, 1, 1}, { device->subgroup_size }, 1, true); - if (device->float_controls_rte_fp16) { - ggml_vk_create_pipeline_64(device, device->pipeline_im2col_f32_f16, "im2col_f32_f16", im2col_f32_f16_rte_len, im2col_f32_f16_rte_data, "main", 2, sizeof(vk_op_im2col_push_constants), {512, 1, 1}, { device->subgroup_size }, 1, true); - } else { - ggml_vk_create_pipeline_64(device, device->pipeline_im2col_f32_f16, "im2col_f32_f16", im2col_f32_f16_len, im2col_f32_f16_data, "main", 2, sizeof(vk_op_im2col_push_constants), {512, 1, 1}, { device->subgroup_size }, 1, true); - } + ggml_vk_create_pipeline(device, device->pipeline_im2col_f32, "im2col_f32", im2col_f32_len, im2col_f32_data, "main", 2, 
sizeof(vk_op_im2col_push_constants), {512, 1, 1}, { device->subgroup_size }, 1, true); + if (device->float_controls_rte_fp16) { + ggml_vk_create_pipeline(device, device->pipeline_im2col_f32_f16, "im2col_f32_f16", im2col_f32_f16_rte_len, im2col_f32_f16_rte_data, "main", 2, sizeof(vk_op_im2col_push_constants), {512, 1, 1}, { device->subgroup_size }, 1, true); } else { - ggml_vk_create_pipeline(device, device->pipeline_im2col_f32, "im2col_f32", im2col_f32_len, im2col_f32_data, "main", 2, sizeof(vk_op_im2col_push_constants), {512, 1, 1}, { device->subgroup_size }, 1, true); - if (device->float_controls_rte_fp16) { - ggml_vk_create_pipeline(device, device->pipeline_im2col_f32_f16, "im2col_f32_f16", im2col_f32_f16_rte_len, im2col_f32_f16_rte_data, "main", 2, sizeof(vk_op_im2col_push_constants), {512, 1, 1}, { device->subgroup_size }, 1, true); - } else { - ggml_vk_create_pipeline(device, device->pipeline_im2col_f32_f16, "im2col_f32_f16", im2col_f32_f16_len, im2col_f32_f16_data, "main", 2, sizeof(vk_op_im2col_push_constants), {512, 1, 1}, { device->subgroup_size }, 1, true); - } + ggml_vk_create_pipeline(device, device->pipeline_im2col_f32_f16, "im2col_f32_f16", im2col_f32_f16_len, im2col_f32_f16_data, "main", 2, sizeof(vk_op_im2col_push_constants), {512, 1, 1}, { device->subgroup_size }, 1, true); } ggml_vk_create_pipeline(device, device->pipeline_timestep_embedding_f32, "timestep_embedding_f32", timestep_embedding_f32_len, timestep_embedding_f32_data, "main", 2, sizeof(vk_op_timestep_embedding_push_constants), {256, 1, 1}, {}, 1); From 04100e850630559b01fdeba23a9fe0830b05080e Mon Sep 17 00:00:00 2001 From: Daniele <57776841+daniandtheweb@users.noreply.github.com> Date: Fri, 14 Feb 2025 22:28:25 +0100 Subject: [PATCH 05/12] Optimize soft_max --- ggml/src/ggml-vulkan/ggml-vulkan.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index ad28acb6c18..58d4576baf2 100644 --- 
a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -1428,12 +1428,12 @@ static bool ggml_vk_matmul_shmem_support(const vk_device& device, const std::vec // Inner map: key is pipeline name; value is the subgroup size. static std::unordered_map> gpu_pipeline_config = { {"RX 5700", { - {"im2col_f32", 64}, - {"im2col_f32_f16", 64} + {"soft_max_f32", 64}, {"soft_max_f32_wg512", 64}, + {"soft_max_f32_f16", 64}, {"soft_max_f32_f16_wg512", 64}, + {"im2col_f32", 64}, {"im2col_f32_f16", 64}, }} }; -// Helper function defined at namespace scope. static uint32_t get_subgroup_size(const std::string &pipeline_name, const std::string &device_name) { std::string foundKey; for (const auto &entry : gpu_pipeline_config) { From 9036e7a81d3c5ad1f07d21feee1383e2f9190b19 Mon Sep 17 00:00:00 2001 From: Daniele <57776841+daniandtheweb@users.noreply.github.com> Date: Sat, 15 Feb 2025 14:22:05 +0100 Subject: [PATCH 06/12] Apply map to all NAVI1* gpus --- ggml/src/ggml-vulkan/ggml-vulkan.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index 58d4576baf2..f4a7e95dc6b 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -1424,10 +1424,10 @@ static bool ggml_vk_matmul_shmem_support(const vk_device& device, const std::vec } // Define a configuration map per GPU. -// Outer key: GPU identifier (e.g. "RX 5700"). +// Outer key: GPU identifier (e.g. "NAVI1"). // Inner map: key is pipeline name; value is the subgroup size. 
static std::unordered_map> gpu_pipeline_config = { - {"RX 5700", { + {"NAVI1", { {"soft_max_f32", 64}, {"soft_max_f32_wg512", 64}, {"soft_max_f32_f16", 64}, {"soft_max_f32_f16_wg512", 64}, {"im2col_f32", 64}, {"im2col_f32_f16", 64}, @@ -1584,7 +1584,7 @@ static void ggml_vk_load_shaders(vk_device& device) { required_subgroup_size = get_subgroup_size(name, device_name); if (required_subgroup_size == 0) { - required_subgroup_size = (device_name.find("RX 5700") != std::string::npos) ? 32 : required_subgroup_size; + required_subgroup_size = (device_name.find("NAVI1") != std::string::npos) ? 32 : required_subgroup_size; } if (!pipeline) { From d1519734d9b68799e197e87a4a1c8d5013845817 Mon Sep 17 00:00:00 2001 From: Daniele <57776841+daniandtheweb@users.noreply.github.com> Date: Sat, 15 Feb 2025 19:51:23 +0100 Subject: [PATCH 07/12] Improved gpu configuration map --- ggml/src/ggml-vulkan/ggml-vulkan.cpp | 60 ++++++++++++++++------------ 1 file changed, 35 insertions(+), 25 deletions(-) diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index f4a7e95dc6b..1912c55e62b 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -1423,33 +1423,46 @@ static bool ggml_vk_matmul_shmem_support(const vk_device& device, const std::vec return supported; } -// Define a configuration map per GPU. -// Outer key: GPU identifier (e.g. "NAVI1"). -// Inner map: key is pipeline name; value is the subgroup size. -static std::unordered_map> gpu_pipeline_config = { - {"NAVI1", { - {"soft_max_f32", 64}, {"soft_max_f32_wg512", 64}, - {"soft_max_f32_f16", 64}, {"soft_max_f32_f16_wg512", 64}, - {"im2col_f32", 64}, {"im2col_f32_f16", 64}, - }} +struct GpuPipelineConfig { + // List of all aliases for a given GPU. + // For example, this can include names like "NAVI10", "RX 5700", etc. + std::vector device_names; + + // Mapping of pipeline names to their specific subgroup sizes. + // Example: {"soft_max_f32", 64}. 
+ std::unordered_map pipelines; + + // Default subgroup size for this GPU. + // Defaults to 0 if not explicitly provided. + uint32_t default_subgroup_size = 0; +}; + +// Define configurations for different GPUs. +static std::vector gpu_pipeline_configs = { + { + {"NAVI10", "NAVI14", "RX 5700", "RX 5600", "RX 5500"}, + { + {"soft_max_f32", 64}, {"soft_max_f32_wg512", 64}, + {"soft_max_f32_f16", 64}, {"soft_max_f32_f16_wg512", 64}, + {"im2col_f32", 64}, {"im2col_f32_f16", 64}, + }, + 32 + }, }; static uint32_t get_subgroup_size(const std::string &pipeline_name, const std::string &device_name) { - std::string foundKey; - for (const auto &entry : gpu_pipeline_config) { - if (device_name.find(entry.first) != std::string::npos) { - foundKey = entry.first; - break; - } - } - if (!foundKey.empty()) { - auto &pipelineMap = gpu_pipeline_config[foundKey]; - auto pipIt = pipelineMap.find(pipeline_name); - if (pipIt != pipelineMap.end() && pipIt->second != 0) { - return pipIt->second; + for (const auto &config : gpu_pipeline_configs) { + for (const auto &alias : config.device_names) { + if (device_name.find(alias) != std::string::npos) { + auto pipIt = config.pipelines.find(pipeline_name); + if (pipIt != config.pipelines.end() && pipIt->second != 0) { + return pipIt->second; + } + return config.default_subgroup_size; + } } } - // If not defined, return 0. + // If no matching configuration is found, return 0. return 0; } @@ -1583,9 +1596,6 @@ static void ggml_vk_load_shaders(vk_device& device) { uint32_t align, bool disable_robustness = false, bool require_full_subgroups = false, uint32_t required_subgroup_size = 0) { required_subgroup_size = get_subgroup_size(name, device_name); - if (required_subgroup_size == 0) { - required_subgroup_size = (device_name.find("NAVI1") != std::string::npos) ? 
32 : required_subgroup_size; - } if (!pipeline) { pipeline = std::make_shared(); From 0e5dd682626f761d8b6011e4405bcda78266b781 Mon Sep 17 00:00:00 2001 From: Daniele <57776841+daniandtheweb@users.noreply.github.com> Date: Sun, 16 Feb 2025 03:24:25 +0100 Subject: [PATCH 08/12] Cleanup im2col shader --- ggml/src/ggml-vulkan/vulkan-shaders/im2col.comp | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/im2col.comp b/ggml/src/ggml-vulkan/vulkan-shaders/im2col.comp index 639298ce65b..09aa849e881 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/im2col.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/im2col.comp @@ -68,14 +68,13 @@ void main() { continue; } - const uint iiw = int(current_ix) * p.s0 + int(current_kx) * p.d0 - p.p0; - const uint iih = oh_s1 + int(current_ky) * p.d1 - p.p1; + const uint iiw = current_ix * p.s0 + current_kx * p.d0 - p.p0; + const uint iih = oh_s1 + current_ky * p.d1 - p.p1; offset_dst[idx] = dst_base + current_ix * p.CHW + current_ky * p.KW + current_kx; - const bool valid = (iih >= 0 && iih < int(p.IH)) && (iiw >= 0 && iiw < int(p.IW)); - if (valid) { - values[idx] = data_a[src_base + uint(iih) * p.IW + uint(iiw)]; + if ((iih < p.IH) && (iiw < p.IW)) { + values[idx] = data_a[src_base + iih * p.IW + iiw]; } if (++current_ix == p.OW) { From 27f130196fab9f858f724cc1f4357afec745763a Mon Sep 17 00:00:00 2001 From: Daniele <57776841+daniandtheweb@users.noreply.github.com> Date: Sat, 22 Feb 2025 02:32:52 +0100 Subject: [PATCH 09/12] Fix subgroup override --- ggml/src/ggml-vulkan/ggml-vulkan.cpp | 83 +++++++++++----------------- 1 file changed, 33 insertions(+), 50 deletions(-) diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index 1912c55e62b..5fba8f542e4 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -1423,49 +1423,6 @@ static bool ggml_vk_matmul_shmem_support(const vk_device& device, const std::vec 
return supported; } -struct GpuPipelineConfig { - // List of all aliases for a given GPU. - // For example, this can include names like "NAVI10", "RX 5700", etc. - std::vector device_names; - - // Mapping of pipeline names to their specific subgroup sizes. - // Example: {"soft_max_f32", 64}. - std::unordered_map pipelines; - - // Default subgroup size for this GPU. - // Defaults to 0 if not explicitly provided. - uint32_t default_subgroup_size = 0; -}; - -// Define configurations for different GPUs. -static std::vector gpu_pipeline_configs = { - { - {"NAVI10", "NAVI14", "RX 5700", "RX 5600", "RX 5500"}, - { - {"soft_max_f32", 64}, {"soft_max_f32_wg512", 64}, - {"soft_max_f32_f16", 64}, {"soft_max_f32_f16_wg512", 64}, - {"im2col_f32", 64}, {"im2col_f32_f16", 64}, - }, - 32 - }, -}; - -static uint32_t get_subgroup_size(const std::string &pipeline_name, const std::string &device_name) { - for (const auto &config : gpu_pipeline_configs) { - for (const auto &alias : config.device_names) { - if (device_name.find(alias) != std::string::npos) { - auto pipIt = config.pipelines.find(pipeline_name); - if (pipIt != config.pipelines.end() && pipIt->second != 0) { - return pipIt->second; - } - return config.default_subgroup_size; - } - } - } - // If no matching configuration is found, return 0. 
- return 0; -} - static void ggml_vk_load_shaders(vk_device& device) { VK_LOG_DEBUG("ggml_vk_load_shaders(" << device->name << ")"); @@ -1586,17 +1543,11 @@ static void ggml_vk_load_shaders(vk_device& device) { device->pipeline_matmul_id_f32 = std::make_shared(); } - vk::PhysicalDeviceProperties2 props2; - device->physical_device.getProperties2(&props2); - std::string device_name = props2.properties.deviceName.data(); - std::vector> compiles; auto const &ggml_vk_create_pipeline = [&](vk_device& device, vk_pipeline& pipeline, const std::string &name, size_t spv_size, const void* spv_data, const std::string &entrypoint, uint32_t parameter_count, uint32_t push_constant_size, std::array wg_denoms, const std::vector& specialization_constants, uint32_t align, bool disable_robustness = false, bool require_full_subgroups = false, uint32_t required_subgroup_size = 0) { - required_subgroup_size = get_subgroup_size(name, device_name); - if (!pipeline) { pipeline = std::make_shared(); pipeline->name = name; @@ -2723,6 +2674,36 @@ static vk_device ggml_vk_get_device(size_t idx) { return vk_instance.devices[idx]; } +struct GpuPipelineConfig { + // List of all aliases for a given GPU. + // For example, this can include names like "NAVI10", "RX 5700", etc. + std::vector device_names; + + // Default subgroup size for this GPU. + // Defaults to 0 if not explicitly provided. + uint32_t default_subgroup_size = 0; +}; + +// Define configurations for different GPUs. +static std::vector gpu_pipeline_configs = { + { + {"NAVI10", "NAVI14", "RX 5700", "RX 5600", "RX 5500"}, + 32 + }, +}; + +static uint32_t get_subgroup_size(const std::string &device_name) { + for (const auto &config : gpu_pipeline_configs) { + for (const auto &alias : config.device_names) { + if (device_name.find(alias) != std::string::npos) { + return config.default_subgroup_size; + } + } + } + // If no matching configuration is found, return 0. 
+ return 0; +} + static void ggml_vk_print_gpu_info(size_t idx) { GGML_ASSERT(idx < vk_instance.device_indices.size()); size_t dev_num = vk_instance.device_indices[idx]; @@ -2748,7 +2729,9 @@ static void ggml_vk_print_gpu_info(size_t idx) { subgroup_props.pNext = &driver_props; physical_device.getProperties2(&props2); - const size_t subgroup_size = subgroup_props.subgroupSize; + uint32_t default_subgroup_size = get_subgroup_size(props2.properties.deviceName.data()); + const size_t subgroup_size = (default_subgroup_size != 0) ? default_subgroup_size : subgroup_props.subgroupSize; + const bool uma = props2.properties.deviceType == vk::PhysicalDeviceType::eIntegratedGpu; bool fp16_storage = false; From 9c9b812104f29925487b7a4f2046fa710337afcc Mon Sep 17 00:00:00 2001 From: Daniele <57776841+daniandtheweb@users.noreply.github.com> Date: Sat, 22 Feb 2025 03:56:21 +0100 Subject: [PATCH 10/12] subgroup test improvements --- ggml/src/ggml-vulkan/ggml-vulkan.cpp | 66 +++++++++++++++------------- 1 file changed, 36 insertions(+), 30 deletions(-) diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index 5fba8f542e4..82282f93f84 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -1423,6 +1423,36 @@ static bool ggml_vk_matmul_shmem_support(const vk_device& device, const std::vec return supported; } +struct GpuPipelineConfig { + // List of all aliases for a given GPU. + // For example, this can include names like "NAVI10", "RX 5700", etc. + std::vector device_names; + + // Default subgroup size for this GPU. + // Defaults to 0 if not explicitly provided. + uint32_t default_subgroup_size = 0; +}; + +// Define configurations for different GPUs. 
+static std::vector gpu_pipeline_configs = { + { + {"NAVI10", "NAVI14", "RX 5700", "RX 5600", "RX 5500"}, + 32 + }, +}; + +static uint32_t get_subgroup_size(const std::string &device_name) { + for (const auto &config : gpu_pipeline_configs) { + for (const auto &alias : config.device_names) { + if (device_name.find(alias) != std::string::npos) { + return config.default_subgroup_size; + } + } + } + // If no matching configuration is found, return 0. + return 0; +} + static void ggml_vk_load_shaders(vk_device& device) { VK_LOG_DEBUG("ggml_vk_load_shaders(" << device->name << ")"); @@ -1543,11 +1573,17 @@ static void ggml_vk_load_shaders(vk_device& device) { device->pipeline_matmul_id_f32 = std::make_shared(); } + vk::PhysicalDeviceProperties2 props2; + device->physical_device.getProperties2(&props2); + std::string device_name = props2.properties.deviceName.data(); + std::vector> compiles; auto const &ggml_vk_create_pipeline = [&](vk_device& device, vk_pipeline& pipeline, const std::string &name, size_t spv_size, const void* spv_data, const std::string &entrypoint, uint32_t parameter_count, uint32_t push_constant_size, std::array wg_denoms, const std::vector& specialization_constants, uint32_t align, bool disable_robustness = false, bool require_full_subgroups = false, uint32_t required_subgroup_size = 0) { + required_subgroup_size = get_subgroup_size(device_name); + if (!pipeline) { pipeline = std::make_shared(); pipeline->name = name; @@ -2674,36 +2710,6 @@ static vk_device ggml_vk_get_device(size_t idx) { return vk_instance.devices[idx]; } -struct GpuPipelineConfig { - // List of all aliases for a given GPU. - // For example, this can include names like "NAVI10", "RX 5700", etc. - std::vector device_names; - - // Default subgroup size for this GPU. - // Defaults to 0 if not explicitly provided. - uint32_t default_subgroup_size = 0; -}; - -// Define configurations for different GPUs. 
-static std::vector gpu_pipeline_configs = { - { - {"NAVI10", "NAVI14", "RX 5700", "RX 5600", "RX 5500"}, - 32 - }, -}; - -static uint32_t get_subgroup_size(const std::string &device_name) { - for (const auto &config : gpu_pipeline_configs) { - for (const auto &alias : config.device_names) { - if (device_name.find(alias) != std::string::npos) { - return config.default_subgroup_size; - } - } - } - // If no matching configuration is found, return 0. - return 0; -} - static void ggml_vk_print_gpu_info(size_t idx) { GGML_ASSERT(idx < vk_instance.device_indices.size()); size_t dev_num = vk_instance.device_indices[idx]; From 4a3988e94f2fe22386cda4e74be89023b85ab2f5 Mon Sep 17 00:00:00 2001 From: Daniele <57776841+daniandtheweb@users.noreply.github.com> Date: Sat, 22 Feb 2025 04:30:39 +0100 Subject: [PATCH 11/12] Fix wave32 not correctly used --- ggml/src/ggml-vulkan/ggml-vulkan.cpp | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index 82282f93f84..5bcd4fbbabc 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -1428,6 +1428,10 @@ struct GpuPipelineConfig { // For example, this can include names like "NAVI10", "RX 5700", etc. std::vector device_names; + // Mapping of pipeline names to their specific subgroup sizes. + // Example: {"soft_max_f32", 64}. + std::unordered_map pipelines; + // Default subgroup size for this GPU. // Defaults to 0 if not explicitly provided. 
uint32_t default_subgroup_size = 0; @@ -1437,14 +1441,23 @@ struct GpuPipelineConfig { static std::vector gpu_pipeline_configs = { { {"NAVI10", "NAVI14", "RX 5700", "RX 5600", "RX 5500"}, + { + {"soft_max_f32", 64}, {"soft_max_f32_wg512", 64}, + {"soft_max_f32_f16", 64}, {"soft_max_f32_f16_wg512", 64}, + {"im2col_f32", 64}, {"im2col_f32_f16", 64}, + }, 32 }, }; -static uint32_t get_subgroup_size(const std::string &device_name) { +static uint32_t get_subgroup_size(const std::string &pipeline_name, const std::string &device_name) { for (const auto &config : gpu_pipeline_configs) { for (const auto &alias : config.device_names) { if (device_name.find(alias) != std::string::npos) { + auto pipIt = config.pipelines.find(pipeline_name); + if (pipIt != config.pipelines.end() && pipIt->second != 0) { + return pipIt->second; + } return config.default_subgroup_size; } } @@ -1582,7 +1595,7 @@ static void ggml_vk_load_shaders(vk_device& device) { uint32_t parameter_count, uint32_t push_constant_size, std::array wg_denoms, const std::vector& specialization_constants, uint32_t align, bool disable_robustness = false, bool require_full_subgroups = false, uint32_t required_subgroup_size = 0) { - required_subgroup_size = get_subgroup_size(device_name); + required_subgroup_size = get_subgroup_size(name, device_name); if (!pipeline) { pipeline = std::make_shared(); @@ -2735,7 +2748,7 @@ static void ggml_vk_print_gpu_info(size_t idx) { subgroup_props.pNext = &driver_props; physical_device.getProperties2(&props2); - uint32_t default_subgroup_size = get_subgroup_size(props2.properties.deviceName.data()); + uint32_t default_subgroup_size = get_subgroup_size("", props2.properties.deviceName.data()); const size_t subgroup_size = (default_subgroup_size != 0) ? 
default_subgroup_size : subgroup_props.subgroupSize; const bool uma = props2.properties.deviceType == vk::PhysicalDeviceType::eIntegratedGpu; From c49419f7ab2539d4bd1db7c2c68c7b4c30433921 Mon Sep 17 00:00:00 2001 From: Daniele <57776841+daniandtheweb@users.noreply.github.com> Date: Wed, 26 Feb 2025 15:40:46 +0100 Subject: [PATCH 12/12] Removed subgroup changes --- ggml/src/ggml-vulkan/ggml-vulkan.cpp | 53 +--------------------------- 1 file changed, 1 insertion(+), 52 deletions(-) diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index 5bcd4fbbabc..d32ba4efbc9 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -1423,49 +1423,6 @@ static bool ggml_vk_matmul_shmem_support(const vk_device& device, const std::vec return supported; } -struct GpuPipelineConfig { - // List of all aliases for a given GPU. - // For example, this can include names like "NAVI10", "RX 5700", etc. - std::vector<std::string> device_names; - - // Mapping of pipeline names to their specific subgroup sizes. - // Example: {"soft_max_f32", 64}. - std::unordered_map<std::string, uint32_t> pipelines; - - // Default subgroup size for this GPU. - // Defaults to 0 if not explicitly provided. - uint32_t default_subgroup_size = 0; -}; - -// Define configurations for different GPUs.
-static std::vector<GpuPipelineConfig> gpu_pipeline_configs = { - { - {"NAVI10", "NAVI14", "RX 5700", "RX 5600", "RX 5500"}, - { - {"soft_max_f32", 64}, {"soft_max_f32_wg512", 64}, - {"soft_max_f32_f16", 64}, {"soft_max_f32_f16_wg512", 64}, - {"im2col_f32", 64}, {"im2col_f32_f16", 64}, - }, - 32 - }, -}; - -static uint32_t get_subgroup_size(const std::string &pipeline_name, const std::string &device_name) { - for (const auto &config : gpu_pipeline_configs) { - for (const auto &alias : config.device_names) { - if (device_name.find(alias) != std::string::npos) { - auto pipIt = config.pipelines.find(pipeline_name); - if (pipIt != config.pipelines.end() && pipIt->second != 0) { - return pipIt->second; - } - return config.default_subgroup_size; - } - } - } - // If no matching configuration is found, return 0. - return 0; -} - static void ggml_vk_load_shaders(vk_device& device) { VK_LOG_DEBUG("ggml_vk_load_shaders(" << device->name << ")"); @@ -1586,17 +1543,11 @@ static void ggml_vk_load_shaders(vk_device& device) { device->pipeline_matmul_id_f32 = std::make_shared<vk_matmul_pipeline_struct>(); } - vk::PhysicalDeviceProperties2 props2; - device->physical_device.getProperties2(&props2); - std::string device_name = props2.properties.deviceName.data(); - std::vector<std::future<void>> compiles; auto const &ggml_vk_create_pipeline = [&](vk_device& device, vk_pipeline& pipeline, const std::string &name, size_t spv_size, const void* spv_data, const std::string &entrypoint, uint32_t parameter_count, uint32_t push_constant_size, std::array<uint32_t, 3> wg_denoms, const std::vector<uint32_t>& specialization_constants, uint32_t align, bool disable_robustness = false, bool require_full_subgroups = false, uint32_t required_subgroup_size = 0) { - required_subgroup_size = get_subgroup_size(name, device_name); - if (!pipeline) { pipeline = std::make_shared<vk_pipeline_struct>(); pipeline->name = name; @@ -2748,9 +2699,7 @@ static void ggml_vk_print_gpu_info(size_t idx) { subgroup_props.pNext = &driver_props; physical_device.getProperties2(&props2); - uint32_t default_subgroup_size =
get_subgroup_size("", props2.properties.deviceName.data()); - const size_t subgroup_size = (default_subgroup_size != 0) ? default_subgroup_size : subgroup_props.subgroupSize; - + const size_t subgroup_size = subgroup_props.subgroupSize; const bool uma = props2.properties.deviceType == vk::PhysicalDeviceType::eIntegratedGpu; bool fp16_storage = false;