diff --git a/mlir/test/Examples/NVGPU/Ch0.py b/mlir/test/Examples/NVGPU/Ch0.py index 8f60088178d11..e09720a0f3b75 100644 --- a/mlir/test/Examples/NVGPU/Ch0.py +++ b/mlir/test/Examples/NVGPU/Ch0.py @@ -1,5 +1,9 @@ # RUN: env SUPPORT_LIB=%mlir_cuda_runtime \ -# RUN: %PYTHON %s | FileCheck %s +# RUN: sh -c 'if [[ "%mlir_run_cuda_sm90_tests" == "1" ]]; \ +# RUN: then %PYTHON %s | FileCheck %s; \ +# RUN: else export MLIR_NVDSL_PRINT_IR=1; \ +# RUN: %PYTHON %s | FileCheck %s --check-prefix=DUMPIR; fi' + # ===----------------------------------------------------------------------===// # Chapter 0 : Hello World @@ -33,7 +37,7 @@ def kernel(): # + operator generates arith.addi myValue = alpha + tidx # Print from a GPU thread - gpu.printf("GPU thread %llu has %llu\n", [tidx, myValue]) + gpu.printf("GPU thread %llu has %llu\n", tidx, myValue) # 3. Call the GPU kernel kernel() @@ -43,8 +47,24 @@ def kernel(): # 4. The `mlir_func` decorator JIT compiles the IR and executes the MLIR function. main(alpha) - # CHECK: GPU thread 0 has 100 # CHECK: GPU thread 1 has 101 # CHECK: GPU thread 2 has 102 # CHECK: GPU thread 3 has 103 + +# DUMPIR: func.func @main(%arg0: index) attributes {llvm.emit_c_interface} { +# DUMPIR: %[[C0_I32:.*]] = arith.constant 0 : i32 +# DUMPIR: %[[C1:.*]] = arith.constant 1 : index +# DUMPIR: %[[C1_0:.*]] = arith.constant 1 : index +# DUMPIR: %[[C1_1:.*]] = arith.constant 1 : index +# DUMPIR: %[[C4:.*]] = arith.constant 4 : index +# DUMPIR: %[[C1_2:.*]] = arith.constant 1 : index +# DUMPIR: %[[C1_3:.*]] = arith.constant 1 : index +# DUMPIR: gpu.launch blocks(%arg1, %arg2, %arg3) in (%arg7 = %[[C1]], %arg8 = %[[C1_0]], %arg9 = %[[C1_1]]) threads(%arg4, %arg5, %arg6) in (%arg10 = %[[C4]], %arg11 = %[[C1_2]], %arg12 = %[[C1_3]]) dynamic_shared_memory_size %[[C0_I32]] { +# DUMPIR: %[[TIDX:.*]] = gpu.thread_id x +# DUMPIR: %[[MYVAL:.*]] = arith.addi %arg0, %[[TIDX]] : index +# DUMPIR: gpu.printf "GPU thread %llu has %llu\0A", %[[TIDX]], %[[MYVAL]] : index, index +# DUMPIR: gpu.terminator +# DUMPIR: } +# DUMPIR: return +# DUMPIR: } diff --git a/mlir/test/Examples/NVGPU/Ch1.py b/mlir/test/Examples/NVGPU/Ch1.py index cfb48d56f8d49..6e44e4d04fa06 100644 --- a/mlir/test/Examples/NVGPU/Ch1.py +++ b/mlir/test/Examples/NVGPU/Ch1.py @@ -1,5 +1,9 @@ # RUN: env SUPPORT_LIB=%mlir_cuda_runtime \ -# RUN: %PYTHON %s | FileCheck %s +# RUN: sh -c 'if [[ "%mlir_run_cuda_sm90_tests" == "1" ]]; \ +# RUN: then %PYTHON %s | FileCheck %s; \ +# RUN: else export MLIR_NVDSL_PRINT_IR=1; \ +# RUN: %PYTHON %s | FileCheck %s --check-prefix=DUMPIR; fi' + # ===----------------------------------------------------------------------===// # Chapter 1 : 2D Saxpy @@ -24,12 +28,12 @@ def saxpy(x, y, alpha): # 1. Use MLIR GPU dialect to allocate and copy memory token_ty = gpu.AsyncTokenType.get() - t1 = gpu.wait(token_ty, []) + t1 = gpu.wait([]) x_dev, t2 = gpu.alloc(x.type, token_ty, [t1], [], []) y_dev, t3 = gpu.alloc(y.type, token_ty, [t2], [], []) t4 = gpu.memcpy(token_ty, [t3], x_dev, x) t5 = gpu.memcpy(token_ty, [t4], y_dev, y) - t6 = gpu.wait(token_ty, [t5]) + t6 = gpu.wait([t5]) # 2. Compute 2D SAXPY kernel @NVDSL.mlir_gpu_launch(grid=(M, 1, 1), block=(N, 1, 1)) @@ -47,7 +51,7 @@ def saxpy_kernel(): saxpy_kernel() t7 = gpu.memcpy(token_ty, [t6], y, y_dev) - gpu.wait(token_ty, [t7]) + gpu.wait([t7]) # 3. Pass numpy arrays to MLIR @@ -56,11 +60,32 @@ def saxpy_kernel(): alpha = 2.0 x = np.random.randn(M, N).astype(np.float32) y = np.ones((M, N), np.float32) + saxpy(x, y, alpha) -# 4. 
Verify MLIR with reference computation -ref = np.ones((M, N), np.float32) -ref += x * alpha -np.testing.assert_allclose(y, ref, rtol=5e-03, atol=1e-01) -print("PASS") +if os.getenv("MLIR_NVDSL_PRINT_IR") != "1": + # 4. Verify MLIR with reference computation + ref = np.ones((M, N), np.float32) + ref += x * alpha + np.testing.assert_allclose(y, ref, rtol=5e-03, atol=1e-01) + print("PASS") # CHECK-NOT: Mismatched elements +# CHECK: PASS + +# DUMPIR: func.func @saxpy(%[[ARG0:.*]]: memref<256x32xf32>, %[[ARG1:.*]]: memref<256x32xf32>, %[[ARG2:.*]]: f32) attributes {llvm.emit_c_interface} { +# DUMPIR: %[[WAIT0:.*]] = gpu.wait async +# DUMPIR: %[[MEMREF:.*]], %[[ASYNC0:.*]] = gpu.alloc async [%[[WAIT0]]] () : memref<256x32xf32> +# DUMPIR: %[[MEMREF0:.*]], %[[ASYNC1:.*]] = gpu.alloc async [%[[ASYNC0]]] () : memref<256x32xf32> +# DUMPIR: %[[MEMCPY1:.*]] = gpu.memcpy async [%[[ASYNC1]]] %[[MEMREF]], %[[ARG0]] : memref<256x32xf32>, memref<256x32xf32> +# DUMPIR: %[[MEMCPY2:.*]] = gpu.memcpy async [%[[MEMCPY1]]] %[[MEMREF0]], %[[ARG1]] : memref<256x32xf32>, memref<256x32xf32> +# DUMPIR: %[[WAIT1:.*]] = gpu.wait async [%[[MEMCPY2]]] +# DUMPIR: %[[LD0:.*]] = memref.load %[[MEMREF]][%{{.*}}, %{{.*}}] : memref<256x32xf32> +# DUMPIR: %[[LD1:.*]] = memref.load %[[MEMREF0]][%{{.*}}, %{{.*}}] : memref<256x32xf32> +# DUMPIR: %[[MUL:.*]] = arith.mulf %[[LD0]], %[[ARG2]] : f32 +# DUMPIR: %[[ADD:.*]] = arith.addf %[[LD1]], %[[MUL]] : f32 +# DUMPIR: memref.store %[[ADD]], %[[MEMREF0]][%{{.*}}, %{{.*}}] : memref<256x32xf32> +# DUMPIR: gpu.terminator +# DUMPIR: %[[MEMCPY3:.*]] = gpu.memcpy async [%[[WAIT1]]] %[[ARG1]], %[[MEMREF0]] : memref<256x32xf32>, memref<256x32xf32> +# DUMPIR: %[[WAIT2:.*]] = gpu.wait async [%[[MEMCPY3]]] +# DUMPIR: return +# DUMPIR: } diff --git a/mlir/test/Examples/NVGPU/Ch2.py b/mlir/test/Examples/NVGPU/Ch2.py index 729913c6d5c4f..aba610cee0b34 100644 --- a/mlir/test/Examples/NVGPU/Ch2.py +++ b/mlir/test/Examples/NVGPU/Ch2.py @@ -1,5 +1,9 @@ # RUN: env SUPPORT_LIB=%mlir_cuda_runtime \ -# RUN: %PYTHON %s | FileCheck %s +# RUN: sh -c 'if [[ "%mlir_run_cuda_sm90_tests" == "1" ]]; \ +# RUN: then %PYTHON %s | FileCheck %s; \ +# RUN: else export MLIR_NVDSL_PRINT_IR=1; \ +# RUN: %PYTHON %s | FileCheck %s --check-prefix=DUMPIR; fi' + # ===----------------------------------------------------------------------===// # Chapter 2 : 2D Saxpy with TMA @@ -28,12 +32,12 @@ @NVDSL.mlir_func def saxpy(x, y, alpha): token_ty = gpu.AsyncTokenType.get() - t1 = gpu.wait(token_ty, []) + t1 = gpu.wait([]) x_dev, t2 = gpu.alloc(x.type, token_ty, [t1], [], []) y_dev, t3 = gpu.alloc(y.type, token_ty, [t2], [], []) t4 = gpu.memcpy(token_ty, [t3], x_dev, x) t5 = gpu.memcpy(token_ty, [t4], y_dev, y) - t6 = gpu.wait(token_ty, [t5]) + t6 = gpu.wait([t5]) x_tma = TMA([1, N], x.type) y_tma = TMA([1, N], y.type) @@ -74,7 +78,7 @@ def saxpy_tma_kernel(): saxpy_tma_kernel() t7 = gpu.memcpy(token_ty, [t6], y, y_dev) - gpu.wait(token_ty, [t7]) + gpu.wait([t7]) # 3. Pass numpy arrays to MLIR @@ -85,9 +89,46 @@ def saxpy_tma_kernel(): y = np.ones((M, N), np.float32) saxpy(x, y, alpha) -# 4. Verify MLIR with reference computation -ref = np.ones((M, N), np.float32) -ref += x * alpha -np.testing.assert_allclose(y, ref, rtol=5e-03, atol=1e-01) -print("PASS") +if os.getenv("MLIR_NVDSL_PRINT_IR") != "1": + # 4. 
Verify MLIR with reference computation + ref = np.ones((M, N), np.float32) + ref += x * alpha + np.testing.assert_allclose(y, ref, rtol=5e-03, atol=1e-01) + print("PASS") # CHECK-NOT: Mismatched elements +# CHECK: PASS + +# DUMPIR: func.func @saxpy(%{{.*}}: memref<256x32xf32>, %[[ARG1:.*]]: memref<256x32xf32>, %[[ARG2:.*]]: f32) attributes {llvm.emit_c_interface} { +# DUMPIR: %[[WAIT0:.*]] = gpu.wait async +# DUMPIR: %[[MEMREF:.*]], %[[ASYNC0:.*]] = gpu.alloc async [%[[WAIT0]]] () : memref<256x32xf32> +# DUMPIR: %[[CAST:.*]] = memref.cast %[[MEMREF]] : memref<256x32xf32> to memref<*xf32> +# DUMPIR: %[[C1:.*]] = arith.constant 1 : index +# DUMPIR: %[[C32:.*]] = arith.constant 32 : index +# DUMPIR: %[[TMA0:.*]] = nvgpu.tma.create.descriptor %[[CAST]] box[%[[C1]], %[[C32]]] : memref<*xf32> -> , swizzle = none, l2promo = none, oob = zero, interleave = none> +# DUMPIR: %[[C0:.*]] = arith.constant 0 : index +# DUMPIR: %[[EQ:.*]] = arith.cmpi eq, %{{.*}}, %[[C0]] : index +# DUMPIR: %[[MB:.*]] = nvgpu.mbarrier.create -> > +# DUMPIR: %[[C0_10:.*]] = arith.constant 0 : index +# DUMPIR: %[[C1_11:.*]] = arith.constant 1 : index +# DUMPIR: nvgpu.mbarrier.init %[[MB]][%[[C0_10]]], %[[C1_11]], predicate = %[[EQ]] : > +# DUMPIR: %[[DSM0:.*]] = gpu.dynamic_shared_memory : memref> +# DUMPIR: %[[C0_12:.*]] = arith.constant 0 : index +# DUMPIR: %[[VIEW:.*]] = memref.view %[[DSM0]][%[[C0_12]]][] : memref> to memref<1x32xf32, #gpu.address_space> +# DUMPIR: %[[DSM1:.*]] = gpu.dynamic_shared_memory : memref> +# DUMPIR: %[[C128:.*]] = arith.constant 128 : index +# DUMPIR: %[[VIEW_13:.*]] = memref.view %[[DSM1]][%[[C128]]][] : memref> to memref<1x32xf32, #gpu.address_space> +# DUMPIR: nvgpu.tma.async.load %[[TMA0]][%{{.*}}, %{{.*}}], %[[MB]][%{{.*}}] to %[[VIEW]], predicate = %[[EQ]] : , swizzle = none, l2promo = none, oob = zero, interleave = none>, > -> memref<1x32xf32, #gpu.address_space> +# DUMPIR: nvgpu.mbarrier.arrive.expect_tx %[[MB]][%{{.*}}], %{{.*}}, predicate = %[[EQ]] : > +# DUMPIR: %[[C0_20:.*]] = arith.constant 0 : index +# DUMPIR: %[[C10000000:.*]] = arith.constant 10000000 : index +# DUMPIR: %[[FALSE:.*]] = arith.constant false +# DUMPIR: nvgpu.mbarrier.try_wait.parity %[[MB]][%[[C0_20]]], %[[FALSE]], %[[C10000000]] : > +# DUMPIR: %[[C0_21:.*]] = arith.constant 0 : index +# DUMPIR: %[[LD0:.*]] = memref.load %[[VIEW]][%[[C0_21]], %{{.*}}] : memref<1x32xf32, #gpu.address_space> +# DUMPIR: %[[C0_22:.*]] = arith.constant 0 : index +# DUMPIR: %[[LD1:.*]] = memref.load %[[VIEW_13]][%[[C0_22]], %{{.*}}] : memref<1x32xf32, #gpu.address_space> +# DUMPIR: memref.store %{{.*}}, %{{.*}}[%{{.*}}, %{{.*}}] : memref<256x32xf32> +# DUMPIR: %[[MEMCPY3:.*]] = gpu.memcpy async [%{{.*}}] %[[ARG1]], %{{.*}} : memref<256x32xf32>, memref<256x32xf32> +# DUMPIR: %{{.*}} = gpu.wait async [%[[MEMCPY3]]] +# DUMPIR: return +# DUMPIR: } diff --git a/mlir/test/Examples/NVGPU/Ch3.py b/mlir/test/Examples/NVGPU/Ch3.py index eb96b11c63416..fe11575416866 100644 --- a/mlir/test/Examples/NVGPU/Ch3.py +++ b/mlir/test/Examples/NVGPU/Ch3.py @@ -1,5 +1,9 @@ # RUN: env SUPPORT_LIB=%mlir_cuda_runtime \ -# RUN: %PYTHON %s | FileCheck %s +# RUN: sh -c 'if [[ "%mlir_run_cuda_sm90_tests" == "1" ]]; \ +# RUN: then %PYTHON %s | FileCheck %s; \ +# RUN: else export MLIR_NVDSL_PRINT_IR=1; \ +# RUN: %PYTHON %s | FileCheck %s --check-prefix=DUMPIR; fi' + # ===----------------------------------------------------------------------===// # Chapter 3 : GEMM 128x128x64 with Tensor Core @@ -60,13 +64,13 @@ def tma_load( @NVDSL.mlir_func def gemm_128_128_64(a, 
b, d): token_ty = gpu.AsyncTokenType.get() - t1 = gpu.wait(token_ty, []) + t1 = gpu.wait([]) a_dev, t2 = gpu.alloc(a.type, token_ty, [t1], [], []) b_dev, t3 = gpu.alloc(b.type, token_ty, [t2], [], []) d_dev, t4 = gpu.alloc(d.type, token_ty, [t3], [], []) t5 = gpu.memcpy(token_ty, [t4], a_dev, a) t6 = gpu.memcpy(token_ty, [t5], b_dev, b) - t7 = gpu.wait(token_ty, [t6]) + t7 = gpu.wait([t6]) sw = nvgpu.TensorMapSwizzleKind.SWIZZLE_128B a_tma = TMA([128, 64], a.type, swizzle=sw) @@ -111,7 +115,7 @@ def gemm_tma_kernel(): gemm_tma_kernel() t8 = gpu.memcpy(token_ty, [t7], d, d_dev) - gpu.wait(None, [t8]) + gpu.wait([t8]) # Python pass arguments to MLIR @@ -123,7 +127,73 @@ def gemm_tma_kernel(): d = np.zeros((M, N), np.float32) gemm_128_128_64(a, b, d) -ref_d = a.astype(np.float16) @ b.astype(np.float16) -np.testing.assert_allclose(d, ref_d, rtol=5e-03, atol=1e-01) -print("PASS") +if os.getenv("MLIR_NVDSL_PRINT_IR") != "1": + # Verify MLIR program with reference computation in python + ref_d = a.astype(np.float16) @ b.astype(np.float16) + np.testing.assert_allclose(d, ref_d, rtol=5e-03, atol=1e-01) + print("PASS") # CHECK-NOT: Mismatched elements +# CHECK: PASS + +# DUMPIR: func.func @gemm_128_128_64(%{{.*}}: memref<128x64xf16>, %{{.*}}: memref<64x128xf16>, %[[ARG2:.*]]: memref<128x128xf32>) attributes {llvm.emit_c_interface} { +# DUMPIR: %[[C128:.*]] = arith.constant 128 : index +# DUMPIR: %[[C64:.*]] = arith.constant 64 : index +# DUMPIR: %[[TMA0:.*]] = nvgpu.tma.create.descriptor %{{.*}} box[%[[C128]], %[[C64]]] : memref<*xf16> -> , swizzle = swizzle_128b, l2promo = none, oob = zero, interleave = none> +# DUMPIR: %[[CAST1:.*]] = memref.cast %{{.*}} : memref<64x128xf16> to memref<*xf16> +# DUMPIR: %[[C64_5:.*]] = arith.constant 64 : index +# DUMPIR: %[[C64_6:.*]] = arith.constant 64 : index +# DUMPIR: %[[TMA1:.*]] = nvgpu.tma.create.descriptor %[[CAST1]] box[%[[C64_5]], %[[C64_6]]] : memref<*xf16> -> , swizzle = swizzle_128b, l2promo = none, oob = zero, interleave = none> +# DUMPIR: %[[THREADID:.*]] = gpu.thread_id x +# DUMPIR: %[[MB:.*]] = nvgpu.mbarrier.create -> > +# DUMPIR: %[[C0:.*]] = arith.constant 0 : index +# DUMPIR: %[[EQ:.*]] = arith.cmpi eq, %[[THREADID]], %[[C0]] : index +# DUMPIR: %[[C0_12:.*]] = arith.constant 0 : index +# DUMPIR: %[[C1_13:.*]] = arith.constant 1 : index +# DUMPIR: nvgpu.mbarrier.init %[[MB]][%[[C0_12]]], %[[C1_13]], predicate = %[[EQ]] : > +# DUMPIR: nvgpu.tma.prefetch.descriptor %[[TMA0]], predicate = %[[EQ]] : , swizzle = swizzle_128b, l2promo = none, oob = zero, interleave = none> +# DUMPIR: nvgpu.tma.prefetch.descriptor %[[TMA1]], predicate = %[[EQ]] : , swizzle = swizzle_128b, l2promo = none, oob = zero, interleave = none> +# DUMPIR: %[[DSM0:.*]] = gpu.dynamic_shared_memory : memref> +# DUMPIR: %[[C0_14:.*]] = arith.constant 0 : index +# DUMPIR: %[[VIEW:.*]] = memref.view %[[DSM0]][%[[C0_14]]][] : memref> to memref<128x64xf16, #gpu.address_space> +# DUMPIR: %[[DSM1:.*]] = gpu.dynamic_shared_memory : memref> +# DUMPIR: %[[C16384:.*]] = arith.constant 16384 : index +# DUMPIR: %[[VIEW_15:.*]] = memref.view %[[DSM1]][%[[C16384]]][] : memref> to memref<64x128xf16, #gpu.address_space> +# DUMPIR: %[[DSM2:.*]] = gpu.dynamic_shared_memory : memref> +# DUMPIR: %[[C0_16:.*]] = arith.constant 0 : index +# DUMPIR: %[[VIEW_17:.*]] = memref.view %[[DSM2]][%[[C0_16]]][] : memref> to memref<128x64xf16, #gpu.address_space> +# DUMPIR: %[[DSM3:.*]] = gpu.dynamic_shared_memory : memref> +# DUMPIR: %[[C16384_18:.*]] = arith.constant 16384 : index +# DUMPIR: %[[VIEW_19:.*]] = 
memref.view %[[DSM3]][%[[C16384_18]]][] : memref> to memref<64x64xf16, #gpu.address_space> +# DUMPIR: %[[DSM4:.*]] = gpu.dynamic_shared_memory : memref> +# DUMPIR: %[[C24576:.*]] = arith.constant 24576 : index +# DUMPIR: %[[VIEW_20:.*]] = memref.view %[[DSM4]][%[[C24576]]][] : memref> to memref<64x64xf16, #gpu.address_space> +# DUMPIR: %[[C0_21:.*]] = arith.constant 0 : index +# DUMPIR: %[[C32768:.*]] = arith.constant 32768 : index +# DUMPIR: nvgpu.mbarrier.arrive.expect_tx %[[MB]][%[[C0_21]]], %[[C32768]], predicate = %[[EQ]] : > +# DUMPIR: %[[C0_22:.*]] = arith.constant 0 : index +# DUMPIR: %[[C0_23:.*]] = arith.constant 0 : index +# DUMPIR: %[[C0_24:.*]] = arith.constant 0 : index +# DUMPIR: nvgpu.tma.async.load %[[TMA0]][%[[C0_23]], %[[C0_24]]], %[[MB]][%[[C0_22]]] to %[[VIEW_17]], predicate = %[[EQ]] : , swizzle = swizzle_128b, l2promo = none, oob = zero, interleave = none>, > -> memref<128x64xf16, #gpu.address_space> +# DUMPIR: %[[C0_25:.*]] = arith.constant 0 : index +# DUMPIR: %[[C0_26:.*]] = arith.constant 0 : index +# DUMPIR: %[[C0_27:.*]] = arith.constant 0 : index +# DUMPIR: nvgpu.tma.async.load %[[TMA1]][%[[C0_26]], %[[C0_27]]], %[[MB]][%[[C0_25]]] to %[[VIEW_19]], predicate = %[[EQ]] : , swizzle = swizzle_128b, l2promo = none, oob = zero, interleave = none>, > -> memref<64x64xf16, #gpu.address_space> +# DUMPIR: %[[C0_28:.*]] = arith.constant 0 : index +# DUMPIR: %[[C64_29:.*]] = arith.constant 64 : index +# DUMPIR: %[[C0_30:.*]] = arith.constant 0 : index +# DUMPIR: nvgpu.tma.async.load %[[TMA1]][%[[C64_29]], %[[C0_30]]], %[[MB]][%[[C0_28]]] to %[[VIEW_20]], predicate = %[[EQ]] : , swizzle = swizzle_128b, l2promo = none, oob = zero, interleave = none>, > -> memref<64x64xf16, #gpu.address_space> +# DUMPIR: %[[C0_31:.*]] = arith.constant 0 : index +# DUMPIR: %[[C10000000:.*]] = arith.constant 10000000 : index +# DUMPIR: %[[FALSE:.*]] = arith.constant false +# DUMPIR: nvgpu.mbarrier.try_wait.parity %[[MB]][%[[C0_31]]], %[[FALSE]], %[[C10000000]] : > +# DUMPIR: %[[WG_ACC:.*]] = nvgpu.warpgroup.mma.init.accumulator -> > +# DUMPIR: %[[GEN0:.*]] = nvgpu.warpgroup.generate.descriptor %[[VIEW]], %[[TMA0]] : memref<128x64xf16, #gpu.address_space>, , swizzle = swizzle_128b, l2promo = none, oob = zero, interleave = none> -> >> +# DUMPIR: %[[GEN1:.*]] = nvgpu.warpgroup.generate.descriptor %[[VIEW_15]], %[[TMA1]] : memref<64x128xf16, #gpu.address_space>, , swizzle = swizzle_128b, l2promo = none, oob = zero, interleave = none> -> >> +# DUMPIR: %[[MMA:.*]] = nvgpu.warpgroup.mma %[[GEN0]], %[[GEN1]], %[[WG_ACC]] {transposeB} : >>, >>, > -> > +# DUMPIR: nvgpu.warpgroup.mma.store %[[MMA]], %{{.*}} : > to memref<128x128xf32> +# DUMPIR: gpu.terminator +# DUMPIR: } +# DUMPIR: %[[CPY3:.*]] = gpu.memcpy async [%{{.*}}] %[[ARG2]], %{{.*}} : memref<128x128xf32>, memref<128x128xf32> +# DUMPIR: gpu.wait async [%[[CPY3]]] +# DUMPIR: return +# DUMPIR: } diff --git a/mlir/test/Examples/NVGPU/Ch4.py b/mlir/test/Examples/NVGPU/Ch4.py index 0e3460ff8d63b..dffafda7f21c9 100644 --- a/mlir/test/Examples/NVGPU/Ch4.py +++ b/mlir/test/Examples/NVGPU/Ch4.py @@ -1,5 +1,9 @@ # RUN: env SUPPORT_LIB=%mlir_cuda_runtime \ -# RUN: %PYTHON %s | FileCheck %s +# RUN: sh -c 'if [[ "%mlir_run_cuda_sm90_tests" == "1" ]]; \ +# RUN: then %PYTHON %s | FileCheck %s; \ +# RUN: else export MLIR_NVDSL_PRINT_IR=1; \ +# RUN: %PYTHON %s | FileCheck %s --check-prefix=DUMPIR; fi' + # ===----------------------------------------------------------------------===// # Chapter 4 : Multistage GEMM with Tensor Core @@ -259,13 +263,13 @@ def 
epilogue(D: WGMMAMatrix, d_dev): @NVDSL.mlir_func def gemm_multistage(a, b, d, num_stages): token_ty = gpu.AsyncTokenType.get() - t1 = gpu.wait(token_ty, []) + t1 = gpu.wait([]) a_dev, t2 = gpu.alloc(a.type, token_ty, [t1], [], []) b_dev, t3 = gpu.alloc(b.type, token_ty, [t2], [], []) d_dev, t4 = gpu.alloc(d.type, token_ty, [t3], [], []) t5 = gpu.memcpy(token_ty, [t4], a_dev, a) t6 = gpu.memcpy(token_ty, [t5], b_dev, b) - t7 = gpu.wait(token_ty, [t6]) + t7 = gpu.wait([t6]) sw = nvgpu.TensorMapSwizzleKind.SWIZZLE_128B a_tma = TMA([128, 64], a.type, swizzle=sw) @@ -297,7 +301,7 @@ def gemm_multistage_kernel(): gemm_multistage_kernel() t8 = gpu.memcpy(token_ty, [t7], d, d_dev) - gpu.wait(None, [t8]) + gpu.wait([t8]) # Python pass arguments to MLIR @@ -313,11 +317,153 @@ def gemm_multistage_kernel(): gemm_multistage(a, b, d, num_stages=7) +if os.getenv("MLIR_NVDSL_PRINT_IR") != "1": + # Verify MLIR with reference computation + ref_d = a.astype(np.float16) @ b.astype(np.float16) + np.testing.assert_allclose(d, ref_d, rtol=5e-03, atol=1e-01) -# Verify MLIR with reference computation -ref_d = a.astype(np.float16) @ b.astype(np.float16) -np.testing.assert_allclose(d, ref_d, rtol=5e-03, atol=1e-01) - - -print("PASS") + print("PASS") # CHECK-NOT: Mismatched elements +# CHECK: PASS + +# DUMPIR: func.func @gemm_multistage(%{{.*}}: memref<512x1024xf16>, %{{.*}}: memref<1024x256xf16>, %{{.*}}: memref<512x256xf32>) attributes {llvm.emit_c_interface} { +# DUMPIR: scf.if %{{.*}} { +# DUMPIR: %[[C0_INIT:.*]] = arith.constant 0 : index +# DUMPIR: %[[C7:.*]] = arith.constant 7 : index +# DUMPIR: %[[C1_INIT:.*]] = arith.constant 1 : index +# DUMPIR: scf.for %arg15 = %[[C0_INIT]] to %[[C7]] step %[[C1_INIT]] { +# DUMPIR: %[[C1_MBAR:.*]] = arith.constant 1 : index +# DUMPIR: nvgpu.mbarrier.init %{{.*}}[%arg15], %[[C1_MBAR]] : , num_barriers = 7> +# DUMPIR: } +# DUMPIR: nvgpu.tma.prefetch.descriptor %{{.*}} : , swizzle = swizzle_128b, l2promo = none, oob = zero, interleave = none> +# DUMPIR: nvgpu.tma.prefetch.descriptor %{{.*}} : , swizzle = swizzle_128b, l2promo = none, oob = zero, interleave = none> +# DUMPIR: } +# DUMPIR: %[[C0_PROLOGUE:.*]] = arith.constant 0 : index +# DUMPIR: %[[C6:.*]] = arith.constant 6 : index +# DUMPIR: %[[C1_PROLOGUE:.*]] = arith.constant 1 : index +# DUMPIR: scf.for %arg15 = %[[C0_PROLOGUE]] to %[[C6]] step %[[C1_PROLOGUE]] { +# DUMPIR: %[[BID_X_P:.*]] = gpu.block_id x +# DUMPIR: %[[BID_Y_P:.*]] = gpu.block_id y +# DUMPIR: %[[C128_P1:.*]] = arith.constant 128 : index +# DUMPIR: %[[DIMX_P:.*]] = arith.muli %[[BID_X_P]], %[[C128_P1]] : index +# DUMPIR: %[[C128_P2:.*]] = arith.constant 128 : index +# DUMPIR: %[[DIMY_P:.*]] = arith.muli %[[BID_Y_P]], %[[C128_P2]] : index +# DUMPIR: %{{.*}} = gpu.thread_id x +# DUMPIR: %[[TID_X_P:.*]] = gpu.thread_id x +# DUMPIR: %[[C0_P:.*]] = arith.constant 0 : index +# DUMPIR: %[[PRED_P:.*]] = arith.cmpi eq, %[[TID_X_P]], %[[C0_P]] : index +# DUMPIR: %[[C16384_P1:.*]] = arith.constant 16384 : index +# DUMPIR: %[[OFF_A_P:.*]] = arith.muli %arg15, %[[C16384_P1]] : index +# DUMPIR: %[[C16384_P2:.*]] = arith.constant 16384 : index +# DUMPIR: %[[OFF_B_BASE_P:.*]] = arith.muli %arg15, %[[C16384_P2]] : index +# DUMPIR: %[[C114688:.*]] = arith.constant 114688 : index +# DUMPIR: %[[OFF_B1_P:.*]] = arith.addi %[[OFF_B_BASE_P]], %[[C114688]] : index +# DUMPIR: %[[C8192:.*]] = arith.constant 8192 : index +# DUMPIR: %[[OFF_B2_P:.*]] = arith.addi %[[OFF_B1_P]], %[[C8192]] : index +# DUMPIR: %[[SMEM_A_P:.*]] = gpu.dynamic_shared_memory : memref> +# DUMPIR: 
%[[VIEW_A_P:.*]] = memref.view %[[SMEM_A_P]][%[[OFF_A_P]]][] : memref> to memref<128x64xf16, #gpu.address_space> +# DUMPIR: %[[SMEM_B1_P:.*]] = gpu.dynamic_shared_memory : memref> +# DUMPIR: %[[VIEW_B1_P:.*]] = memref.view %[[SMEM_B1_P]][%[[OFF_B1_P]]][] : memref> to memref<64x64xf16, #gpu.address_space> +# DUMPIR: %[[SMEM_B2_P:.*]] = gpu.dynamic_shared_memory : memref> +# DUMPIR: %[[VIEW_B2_P:.*]] = memref.view %[[SMEM_B2_P]][%[[OFF_B2_P]]][] : memref> to memref<64x64xf16, #gpu.address_space> +# DUMPIR: %[[C32768:.*]] = arith.constant 32768 : index +# DUMPIR: nvgpu.mbarrier.arrive.expect_tx %{{.*}}[%arg15], %[[C32768]], predicate = %[[PRED_P]] : , num_barriers = 7> +# DUMPIR: %[[C64_K_P:.*]] = arith.constant 64 : index +# DUMPIR: %[[K_COORD_P:.*]] = arith.muli %arg15, %[[C64_K_P]] : index +# DUMPIR: nvgpu.tma.async.load %{{.*}}[%[[K_COORD_P]], %[[DIMX_P]]], %{{.*}}[%arg15] to %[[VIEW_A_P]], predicate = %[[PRED_P]] : , swizzle = swizzle_128b, l2promo = none, oob = zero, interleave = none>, , num_barriers = 7> -> memref<128x64xf16, #gpu.address_space> +# DUMPIR: nvgpu.tma.async.load %{{.*}}[%[[DIMY_P]], %[[K_COORD_P]]], %{{.*}}[%arg15] to %[[VIEW_B1_P]], predicate = %[[PRED_P]] : , swizzle = swizzle_128b, l2promo = none, oob = zero, interleave = none>, , num_barriers = 7> -> memref<64x64xf16, #gpu.address_space> +# DUMPIR: %[[C64_OFF:.*]] = arith.constant 64 : index +# DUMPIR: %[[DIMY_P_OFF:.*]] = arith.addi %[[DIMY_P]], %[[C64_OFF]] : index +# DUMPIR: nvgpu.tma.async.load %{{.*}}[%[[DIMY_P_OFF]], %[[K_COORD_P]]], %{{.*}}[%arg15] to %[[VIEW_B2_P]], predicate = %[[PRED_P]] : , swizzle = swizzle_128b, l2promo = none, oob = zero, interleave = none>, , num_barriers = 7> -> memref<64x64xf16, #gpu.address_space> +# DUMPIR: } +# DUMPIR: %[[TID_X_LOOP:.*]] = gpu.thread_id x +# DUMPIR: %[[ACC_INIT:.*]] = nvgpu.warpgroup.mma.init.accumulator -> > +# DUMPIR: %[[FALSE_LOOP:.*]] = arith.constant false +# DUMPIR: %[[C0_LOOP:.*]] = arith.constant 0 : index +# DUMPIR: %[[C16_LOOP:.*]] = arith.constant 16 : index +# DUMPIR: %[[C1_LOOP:.*]] = arith.constant 1 : index +# DUMPIR: %[[LOOP_RES:.*]]:2 = scf.for %arg15 = %[[C0_LOOP]] to %[[C16_LOOP]] step %[[C1_LOOP]] iter_args(%arg16 = %[[ACC_INIT]], %arg17 = %[[FALSE_LOOP]]) -> (!nvgpu.warpgroup.accumulator>, i1) { +# DUMPIR: %[[C7_L:.*]] = arith.constant 7 : index +# DUMPIR: %[[STAGE_L:.*]] = arith.remui %arg15, %[[C7_L]] : index +# DUMPIR: %[[C10M:.*]] = arith.constant 10000000 : index +# DUMPIR: nvgpu.mbarrier.try_wait.parity %{{.*}}[%[[STAGE_L]]], %arg17, %[[C10M]] : , num_barriers = 7> +# DUMPIR: %[[C16384_L:.*]] = arith.constant 16384 : index +# DUMPIR: %[[OFF_A_L:.*]] = arith.muli %[[STAGE_L]], %[[C16384_L]] : index +# DUMPIR: %[[C114688_L:.*]] = arith.constant 114688 : index +# DUMPIR: %[[OFF_B_L:.*]] = arith.addi %[[OFF_A_L]], %[[C114688_L]] : index +# DUMPIR: %[[SMEM_A_L:.*]] = gpu.dynamic_shared_memory : memref> +# DUMPIR: %[[VIEW_A_L:.*]] = memref.view %[[SMEM_A_L]][%[[OFF_A_L]]][] : memref> to memref<128x64xf16, #gpu.address_space> +# DUMPIR: %[[SMEM_B_L:.*]] = gpu.dynamic_shared_memory : memref> +# DUMPIR: %[[VIEW_B_L:.*]] = memref.view %[[SMEM_B_L]][%[[OFF_B_L]]][] : memref> to memref<64x128xf16, #gpu.address_space> +# DUMPIR: %[[DESC_A_L:.*]] = nvgpu.warpgroup.generate.descriptor %[[VIEW_A_L]], %{{.*}} : memref<128x64xf16, #gpu.address_space>, , swizzle = swizzle_128b, l2promo = none, oob = zero, interleave = none> -> >> +# DUMPIR: %[[DESC_B_L:.*]] = nvgpu.warpgroup.generate.descriptor %[[VIEW_B_L]], %{{.*}} : memref<64x128xf16, 
#gpu.address_space>, , swizzle = swizzle_128b, l2promo = none, oob = zero, interleave = none> -> >> +# DUMPIR: %[[ACC_L:.*]] = nvgpu.warpgroup.mma %[[DESC_A_L]], %[[DESC_B_L]], %arg16 {transposeB} : >>, >>, > -> > +# DUMPIR: %[[C6_NEXT:.*]] = arith.constant 6 : index +# DUMPIR: %[[ITER_NEXT:.*]] = arith.addi %arg15, %[[C6_NEXT]] : index +# DUMPIR: %[[C16_CMP:.*]] = arith.constant 16 : index +# DUMPIR: %[[IN_RANGE:.*]] = arith.cmpi ult, %[[ITER_NEXT]], %[[C16_CMP]] : index +# DUMPIR: %[[C0_CMP:.*]] = arith.constant 0 : index +# DUMPIR: %[[IS_THREAD0_L:.*]] = arith.cmpi eq, %[[TID_X_LOOP]], %[[C0_CMP]] : index +# DUMPIR: %[[DO_LOAD:.*]] = arith.andi %[[IN_RANGE]], %[[IS_THREAD0_L]] : i1 +# DUMPIR: %[[C6_STAGE:.*]] = arith.constant 6 : index +# DUMPIR: %[[STAGE_NEXT_L:.*]] = arith.addi %arg15, %[[C6_STAGE]] : index +# DUMPIR: %[[C7_MOD:.*]] = arith.constant 7 : index +# DUMPIR: %[[STAGE_LOAD:.*]] = arith.remui %[[STAGE_NEXT_L]], %[[C7_MOD]] : index +# DUMPIR: %[[BID_X_L:.*]] = gpu.block_id x +# DUMPIR: %[[BID_Y_L:.*]] = gpu.block_id y +# DUMPIR: %[[C128_L1:.*]] = arith.constant 128 : index +# DUMPIR: %[[DIMX_L:.*]] = arith.muli %[[BID_X_L]], %[[C128_L1]] : index +# DUMPIR: %[[C128_L2:.*]] = arith.constant 128 : index +# DUMPIR: %[[DIMY_L:.*]] = arith.muli %[[BID_Y_L]], %[[C128_L2]] : index +# DUMPIR: %[[TID_X_L1:.*]] = gpu.thread_id x +# DUMPIR: %[[TID_X_L2:.*]] = gpu.thread_id x +# DUMPIR: %[[C16384_LA1:.*]] = arith.constant 16384 : index +# DUMPIR: %[[OFF_A_LOAD:.*]] = arith.muli %[[STAGE_LOAD]], %[[C16384_LA1]] : index +# DUMPIR: %[[C16384_LA2:.*]] = arith.constant 16384 : index +# DUMPIR: %[[OFF_B_BASE_LOAD:.*]] = arith.muli %[[STAGE_LOAD]], %[[C16384_LA2]] : index +# DUMPIR: %[[C114688_LOAD:.*]] = arith.constant 114688 : index +# DUMPIR: %[[OFF_B1_LOAD:.*]] = arith.addi %[[OFF_B_BASE_LOAD]], %[[C114688_LOAD]] : index +# DUMPIR: %[[C8192_LOAD:.*]] = arith.constant 8192 : index +# DUMPIR: %[[OFF_B2_LOAD:.*]] = arith.addi %[[OFF_B1_LOAD]], %[[C8192_LOAD]] : index +# DUMPIR: %[[SMEM_A_LOAD:.*]] = gpu.dynamic_shared_memory : memref> +# DUMPIR: %[[VIEW_A_LOAD:.*]] = memref.view %[[SMEM_A_LOAD]][%[[OFF_A_LOAD]]][] : memref> to memref<128x64xf16, #gpu.address_space> +# DUMPIR: %[[SMEM_B1_LOAD:.*]] = gpu.dynamic_shared_memory : memref> +# DUMPIR: %[[VIEW_B1_LOAD:.*]] = memref.view %[[SMEM_B1_LOAD]][%[[OFF_B1_LOAD]]][] : memref> to memref<64x64xf16, #gpu.address_space> +# DUMPIR: %[[SMEM_B2_LOAD:.*]] = gpu.dynamic_shared_memory : memref> +# DUMPIR: %[[VIEW_B2_LOAD:.*]] = memref.view %[[SMEM_B2_LOAD]][%[[OFF_B2_LOAD]]][] : memref> to memref<64x64xf16, #gpu.address_space> +# DUMPIR: %[[C32768_LOAD:.*]] = arith.constant 32768 : index +# DUMPIR: nvgpu.mbarrier.arrive.expect_tx %{{.*}}[%[[STAGE_LOAD]]], %[[C32768_LOAD]], predicate = %[[DO_LOAD]] : , num_barriers = 7> +# DUMPIR: %[[C64_K_LOAD:.*]] = arith.constant 64 : index +# DUMPIR: %[[K_COORD_LOAD:.*]] = arith.muli %[[STAGE_NEXT_L]], %[[C64_K_LOAD]] : index +# DUMPIR: nvgpu.tma.async.load %{{.*}}[%[[K_COORD_LOAD]], %[[DIMX_L]]], %{{.*}}[%[[STAGE_LOAD]]] to %[[VIEW_A_LOAD]], predicate = %[[DO_LOAD]] : , swizzle = swizzle_128b, l2promo = none, oob = zero, interleave = none>, , num_barriers = 7> -> memref<128x64xf16, #gpu.address_space> +# DUMPIR: nvgpu.tma.async.load %{{.*}}[%[[DIMY_L]], %[[K_COORD_LOAD]]], %{{.*}}[%[[STAGE_LOAD]]] to %[[VIEW_B1_LOAD]], predicate = %[[DO_LOAD]] : , swizzle = swizzle_128b, l2promo = none, oob = zero, interleave = none>, , num_barriers = 7> -> memref<64x64xf16, #gpu.address_space> +# DUMPIR: %[[C64_OFF_LOAD:.*]] = 
arith.constant 64 : index +# DUMPIR: %[[DIMY_L_OFF:.*]] = arith.addi %[[DIMY_L]], %[[C64_OFF_LOAD]] : index +# DUMPIR: nvgpu.tma.async.load %{{.*}}[%[[DIMY_L_OFF]], %[[K_COORD_LOAD]]], %{{.*}}[%[[STAGE_LOAD]]] to %[[VIEW_B2_LOAD]], predicate = %[[DO_LOAD]] : , swizzle = swizzle_128b, l2promo = none, oob = zero, interleave = none>, , num_barriers = 7> -> memref<64x64xf16, #gpu.address_space> +# DUMPIR: %[[C6_FLIP:.*]] = arith.constant 6 : index +# DUMPIR: %[[IS_STAGE6:.*]] = arith.cmpi eq, %[[STAGE_L]], %[[C6_FLIP]] : index +# DUMPIR: %[[TRUE:.*]] = arith.constant true +# DUMPIR: %[[PARITY_FLIP:.*]] = arith.xori %arg17, %[[TRUE]] : i1 +# DUMPIR: %[[NEW_PARITY:.*]] = arith.select %[[IS_STAGE6]], %[[PARITY_FLIP]], %arg17 : i1 +# DUMPIR: scf.yield %[[ACC_L]], %[[NEW_PARITY]] : !nvgpu.warpgroup.accumulator>, i1 +# DUMPIR: } +# DUMPIR: nvvm.wgmma.wait.group.sync.aligned 0 +# DUMPIR: %[[TID_X_EPI:.*]] = gpu.thread_id x +# DUMPIR: %[[BID_X_EPI:.*]] = gpu.block_id x +# DUMPIR: %[[BID_Y_EPI:.*]] = gpu.block_id y +# DUMPIR: %[[C128_EPI1:.*]] = arith.constant 128 : index +# DUMPIR: %[[DIMX_EPI:.*]] = arith.muli %[[BID_X_EPI]], %[[C128_EPI1]] : index +# DUMPIR: %[[C128_EPI2:.*]] = arith.constant 128 : index +# DUMPIR: %[[DIMY_EPI:.*]] = arith.muli %[[BID_Y_EPI]], %[[C128_EPI2]] : index +# DUMPIR: %[[SMEM_EPI:.*]] = gpu.dynamic_shared_memory : memref> +# DUMPIR: %[[C0_VIEW:.*]] = arith.constant 0 : index +# DUMPIR: %[[VIEW_EPI:.*]] = memref.view %[[SMEM_EPI]][%[[C0_VIEW]]][] : memref> to memref<128x128xf32, #gpu.address_space> +# DUMPIR: %[[SUBVIEW_EPI:.*]] = memref.subview %{{.*}}[%[[DIMX_EPI]], %[[DIMY_EPI]]] [128, 128] [1, 1] : memref<512x256xf32> to memref<128x128xf32, strided<[256, 1], offset: ?>> +# DUMPIR: nvgpu.warpgroup.mma.store %[[LOOP_RES]]#0, %[[VIEW_EPI]] : > to memref<128x128xf32, #gpu.address_space> +# DUMPIR: gpu.barrier +# DUMPIR: %[[C0_STORE:.*]] = arith.constant 0 : index +# DUMPIR: %[[C128_STORE:.*]] = arith.constant 128 : index +# DUMPIR: %[[C1_STORE:.*]] = arith.constant 1 : index +# DUMPIR: scf.for %arg15 = %[[C0_STORE]] to %[[C128_STORE]] step %[[C1_STORE]] { +# DUMPIR: %[[VAL_LOAD:.*]] = memref.load %[[VIEW_EPI]][%arg15, %[[TID_X_EPI]]] : memref<128x128xf32, #gpu.address_space> +# DUMPIR: memref.store %[[VAL_LOAD]], %[[SUBVIEW_EPI]][%arg15, %[[TID_X_EPI]]] : memref<128x128xf32, strided<[256, 1], offset: ?>> diff --git a/mlir/test/Examples/NVGPU/Ch5.py b/mlir/test/Examples/NVGPU/Ch5.py index f98cfd758a75f..b725e50d8f44b 100644 --- a/mlir/test/Examples/NVGPU/Ch5.py +++ b/mlir/test/Examples/NVGPU/Ch5.py @@ -1,5 +1,9 @@ # RUN: env SUPPORT_LIB=%mlir_cuda_runtime \ -# RUN: %PYTHON %s | FileCheck %s +# RUN: sh -c 'if [[ "%mlir_run_cuda_sm90_tests" == "1" ]]; \ +# RUN: then %PYTHON %s | FileCheck %s; \ +# RUN: else export MLIR_NVDSL_PRINT_IR=1; \ +# RUN: %PYTHON %s | FileCheck %s --check-prefix=DUMPIR; fi' + # ===----------------------------------------------------------------------===// # Chapter 5 : Warp Specialized GEMM with Tensor Core @@ -156,7 +160,7 @@ def producer_loop( ): phase = const(True, ty=T.bool()) - for iv, phase in scf.for_(0, (K // TILE_K), 1, [phase]): + for iv, phase, _ in scf.for_(0, (K // TILE_K), 1, [phase]): stage = iv % num_stages # Wait MMA to be done mbar_mma[stage].try_wait(phase) @@ -253,13 +257,13 @@ def epilogue(D: WGMMAMatrix, d_dev): @NVDSL.mlir_func def gemm_warp_specialized(a, b, d, num_stages): token_ty = gpu.AsyncTokenType.get() - t1 = gpu.wait(token_ty, []) + t1 = gpu.wait([]) a_dev, t2 = gpu.alloc(a.type, token_ty, [t1], [], []) b_dev, t3 = 
gpu.alloc(b.type, token_ty, [t2], [], []) d_dev, t4 = gpu.alloc(d.type, token_ty, [t3], [], []) t5 = gpu.memcpy(token_ty, [t4], a_dev, a) t6 = gpu.memcpy(token_ty, [t5], b_dev, b) - t7 = gpu.wait(token_ty, [t6]) + t7 = gpu.wait([t6]) sw = nvgpu.TensorMapSwizzleKind.SWIZZLE_128B a_tma = TMA([128, 64], a.type, swizzle=sw) @@ -295,7 +299,7 @@ def gemm_warp_specialized_kernel(): gemm_warp_specialized_kernel() t8 = gpu.memcpy(token_ty, [t7], d, d_dev) - gpu.wait(None, [t8]) + gpu.wait([t8]) # Python pass arguments to MLIR @@ -311,11 +315,166 @@ def gemm_warp_specialized_kernel(): gemm_warp_specialized(a, b, d, num_stages=7) +if os.getenv("MLIR_NVDSL_PRINT_IR") != "1": + # Verify MLIR with reference computation + ref_d = a.astype(np.float16) @ b.astype(np.float16) + np.testing.assert_allclose(d, ref_d, rtol=5e-03, atol=1e-01) -# Verify MLIR with reference computation -ref_d = a.astype(np.float16) @ b.astype(np.float16) -np.testing.assert_allclose(d, ref_d, rtol=5e-03, atol=1e-01) - - -print("PASS") + print("PASS") # CHECK-NOT: Mismatched elements +# CHECK: PASS + +# DUMPIR: %[[TID_X:.*]] = gpu.thread_id x +# DUMPIR: %[[C128:.*]] = arith.constant 128 : index +# DUMPIR: %[[REM1:.*]] = arith.remui %[[TID_X]], %[[C128]] : index +# DUMPIR: %[[C0:.*]] = arith.constant 0 : index +# DUMPIR: %[[IS_PRIMARY:.*]] = arith.cmpi eq, %[[REM1]], %[[C0]] : index +# DUMPIR: %[[C128_1:.*]] = arith.constant 128 : index +# DUMPIR: %[[DIV1:.*]] = arith.divui %[[TID_X]], %[[C128_1]] : index +# DUMPIR: %[[C1:.*]] = arith.constant 1 : index +# DUMPIR: %[[IS_PRODUCER:.*]] = arith.cmpi eq, %[[DIV1]], %[[C1]] : index +# DUMPIR: %[[TID_X_2:.*]] = gpu.thread_id x +# DUMPIR: %[[C128_2:.*]] = arith.constant 128 : index +# DUMPIR: %[[REM2:.*]] = arith.remui %[[TID_X_2]], %[[C128_2]] : index +# DUMPIR: %[[C0_2:.*]] = arith.constant 0 : index +# DUMPIR: %[[IS_PRIMARY_2:.*]] = arith.cmpi eq, %[[REM2]], %[[C0_2]] : index +# DUMPIR: %[[C128_3:.*]] = arith.constant 128 : index +# DUMPIR: %[[DIV2:.*]] = arith.divui %[[TID_X_2]], %[[C128_3]] : index +# DUMPIR: %[[C0_3:.*]] = arith.constant 0 : index +# DUMPIR: %[[IS_CONSUMER:.*]] = arith.cmpi eq, %[[DIV2]], %[[C0_3]] : index +# DUMPIR: %[[TID_X_3:.*]] = gpu.thread_id x +# DUMPIR: %[[MBAR_MMA:.*]] = nvgpu.mbarrier.create -> , num_barriers = 7> +# DUMPIR: %[[MBAR_TMA:.*]] = nvgpu.mbarrier.create -> , num_barriers = 7> +# DUMPIR: %[[C0_4:.*]] = arith.constant 0 : index +# DUMPIR: %[[IS_THREAD0:.*]] = arith.cmpi eq, %[[TID_X_3]], %[[C0_4]] : index +# DUMPIR: scf.if %[[IS_THREAD0]] { +# DUMPIR: %[[C0_INIT:.*]] = arith.constant 0 : index +# DUMPIR: %[[C7:.*]] = arith.constant 7 : index +# DUMPIR: %[[C1_INIT:.*]] = arith.constant 1 : index +# DUMPIR: scf.for %arg15 = %[[C0_INIT]] to %[[C7]] step %[[C1_INIT]] { +# DUMPIR: %[[C1_INIT_VAL:.*]] = arith.constant 1 : index +# DUMPIR: nvgpu.mbarrier.init %[[MBAR_MMA]][%arg15], %[[C1_INIT_VAL]] : , num_barriers = 7> +# DUMPIR: %[[C1_INIT_VAL_2:.*]] = arith.constant 1 : index +# DUMPIR: nvgpu.mbarrier.init %[[MBAR_TMA]][%arg15], %[[C1_INIT_VAL_2]] : , num_barriers = 7> +# DUMPIR: } +# DUMPIR: nvgpu.tma.prefetch.descriptor %{{.*}} : , swizzle = swizzle_128b, l2promo = none, oob = zero, interleave = none> +# DUMPIR: nvgpu.tma.prefetch.descriptor %{{.*}} : , swizzle = swizzle_128b, l2promo = none, oob = zero, interleave = none> +# DUMPIR: } +# DUMPIR: scf.if %[[IS_PRODUCER]] { +# DUMPIR: nvvm.setmaxregister decrease 40 +# DUMPIR: %[[TRUE:.*]] = arith.constant true +# DUMPIR: %[[C0_PROD:.*]] = arith.constant 0 : index +# DUMPIR: %[[C16:.*]] = 
arith.constant 16 : index +# DUMPIR: %[[C1_PROD:.*]] = arith.constant 1 : index +# DUMPIR: %[[PROD_LOOP:.*]] = scf.for %arg15 = %[[C0_PROD]] to %[[C16]] step %[[C1_PROD]] iter_args(%arg16 = %[[TRUE]]) -> (i1) { +# DUMPIR: %[[C7_PROD:.*]] = arith.constant 7 : index +# DUMPIR: %[[SLOT:.*]] = arith.remui %arg15, %[[C7_PROD]] : index +# DUMPIR: %[[TIMEOUT:.*]] = arith.constant 10000000 : index +# DUMPIR: nvgpu.mbarrier.try_wait.parity %[[MBAR_MMA]][%[[SLOT]]], %arg16, %[[TIMEOUT]] : , num_barriers = 7> +# DUMPIR: %[[C6:.*]] = arith.constant 6 : index +# DUMPIR: %[[IS_LAST:.*]] = arith.cmpi eq, %[[SLOT]], %[[C6]] : index +# DUMPIR: %[[TRUE_2:.*]] = arith.constant true +# DUMPIR: %[[FLIP:.*]] = arith.xori %arg16, %[[TRUE_2]] : i1 +# DUMPIR: %[[PHASE:.*]] = arith.select %[[IS_LAST]], %[[FLIP]], %arg16 : i1 +# DUMPIR: %[[BID_X:.*]] = gpu.block_id x +# DUMPIR: %[[BID_Y:.*]] = gpu.block_id y +# DUMPIR: %[[C128_TILE:.*]] = arith.constant 128 : index +# DUMPIR: %[[DIM_X:.*]] = arith.muli %[[BID_X]], %[[C128_TILE]] : index +# DUMPIR: %[[C128_TILE_2:.*]] = arith.constant 128 : index +# DUMPIR: %[[DIM_Y:.*]] = arith.muli %[[BID_Y]], %[[C128_TILE_2]] : index +# DUMPIR: %[[TID_PROD:.*]] = gpu.thread_id x +# DUMPIR: %[[C16384:.*]] = arith.constant 16384 : index +# DUMPIR: %[[OFF_A:.*]] = arith.muli %[[SLOT]], %[[C16384]] : index +# DUMPIR: %[[C16384_2:.*]] = arith.constant 16384 : index +# DUMPIR: %[[OFF_B_BASE:.*]] = arith.muli %[[SLOT]], %[[C16384_2]] : index +# DUMPIR: %[[C114688:.*]] = arith.constant 114688 : index +# DUMPIR: %[[OFF_B1:.*]] = arith.addi %[[OFF_B_BASE]], %[[C114688]] : index +# DUMPIR: %[[C8192:.*]] = arith.constant 8192 : index +# DUMPIR: %[[OFF_B2:.*]] = arith.addi %[[OFF_B1]], %[[C8192]] : index +# DUMPIR: %[[SMEM:.*]] = gpu.dynamic_shared_memory : memref> +# DUMPIR: %[[VIEW_A:.*]] = memref.view %[[SMEM]][%[[OFF_A]]][] : memref> to memref<128x64xf16, #gpu.address_space> +# DUMPIR: %[[SMEM_2:.*]] = gpu.dynamic_shared_memory : memref> +# DUMPIR: %[[VIEW_B1:.*]] = memref.view %[[SMEM_2]][%[[OFF_B1]]][] : memref> to memref<64x64xf16, #gpu.address_space> +# DUMPIR: %[[SMEM_3:.*]] = gpu.dynamic_shared_memory : memref> +# DUMPIR: %[[VIEW_B2:.*]] = memref.view %[[SMEM_3]][%[[OFF_B2]]][] : memref> to memref<64x64xf16, #gpu.address_space> +# DUMPIR: %[[TX_COUNT:.*]] = arith.constant 32768 : index +# DUMPIR: nvgpu.mbarrier.arrive.expect_tx %[[MBAR_TMA]][%[[SLOT]]], %[[TX_COUNT]], predicate = %[[IS_PRIMARY]] : , num_barriers = 7> +# DUMPIR: %[[C128_WG:.*]] = arith.constant 128 : index +# DUMPIR: %[[TID_MOD:.*]] = arith.remui %[[TID_PROD]], %[[C128_WG]] : index +# DUMPIR: %[[C0_TMA:.*]] = arith.constant 0 : index +# DUMPIR: %[[IS_TMA_THREAD:.*]] = arith.cmpi eq, %[[TID_MOD]], %[[C0_TMA]] : index +# DUMPIR: %[[C64:.*]] = arith.constant 64 : index +# DUMPIR: %[[K_COORD:.*]] = arith.muli %arg15, %[[C64]] : index +# DUMPIR: nvgpu.tma.async.load %{{.*}}[%[[K_COORD]], %[[DIM_X]]], %[[MBAR_TMA]][%[[SLOT]]] to %[[VIEW_A]], predicate = %[[IS_TMA_THREAD]] : , swizzle = swizzle_128b, l2promo = none, oob = zero, interleave = none>, , num_barriers = 7> -> memref<128x64xf16, #gpu.address_space> +# DUMPIR: nvgpu.tma.async.load %{{.*}}[%[[DIM_Y]], %[[K_COORD]]], %[[MBAR_TMA]][%[[SLOT]]] to %[[VIEW_B1]], predicate = %[[IS_TMA_THREAD]] : , swizzle = swizzle_128b, l2promo = none, oob = zero, interleave = none>, , num_barriers = 7> -> memref<64x64xf16, #gpu.address_space> +# DUMPIR: %[[C64_OFF:.*]] = arith.constant 64 : index +# DUMPIR: %[[DIM_Y_OFF:.*]] = arith.addi %[[DIM_Y]], %[[C64_OFF]] : index +# DUMPIR: 
nvgpu.tma.async.load %{{.*}}[%[[DIM_Y_OFF]], %[[K_COORD]]], %[[MBAR_TMA]][%[[SLOT]]] to %[[VIEW_B2]], predicate = %[[IS_TMA_THREAD]] : , swizzle = swizzle_128b, l2promo = none, oob = zero, interleave = none>, , num_barriers = 7> -> memref<64x64xf16, #gpu.address_space> +# DUMPIR: scf.yield %[[PHASE]] : i1 +# DUMPIR: } +# DUMPIR: } +# DUMPIR: scf.if %[[IS_CONSUMER]] { +# DUMPIR: nvvm.setmaxregister increase 232 +# DUMPIR: %[[FALSE:.*]] = arith.constant false +# DUMPIR: %[[ACC_INIT:.*]] = nvgpu.warpgroup.mma.init.accumulator -> > +# DUMPIR: %[[C0_CONS:.*]] = arith.constant 0 : index +# DUMPIR: %[[C16_CONS:.*]] = arith.constant 16 : index +# DUMPIR: %[[C1_CONS:.*]] = arith.constant 1 : index +# DUMPIR: %[[CONS_LOOP:.*]]:2 = scf.for %arg15 = %[[C0_CONS]] to %[[C16_CONS]] step %[[C1_CONS]] iter_args(%arg16 = %[[ACC_INIT]], %arg17 = %[[FALSE]]) -> (!nvgpu.warpgroup.accumulator>, i1) { +# DUMPIR: %[[C7_CONS:.*]] = arith.constant 7 : index +# DUMPIR: %[[SLOT_CONS:.*]] = arith.remui %arg15, %[[C7_CONS]] : index +# DUMPIR: %[[TIMEOUT_CONS:.*]] = arith.constant 10000000 : index +# DUMPIR: nvgpu.mbarrier.try_wait.parity %[[MBAR_TMA]][%[[SLOT_CONS]]], %arg17, %[[TIMEOUT_CONS]] : , num_barriers = 7> +# DUMPIR: %[[C16384_CONS:.*]] = arith.constant 16384 : index +# DUMPIR: %[[OFF_A_CONS:.*]] = arith.muli %[[SLOT_CONS]], %[[C16384_CONS]] : index +# DUMPIR: %[[C114688_CONS:.*]] = arith.constant 114688 : index +# DUMPIR: %[[OFF_B_CONS:.*]] = arith.addi %[[OFF_A_CONS]], %[[C114688_CONS]] : index +# DUMPIR: %[[SMEM_CONS:.*]] = gpu.dynamic_shared_memory : memref> +# DUMPIR: %[[VIEW_A_CONS:.*]] = memref.view %[[SMEM_CONS]][%[[OFF_A_CONS]]][] : memref> to memref<128x64xf16, #gpu.address_space> +# DUMPIR: %[[SMEM_CONS_2:.*]] = gpu.dynamic_shared_memory : memref> +# DUMPIR: %[[VIEW_B_CONS:.*]] = memref.view %[[SMEM_CONS_2]][%[[OFF_B_CONS]]][] : memref> to memref<64x128xf16, #gpu.address_space> +# DUMPIR: %[[DESC_A:.*]] = nvgpu.warpgroup.generate.descriptor %[[VIEW_A_CONS]], %{{.*}} : memref<128x64xf16, #gpu.address_space>, , swizzle = swizzle_128b, l2promo = none, oob = zero, interleave = none> -> >> +# DUMPIR: %[[DESC_B:.*]] = nvgpu.warpgroup.generate.descriptor %[[VIEW_B_CONS]], %{{.*}} : memref<64x128xf16, #gpu.address_space>, , swizzle = swizzle_128b, l2promo = none, oob = zero, interleave = none> -> >> +# DUMPIR: %[[ACC:.*]] = nvgpu.warpgroup.mma %[[DESC_A]], %[[DESC_B]], %arg16 {transposeB} : >>, >>, > -> > +# DUMPIR: %[[C0_CMP:.*]] = arith.constant 0 : index +# DUMPIR: %[[IS_NOT_FIRST:.*]] = arith.cmpi ugt, %arg15, %[[C0_CMP]] : index +# DUMPIR: %[[ARRIVE_PRED:.*]] = arith.andi %[[IS_NOT_FIRST]], %[[IS_PRIMARY_2]] : i1 +# DUMPIR: scf.if %[[ARRIVE_PRED]] { +# DUMPIR: %[[C0_ARR:.*]] = arith.constant 0 : index +# DUMPIR: %[[IS_ZERO:.*]] = arith.cmpi eq, %[[SLOT_CONS]], %[[C0_ARR]] : index +# DUMPIR: %[[C6_WRAP:.*]] = arith.constant 6 : index +# DUMPIR: %[[C1_SUB:.*]] = arith.constant 1 : index +# DUMPIR: %[[PREV_SLOT:.*]] = arith.subi %[[SLOT_CONS]], %[[C1_SUB]] : index +# DUMPIR: %[[BARR_ID:.*]] = arith.select %[[IS_ZERO]], %[[C6_WRAP]], %[[PREV_SLOT]] : index +# DUMPIR: %{{.*}} = nvgpu.mbarrier.arrive %[[MBAR_MMA]][%[[BARR_ID]]] : , num_barriers = 7> -> !nvgpu.mbarrier.token +# DUMPIR: } +# DUMPIR: %[[C6_LAST:.*]] = arith.constant 6 : index +# DUMPIR: %[[IS_LAST_CONS:.*]] = arith.cmpi eq, %[[SLOT_CONS]], %[[C6_LAST]] : index +# DUMPIR: %[[TRUE_CONS:.*]] = arith.constant true +# DUMPIR: %[[FLIP_CONS:.*]] = arith.xori %arg17, %[[TRUE_CONS]] : i1 +# DUMPIR: %[[PHASE_CONS:.*]] = arith.select %[[IS_LAST_CONS]], 
%[[FLIP_CONS]], %arg17 : i1 +# DUMPIR: scf.yield %[[ACC]], %[[PHASE_CONS]] : !nvgpu.warpgroup.accumulator>, i1 +# DUMPIR: } +# DUMPIR: nvvm.wgmma.wait.group.sync.aligned 0 +# DUMPIR: %[[TID_EPI:.*]] = gpu.thread_id x +# DUMPIR: %[[BID_X_EPI:.*]] = gpu.block_id x +# DUMPIR: %[[BID_Y_EPI:.*]] = gpu.block_id y +# DUMPIR: %[[C128_EPI:.*]] = arith.constant 128 : index +# DUMPIR: %[[DIM_X_EPI:.*]] = arith.muli %[[BID_X_EPI]], %[[C128_EPI]] : index +# DUMPIR: %[[C128_EPI_2:.*]] = arith.constant 128 : index +# DUMPIR: %[[DIM_Y_EPI:.*]] = arith.muli %[[BID_Y_EPI]], %[[C128_EPI_2]] : index +# DUMPIR: %[[SMEM_EPI:.*]] = gpu.dynamic_shared_memory : memref> +# DUMPIR: %[[C0_EPI:.*]] = arith.constant 0 : index +# DUMPIR: %[[VIEW_EPI:.*]] = memref.view %[[SMEM_EPI]][%[[C0_EPI]]][] : memref> to memref<128x128xf32, #gpu.address_space> +# DUMPIR: %[[SUBVIEW:.*]] = memref.subview %{{.*}}[%[[DIM_X_EPI]], %[[DIM_Y_EPI]]] [128, 128] [1, 1] : memref<512x256xf32> to memref<128x128xf32, strided<[256, 1], offset: ?>> +# DUMPIR: nvgpu.warpgroup.mma.store %[[CONS_LOOP]]#0, %[[VIEW_EPI]] : > to memref<128x128xf32, #gpu.address_space> +# DUMPIR: gpu.barrier +# DUMPIR: %[[C0_STORE:.*]] = arith.constant 0 : index +# DUMPIR: %[[C128_STORE:.*]] = arith.constant 128 : index +# DUMPIR: %[[C1_STORE:.*]] = arith.constant 1 : index +# DUMPIR: scf.for %arg15 = %[[C0_STORE]] to %[[C128_STORE]] step %[[C1_STORE]] { +# DUMPIR: %{{.*}} = memref.load %[[VIEW_EPI]][%arg15, %[[TID_EPI]]] : memref<128x128xf32, #gpu.address_space> +# DUMPIR: memref.store %{{.*}}, %[[SUBVIEW]][%arg15, %[[TID_EPI]]] : memref<128x128xf32, strided<[256, 1], offset: ?>> +# DUMPIR: } +# DUMPIR: } +# DUMPIR: gpu.terminator diff --git a/mlir/test/Examples/NVGPU/lit.local.cfg b/mlir/test/Examples/NVGPU/lit.local.cfg index 689cd252e7a25..cf975dad53db3 100644 --- a/mlir/test/Examples/NVGPU/lit.local.cfg +++ b/mlir/test/Examples/NVGPU/lit.local.cfg @@ -1,4 +1,4 @@ config.unsupported = False -if not config.enable_cuda_runner or not config.mlir_run_cuda_sm90_tests: +if not config.enable_cuda_runner: config.unsupported = True \ No newline at end of file diff --git a/mlir/test/Examples/NVGPU/tools/nvdsl.py b/mlir/test/Examples/NVGPU/tools/nvdsl.py index 90dbb2355e1c8..856107293470d 100644 --- a/mlir/test/Examples/NVGPU/tools/nvdsl.py +++ b/mlir/test/Examples/NVGPU/tools/nvdsl.py @@ -9,6 +9,7 @@ from tools import nvgpucompiler MLIR_DYNAMIC = -9223372036854775808 +DUMP_ONLY = os.getenv("MLIR_NVDSL_PRINT_IR") == "1" def const(value: int, ty=None): @@ -84,9 +85,7 @@ def arrive(self, txcount: int = 0, predicate=None): self.mbar_group_op, txcount_op, self.id_op, predicate=predicate ) else: - nvgpu.mbarrier_arrive( - ir.Type.parse("!nvgpu.mbarrier.token"), self.mbar_group_op, self.id_op - ) + nvgpu.mbarrier_arrive(self.mbar_group_op, self.id_op) def try_wait(self, phase: bool = False, ticks: int = 10000000): ticks_op = const(ticks) @@ -144,7 +143,9 @@ def create_descriptor(self, device_ptr): device_ptr, ) self.tma_descriptor = nvgpu.TmaCreateDescriptorOp( - tma_descriptor_ty, device_unranked_memref, map(const, self.tma_box_shape) + tma_descriptor_ty, + device_unranked_memref, + list(map(const, self.tma_box_shape)), ) return self.tma_descriptor.result @@ -156,7 +157,7 @@ def load(self, dest, mbarrier: Mbarriers, coords=[0], predicate=None): dest, mbarrier.mbar_group_op, self.tma_descriptor, - coordinates=map(const, coords), + coordinates=list(map(const, coords)), mbarId=mbarrier.id_op, predicate=predicate, ) @@ -310,13 +311,10 @@ def decorator(func): @functools.wraps(func) def 
wrapper(*args, **kwargs): launch_op = gpu.LaunchOp( - None, - [], - *map(const, grid), - *map(const, block), - dynamicSharedMemorySize=arith.constant(T.i32(), smem), + grid_size=grid, + block_size=block, + dynamic_shared_memory_size=arith.constant(T.i32(), smem), ) - launch_op.body.blocks.append(*([T.index()] * 12)) with ir.InsertionPoint(launch_op.body.blocks[0]): result = func(*args, **kwargs) gpu.terminator() @@ -334,13 +332,11 @@ def wrapper(*args, **kwargs): def saveIR(module): """Save generated IR""" - if True: # self.saveIR: - # print(mlir_nvgpu_module) - original_stdout = sys.stdout - with open("nvdsl.mlir", "w") as f: - sys.stdout = f - print(module) - sys.stdout = original_stdout + original_stdout = sys.stdout + with open("nvdsl.mlir", "w") as f: + sys.stdout = f + print(module) + sys.stdout = original_stdout def _binary_op(lhs, rhs, op: str, predAtt="") -> "ArithValue": """Generate MLIR's Arith dialects binary operations.""" @@ -429,6 +425,9 @@ def __str__(self): # Save IR in a file # saveIR(module) + if DUMP_ONLY: + print(module) + return 0 # Verify the module module.operation.verify() diff --git a/mlir/test/Examples/NVGPU/tools/nvgpucompiler.py b/mlir/test/Examples/NVGPU/tools/nvgpucompiler.py index 1c9cc74fcd169..4b661f8df6a9f 100644 --- a/mlir/test/Examples/NVGPU/tools/nvgpucompiler.py +++ b/mlir/test/Examples/NVGPU/tools/nvgpucompiler.py @@ -35,9 +35,11 @@ def compile(self, module: ir.Module): def jit(self, module: ir.Module) -> execution_engine.ExecutionEngine: """Wraps the module in a JIT execution engine.""" - return execution_engine.ExecutionEngine( + ee = execution_engine.ExecutionEngine( module, opt_level=self.opt_level, shared_libs=self.shared_libs ) + ee.initialize() + return ee def compile_and_jit(self, module: ir.Module) -> execution_engine.ExecutionEngine: """Compiles and jits the module."""
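A brief note on the mechanism this patch introduces, since it recurs in every chapter: when the lit substitution %mlir_run_cuda_sm90_tests is not "1", the new RUN lines export MLIR_NVDSL_PRINT_IR=1, the nvdsl helper prints the generated module instead of JIT-executing it (the DUMP_ONLY path added to tools/nvdsl.py above), and FileCheck switches to the DUMPIR prefix. Below is a minimal Python sketch of that gate, not part of the patch; the environment variable name is taken from the diff, while run_or_dump and execute are illustrative names only.

import os

# Mirrors the DUMP_ONLY flag added to tools/nvdsl.py in this patch.
DUMP_ONLY = os.getenv("MLIR_NVDSL_PRINT_IR") == "1"

def run_or_dump(module, execute):
    """Print the IR for FileCheck's DUMPIR prefix, or compile and run it."""
    if DUMP_ONLY:
        print(module)        # consumed by FileCheck --check-prefix=DUMPIR
        return None
    return execute(module)   # normal path, verified against numpy and CHECK: PASS

Used this way, a chapter can still be exercised on a machine without an sm_90 GPU with something like: MLIR_NVDSL_PRINT_IR=1 python Ch0.py | FileCheck Ch0.py --check-prefix=DUMPIR, which mirrors the else-branch of the new RUN lines.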