@@ -129,9 +129,8 @@ define <vscale x 8 x i8> @strided_vpload_nxv8i8(ptr %ptr, i32 signext %stride, <
 define <vscale x 8 x i8> @strided_vpload_nxv8i8_unit_stride(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: strided_vpload_nxv8i8_unit_stride:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, 1
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vlse8.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    vle8.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
   %load = call <vscale x 8 x i8> @llvm.experimental.vp.strided.load.nxv8i8.p0.i32(ptr %ptr, i32 1, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i8> %load
@@ -200,9 +199,8 @@ define <vscale x 4 x i16> @strided_vpload_nxv4i16(ptr %ptr, i32 signext %stride,
 define <vscale x 4 x i16> @strided_vpload_nxv4i16_unit_stride(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: strided_vpload_nxv4i16_unit_stride:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, 2
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vlse16.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x i16> @llvm.experimental.vp.strided.load.nxv4i16.p0.i32(ptr %ptr, i32 2, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i16> %load
@@ -247,9 +245,8 @@ define <vscale x 2 x i32> @strided_vpload_nxv2i32(ptr %ptr, i32 signext %stride,
 define <vscale x 2 x i32> @strided_vpload_nxv2i32_unit_stride(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: strided_vpload_nxv2i32_unit_stride:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, 4
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vlse32.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i32> @llvm.experimental.vp.strided.load.nxv2i32.p0.i32(ptr %ptr, i32 4, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i32> %load
@@ -306,9 +303,8 @@ define <vscale x 1 x i64> @strided_vpload_nxv1i64(ptr %ptr, i32 signext %stride,
 define <vscale x 1 x i64> @strided_vpload_nxv1i64_unit_stride(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: strided_vpload_nxv1i64_unit_stride:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, 8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vlse64.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    vle64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
   %load = call <vscale x 1 x i64> @llvm.experimental.vp.strided.load.nxv1i64.p0.i32(ptr %ptr, i32 8, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i64> %load
@@ -413,9 +409,8 @@ define <vscale x 4 x half> @strided_vpload_nxv4f16(ptr %ptr, i32 signext %stride
 define <vscale x 4 x half> @strided_vpload_nxv4f16_unit_stride(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: strided_vpload_nxv4f16_unit_stride:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, 2
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vlse16.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x half> @llvm.experimental.vp.strided.load.nxv4f16.p0.i32(ptr %ptr, i32 2, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x half> %load
@@ -460,9 +455,8 @@ define <vscale x 2 x float> @strided_vpload_nxv2f32(ptr %ptr, i32 signext %strid
 define <vscale x 2 x float> @strided_vpload_nxv2f32_unit_stride(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: strided_vpload_nxv2f32_unit_stride:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, 4
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vlse32.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x float> @llvm.experimental.vp.strided.load.nxv2f32.p0.i32(ptr %ptr, i32 4, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x float> %load
@@ -519,9 +513,8 @@ define <vscale x 1 x double> @strided_vpload_nxv1f64(ptr %ptr, i32 signext %stri
 define <vscale x 1 x double> @strided_vpload_nxv1f64_unit_stride(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: strided_vpload_nxv1f64_unit_stride:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, 8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vlse64.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    vle64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
   %load = call <vscale x 1 x double> @llvm.experimental.vp.strided.load.nxv1f64.p0.i32(ptr %ptr, i32 8, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x double> %load
0 commit comments