[RISCV] Add unit strided load/store to whole register peephole #100116
Changes from 2 commits
```diff
@@ -59,6 +59,7 @@ class RISCVVectorPeephole : public MachineFunctionPass {
 private:
   bool convertToVLMAX(MachineInstr &MI) const;
+  bool convertToWholeRegister(MachineInstr &MI) const;
   bool convertToUnmasked(MachineInstr &MI) const;
   bool convertVMergeToVMv(MachineInstr &MI) const;
```
|
|
```diff
@@ -155,6 +156,58 @@ bool RISCVVectorPeephole::isAllOnesMask(const MachineInstr *MaskDef) const {
   }
 }
 
+/// Convert unit strided unmasked loads and stores to whole-register equivalents
+/// to avoid the dependency on $vl and $vtype.
+///
+/// %x = PseudoVLE8_V_M1 %passthru, %ptr, %vlmax, policy
+/// PseudoVSE8_V_M1 %v, %ptr, %vlmax
+///
+/// ->
+///
+/// %x = VL1RE8_V %ptr
+/// VS1R_V %v, %ptr
+bool RISCVVectorPeephole::convertToWholeRegister(MachineInstr &MI) const {
+#define CASE_WHOLE_REGISTER_LMUL_SEW(lmul, sew)                                \
+  case RISCV::PseudoVLE##sew##_V_M##lmul:                                      \
+    NewOpc = RISCV::VL##lmul##RE##sew##_V;                                     \
+    break;                                                                     \
+  case RISCV::PseudoVSE##sew##_V_M##lmul:                                      \
+    NewOpc = RISCV::VS##lmul##R_V;                                             \
+    break;
+#define CASE_WHOLE_REGISTER_LMUL(lmul)                                         \
+  CASE_WHOLE_REGISTER_LMUL_SEW(lmul, 8)                                        \
+  CASE_WHOLE_REGISTER_LMUL_SEW(lmul, 16)                                       \
+  CASE_WHOLE_REGISTER_LMUL_SEW(lmul, 32)                                       \
+  CASE_WHOLE_REGISTER_LMUL_SEW(lmul, 64)
+
+  unsigned NewOpc;
+  switch (MI.getOpcode()) {
+    CASE_WHOLE_REGISTER_LMUL(1)
+    CASE_WHOLE_REGISTER_LMUL(2)
+    CASE_WHOLE_REGISTER_LMUL(4)
+    CASE_WHOLE_REGISTER_LMUL(8)
+  default:
+    return false;
+  }
+
+  MachineOperand &VLOp = MI.getOperand(RISCVII::getVLOpNum(MI.getDesc()));
+  if (!VLOp.isImm() || VLOp.getImm() != RISCV::VLMaxSentinel)
+    return false;
+
+  // Stores don't have a policy op
+  if (RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags))
+    MI.removeOperand(RISCVII::getVecPolicyOpNum(MI.getDesc()));
+  MI.removeOperand(RISCVII::getSEWOpNum(MI.getDesc()));
+  MI.removeOperand(RISCVII::getVLOpNum(MI.getDesc()));
+  // Stores don't have a passthru
+  if (RISCVII::isFirstDefTiedToFirstUse(MI.getDesc()))
+    MI.removeOperand(1);
+
+  MI.setDesc(TII->get(NewOpc));
+
+  return true;
+}
+
 // Transform (VMERGE_VVM_<LMUL> false, false, true, allones, vl, sew) to
 // (VMV_V_V_<LMUL> false, true, vl, sew). It may decrease uses of VMSET.
 bool RISCVVectorPeephole::convertVMergeToVMv(MachineInstr &MI) const {
```
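For concreteness, a hedged sketch of the rewrite at the MIR level for an e8, m1 store; the register names are illustrative, while the `-1` VLMAX sentinel and the log2-encoded SEW operand (`3` for e8) follow the conventions visible in the test diffs below:

```
; Candidate: the AVL operand is the VLMAX sentinel (-1), so the store
; writes the whole register anyway and the VL/vtype dependency is redundant.
PseudoVSE8_V_M1 killed %v, %ptr, -1, 3 /* e8 */
; After the peephole: the VL and SEW operands are gone, and the
; instruction no longer depends on $vl or $vtype.
VS1R_V killed %v, %ptr
```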
|
|
```diff
@@ -281,6 +334,7 @@ bool RISCVVectorPeephole::runOnMachineFunction(MachineFunction &MF) {
     for (MachineInstr &MI : MBB) {
       Changed |= convertToVLMAX(MI);
       Changed |= convertToUnmasked(MI);
+      Changed |= convertToWholeRegister(MI);
       Changed |= convertVMergeToVMv(MI);
     }
   }
```
**Contributor (Author):** These functions are marked with `optnone`, so the machine SSA optimisation passes, including RISCVVectorPeephole, aren't run on them.
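For reference, `optnone` is an LLVM IR function attribute (the verifier requires it to be paired with `noinline`); functions carrying it are skipped by the optimisation pipeline. A minimal, hypothetical example of such a function, not taken from the test suite:

```llvm
; Hypothetical: under optnone, the unit-strided store pseudo produced by
; isel is never rewritten to VS1R_V, since RISCVVectorPeephole is skipped.
define void @keeps_pseudo(<vscale x 8 x i8> %v, ptr %p) optnone noinline {
  store <vscale x 8 x i8> %v, ptr %p
  ret void
}
```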
The expected MIR for the `vpmerge_vpload_store` test changes accordingly:

```diff
@@ -17,7 +17,7 @@ define void @vpmerge_vpload_store(<vscale x 2 x i32> %passthru, ptr %p, <vscale
 ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8
 ; CHECK-NEXT: $v0 = COPY [[COPY1]]
 ; CHECK-NEXT: [[PseudoVLE32_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 5 /* e32 */, 0 /* tu, mu */ :: (load unknown-size from %ir.p, align 8)
-; CHECK-NEXT: VS1R_V killed [[PseudoVLE32_V_M1_MASK]], [[COPY2]] :: (store (<vscale x 1 x s64>) into %ir.p)
+; CHECK-NEXT: PseudoVSE32_V_M1 killed [[PseudoVLE32_V_M1_MASK]], [[COPY2]], -1, 5 /* e32 */ :: (store (<vscale x 1 x s64>) into %ir.p)
```
**Contributor (Author):** This test stops after isel, so this is expected, since RISCVVectorPeephole isn't run.
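For context, such MIR tests stop the codegen pipeline right after instruction selection, so later passes like RISCVVectorPeephole never fire on them. A sketch of what the RUN line for a test like this typically looks like (illustrative, not copied from the test file):

```llvm
; RUN: llc -mtriple=riscv64 -mattr=+v -stop-after=finalize-isel \
; RUN:   -verify-machineinstrs < %s | FileCheck %s
```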
The hunk continues, followed by the matching change in `vpselect_vpload_store`:

```diff
 ; CHECK-NEXT: PseudoRET
 %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr %p, <vscale x 2 x i1> splat (i1 -1), i32 %vl)
 %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
@@ -36,7 +36,7 @@ define void @vpselect_vpload_store(<vscale x 2 x i32> %passthru, ptr %p, <vscale
 ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8
 ; CHECK-NEXT: $v0 = COPY [[COPY1]]
 ; CHECK-NEXT: [[PseudoVLE32_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 5 /* e32 */, 1 /* ta, mu */ :: (load unknown-size from %ir.p, align 8)
-; CHECK-NEXT: VS1R_V killed [[PseudoVLE32_V_M1_MASK]], [[COPY2]] :: (store (<vscale x 1 x s64>) into %ir.p)
+; CHECK-NEXT: PseudoVSE32_V_M1 killed [[PseudoVLE32_V_M1_MASK]], [[COPY2]], -1, 5 /* e32 */ :: (store (<vscale x 1 x s64>) into %ir.p)
 ; CHECK-NEXT: PseudoRET
 %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr %p, <vscale x 2 x i1> splat (i1 -1), i32 %vl)
 %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
```
**Reviewer:** Invert this sentence? Something like: "Pseudos for unit-stride loads have a policy op; we should remove it, as whole register loads don't have one." I was confused at first glance, because the thought "why should we do this if stores have no policy op?" came to me.
**Contributor (Author):** Yeah, this is a bit confusing. I think I put that there because unit strided stores might not have a policy op, hence why I'm only checking for the policy op and not for the VL or SEW. Will reword.
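A possible rewording along the lines suggested in this thread, applied to the check in question (illustrative only, not necessarily the wording that eventually landed):

```cpp
// Unit-stride load pseudos carry a policy operand that the whole register
// forms don't have, so remove it. Store pseudos never had a policy
// operand, which is why this check is conditional.
if (RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags))
  MI.removeOperand(RISCVII::getVecPolicyOpNum(MI.getDesc()));
```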