//===-- lib/CodeGen/GlobalISel/CombinerHelper.cpp -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/Analysis/CmpInstAnalysis.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/LowLevelTypeUtils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/CodeGen/RegisterBankInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/DivisionByConstantInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include <cmath>
#include <optional>
#include <tuple>

#define DEBUG_TYPE "gi-combiner"

using namespace llvm;
using namespace MIPatternMatch;

// Option to allow testing of the combiner while no targets know about indexed
// addressing.
static cl::opt<bool>
    ForceLegalIndexing("force-legal-indexing", cl::Hidden, cl::init(false),
                       cl::desc("Force all indexed operations to be "
                                "legal for the GlobalISel combiner"));

CombinerHelper::CombinerHelper(GISelChangeObserver &Observer,
                               MachineIRBuilder &B, bool IsPreLegalize,
                               GISelValueTracking *VT,
                               MachineDominatorTree *MDT,
                               const LegalizerInfo *LI)
    : Builder(B), MRI(Builder.getMF().getRegInfo()), Observer(Observer), VT(VT),
      MDT(MDT), IsPreLegalize(IsPreLegalize), LI(LI),
      RBI(Builder.getMF().getSubtarget().getRegBankInfo()),
      TRI(Builder.getMF().getSubtarget().getRegisterInfo()) {
  (void)this->VT;
}

const TargetLowering &CombinerHelper::getTargetLowering() const {
  return *Builder.getMF().getSubtarget().getTargetLowering();
}

const MachineFunction &CombinerHelper::getMachineFunction() const {
  return Builder.getMF();
}

const DataLayout &CombinerHelper::getDataLayout() const {
  return getMachineFunction().getDataLayout();
}

LLVMContext &CombinerHelper::getContext() const { return Builder.getContext(); }

/// \returns The little endian in-memory byte position of byte \p I in a
/// \p ByteWidth bytes wide type.
///
/// E.g. Given a 4-byte type x, x[0] -> byte 0
static unsigned littleEndianByteAt(const unsigned ByteWidth, const unsigned I) {
  assert(I < ByteWidth && "I must be in [0, ByteWidth)");
  return I;
}

/// Determines the LogBase2 value for a non-null input value using the
/// transform: LogBase2(V) = (EltBits - 1) - ctlz(V).
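///
/// E.g. for a 32-bit scalar value V = 8, ctlz(8) = 28, so
/// LogBase2(8) = (32 - 1) - 28 = 3.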
static Register buildLogBase2(Register V, MachineIRBuilder &MIB) {
  auto &MRI = *MIB.getMRI();
  LLT Ty = MRI.getType(V);
  auto Ctlz = MIB.buildCTLZ(Ty, V);
  auto Base = MIB.buildConstant(Ty, Ty.getScalarSizeInBits() - 1);
  return MIB.buildSub(Ty, Base, Ctlz).getReg(0);
}

/// \returns The big endian in-memory byte position of byte \p I in a
/// \p ByteWidth bytes wide type.
///
/// E.g. Given a 4-byte type x, x[0] -> byte 3
static unsigned bigEndianByteAt(const unsigned ByteWidth, const unsigned I) {
  assert(I < ByteWidth && "I must be in [0, ByteWidth)");
  return ByteWidth - I - 1;
}

/// Given a map from byte offsets in memory to indices in a load/store,
/// determine if that map corresponds to a little or big endian byte pattern.
///
/// \param MemOffset2Idx maps memory offsets to address offsets.
/// \param LowestIdx is the lowest index in \p MemOffset2Idx.
///
/// \returns true if the map corresponds to a big endian byte pattern, false if
/// it corresponds to a little endian byte pattern, and std::nullopt otherwise.
///
/// E.g. given a 32-bit type x, and x[AddrOffset], the in-memory byte patterns
/// are as follows:
///
///  AddrOffset   Little endian    Big endian
///  0            0                3
///  1            1                2
///  2            2                1
///  3            3                0
static std::optional<bool>
isBigEndian(const SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx,
            int64_t LowestIdx) {
  // Need at least two byte positions to decide on endianness.
  unsigned Width = MemOffset2Idx.size();
  if (Width < 2)
    return std::nullopt;
  bool BigEndian = true, LittleEndian = true;
  for (unsigned MemOffset = 0; MemOffset < Width; ++MemOffset) {
    auto MemOffsetAndIdx = MemOffset2Idx.find(MemOffset);
    if (MemOffsetAndIdx == MemOffset2Idx.end())
      return std::nullopt;
    const int64_t Idx = MemOffsetAndIdx->second - LowestIdx;
    assert(Idx >= 0 && "Expected non-negative byte offset?");
    LittleEndian &= Idx == littleEndianByteAt(Width, MemOffset);
    BigEndian &= Idx == bigEndianByteAt(Width, MemOffset);
    if (!BigEndian && !LittleEndian)
      return std::nullopt;
  }

  assert((BigEndian != LittleEndian) &&
         "Pattern cannot be both big and little endian!");
  return BigEndian;
}

bool CombinerHelper::isPreLegalize() const { return IsPreLegalize; }

bool CombinerHelper::isLegal(const LegalityQuery &Query) const {
  assert(LI && "Must have LegalizerInfo to query isLegal!");
  return LI->getAction(Query).Action == LegalizeActions::Legal;
}

bool CombinerHelper::isLegalOrBeforeLegalizer(
    const LegalityQuery &Query) const {
  return isPreLegalize() || isLegal(Query);
}

bool CombinerHelper::isLegalOrHasWidenScalar(const LegalityQuery &Query) const {
  return isLegal(Query) ||
         LI->getAction(Query).Action == LegalizeActions::WidenScalar;
}

bool CombinerHelper::isConstantLegalOrBeforeLegalizer(const LLT Ty) const {
  if (!Ty.isVector())
    return isLegalOrBeforeLegalizer({TargetOpcode::G_CONSTANT, {Ty}});
  // Vector constants are represented as a G_BUILD_VECTOR of scalar G_CONSTANTs.
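  // E.g. a <2 x s32> constant is built as:
  //   %c0:_(s32) = G_CONSTANT i32 1
  //   %c1:_(s32) = G_CONSTANT i32 2
  //   %v:_(<2 x s32>) = G_BUILD_VECTOR %c0(s32), %c1(s32)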
  if (isPreLegalize())
    return true;
  LLT EltTy = Ty.getElementType();
  return isLegal({TargetOpcode::G_BUILD_VECTOR, {Ty, EltTy}}) &&
         isLegal({TargetOpcode::G_CONSTANT, {EltTy}});
}

void CombinerHelper::replaceRegWith(MachineRegisterInfo &MRI, Register FromReg,
                                    Register ToReg) const {
  Observer.changingAllUsesOfReg(MRI, FromReg);

  if (MRI.constrainRegAttrs(ToReg, FromReg))
    MRI.replaceRegWith(FromReg, ToReg);
  else
    Builder.buildCopy(FromReg, ToReg);

  Observer.finishedChangingAllUsesOfReg();
}

void CombinerHelper::replaceRegOpWith(MachineRegisterInfo &MRI,
                                      MachineOperand &FromRegOp,
                                      Register ToReg) const {
  assert(FromRegOp.getParent() && "Expected an operand in an MI");
  Observer.changingInstr(*FromRegOp.getParent());

  FromRegOp.setReg(ToReg);

  Observer.changedInstr(*FromRegOp.getParent());
}

void CombinerHelper::replaceOpcodeWith(MachineInstr &FromMI,
                                       unsigned ToOpcode) const {
  Observer.changingInstr(FromMI);

  FromMI.setDesc(Builder.getTII().get(ToOpcode));

  Observer.changedInstr(FromMI);
}

const RegisterBank *CombinerHelper::getRegBank(Register Reg) const {
  return RBI->getRegBank(Reg, MRI, *TRI);
}

void CombinerHelper::setRegBank(Register Reg,
                                const RegisterBank *RegBank) const {
  if (RegBank)
    MRI.setRegBank(Reg, *RegBank);
}

bool CombinerHelper::tryCombineCopy(MachineInstr &MI) const {
  if (matchCombineCopy(MI)) {
    applyCombineCopy(MI);
    return true;
  }
  return false;
}
bool CombinerHelper::matchCombineCopy(MachineInstr &MI) const {
  if (MI.getOpcode() != TargetOpcode::COPY)
    return false;
  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();
  return canReplaceReg(DstReg, SrcReg, MRI);
}
void CombinerHelper::applyCombineCopy(MachineInstr &MI) const {
  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();
  replaceRegWith(MRI, DstReg, SrcReg);
  MI.eraseFromParent();
}

bool CombinerHelper::matchFreezeOfSingleMaybePoisonOperand(
    MachineInstr &MI, BuildFnTy &MatchInfo) const {
  // Ported from InstCombinerImpl::pushFreezeToPreventPoisonFromPropagating.
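  // The rewrite performed is, in essence:
  //   %op = G_OP %maybe_poison, %safe
  //   %fr = G_FREEZE %op
  // ->
  //   %fmp = G_FREEZE %maybe_poison
  //   %op = G_OP %fmp, %safe
  // where %maybe_poison is the single operand not known to be non-poison.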
  Register DstOp = MI.getOperand(0).getReg();
  Register OrigOp = MI.getOperand(1).getReg();

  if (!MRI.hasOneNonDBGUse(OrigOp))
    return false;

  MachineInstr *OrigDef = MRI.getUniqueVRegDef(OrigOp);
  // Even if only a single operand of the PHI is not guaranteed non-poison,
  // moving freeze() backwards across a PHI can cause optimization issues for
  // other users of that operand.
  //
  // Moving freeze() from one of the output registers of a G_UNMERGE_VALUES to
  // the source register is unprofitable because it makes the freeze() more
  // strict than is necessary (it would affect the whole register instead of
  // just the subreg being frozen).
  if (OrigDef->isPHI() || isa<GUnmerge>(OrigDef))
    return false;

  if (canCreateUndefOrPoison(OrigOp, MRI,
                             /*ConsiderFlagsAndMetadata=*/false))
    return false;

  std::optional<MachineOperand> MaybePoisonOperand;
  for (MachineOperand &Operand : OrigDef->uses()) {
    if (!Operand.isReg())
      return false;

    if (isGuaranteedNotToBeUndefOrPoison(Operand.getReg(), MRI))
      continue;

    if (!MaybePoisonOperand)
      MaybePoisonOperand = Operand;
    else {
      // We have more than one maybe-poison operand. Moving the freeze is
      // unsafe.
      return false;
    }
  }

  // Eliminate freeze if all operands are guaranteed non-poison.
  if (!MaybePoisonOperand) {
    MatchInfo = [=](MachineIRBuilder &B) {
      Observer.changingInstr(*OrigDef);
      cast<GenericMachineInstr>(OrigDef)->dropPoisonGeneratingFlags();
      Observer.changedInstr(*OrigDef);
      B.buildCopy(DstOp, OrigOp);
    };
    return true;
  }

  Register MaybePoisonOperandReg = MaybePoisonOperand->getReg();
  LLT MaybePoisonOperandRegTy = MRI.getType(MaybePoisonOperandReg);

  MatchInfo = [=](MachineIRBuilder &B) mutable {
    Observer.changingInstr(*OrigDef);
    cast<GenericMachineInstr>(OrigDef)->dropPoisonGeneratingFlags();
    Observer.changedInstr(*OrigDef);
    B.setInsertPt(*OrigDef->getParent(), OrigDef->getIterator());
    auto Freeze = B.buildFreeze(MaybePoisonOperandRegTy, MaybePoisonOperandReg);
    replaceRegOpWith(
        MRI, *OrigDef->findRegisterUseOperand(MaybePoisonOperandReg, TRI),
        Freeze.getReg(0));
    replaceRegWith(MRI, DstOp, OrigOp);
  };
  return true;
}

bool CombinerHelper::matchCombineConcatVectors(
    MachineInstr &MI, SmallVector<Register> &Ops) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONCAT_VECTORS &&
         "Invalid instruction");
  bool IsUndef = true;
  MachineInstr *Undef = nullptr;

  // Walk over all the operands of concat vectors and check if they are
  // build_vector themselves or undef.
  // Then collect their operands in Ops.
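  // E.g. this lets
  //   %x:_(<2 x s32>) = G_BUILD_VECTOR %a, %b
  //   %y:_(<2 x s32>) = G_IMPLICIT_DEF
  //   %z:_(<4 x s32>) = G_CONCAT_VECTORS %x(<2 x s32>), %y(<2 x s32>)
  // be flattened into a single
  //   %z:_(<4 x s32>) = G_BUILD_VECTOR %a, %b, %undef, %undef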
  for (const MachineOperand &MO : MI.uses()) {
    Register Reg = MO.getReg();
    MachineInstr *Def = MRI.getVRegDef(Reg);
    assert(Def && "Operand not defined");
    if (!MRI.hasOneNonDBGUse(Reg))
      return false;
    switch (Def->getOpcode()) {
    case TargetOpcode::G_BUILD_VECTOR:
      IsUndef = false;
      // Remember the operands of the build_vector to fold
      // them into the yet-to-build flattened concat vectors.
      for (const MachineOperand &BuildVecMO : Def->uses())
        Ops.push_back(BuildVecMO.getReg());
      break;
    case TargetOpcode::G_IMPLICIT_DEF: {
      LLT OpType = MRI.getType(Reg);
      // Keep one undef value for all the undef operands.
      if (!Undef) {
        Builder.setInsertPt(*MI.getParent(), MI);
        Undef = Builder.buildUndef(OpType.getScalarType());
      }
      assert(MRI.getType(Undef->getOperand(0).getReg()) ==
                 OpType.getScalarType() &&
             "All undefs should have the same type");
      // Break the undef vector in as many scalar elements as needed
      // for the flattening.
      for (unsigned EltIdx = 0, EltEnd = OpType.getNumElements();
           EltIdx != EltEnd; ++EltIdx)
        Ops.push_back(Undef->getOperand(0).getReg());
      break;
    }
    default:
      return false;
    }
  }

  // Check if the combine is illegal
  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
  if (!isLegalOrBeforeLegalizer(
          {TargetOpcode::G_BUILD_VECTOR, {DstTy, MRI.getType(Ops[0])}})) {
    return false;
  }

  if (IsUndef)
    Ops.clear();

  return true;
}
void CombinerHelper::applyCombineConcatVectors(
    MachineInstr &MI, SmallVector<Register> &Ops) const {
  // We determined that the concat_vectors can be flattened.
  // Generate the flattened build_vector.
  Register DstReg = MI.getOperand(0).getReg();
  Builder.setInsertPt(*MI.getParent(), MI);
  Register NewDstReg = MRI.cloneVirtualRegister(DstReg);

  // Note: IsUndef is sort of redundant. We could have determined it by
  // checking that all Ops are undef. Alternatively, we could have
  // generated a build_vector of undefs and relied on another combine to
  // clean that up. For now, given we already gather this information
  // in matchCombineConcatVectors, just save compile time and issue the
  // right thing.
  if (Ops.empty())
    Builder.buildUndef(NewDstReg);
  else
    Builder.buildBuildVector(NewDstReg, Ops);
  replaceRegWith(MRI, DstReg, NewDstReg);
  MI.eraseFromParent();
}

bool CombinerHelper::matchCombineShuffleToBuildVector(MachineInstr &MI) const {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
         "Invalid instruction");
  auto &Shuffle = cast<GShuffleVector>(MI);

  Register SrcVec1 = Shuffle.getSrc1Reg();
  Register SrcVec2 = Shuffle.getSrc2Reg();

  LLT SrcVec1Type = MRI.getType(SrcVec1);
  LLT SrcVec2Type = MRI.getType(SrcVec2);
  return SrcVec1Type.isVector() && SrcVec2Type.isVector();
}

void CombinerHelper::applyCombineShuffleToBuildVector(MachineInstr &MI) const {
  auto &Shuffle = cast<GShuffleVector>(MI);

  Register SrcVec1 = Shuffle.getSrc1Reg();
  Register SrcVec2 = Shuffle.getSrc2Reg();
  LLT EltTy = MRI.getType(SrcVec1).getElementType();
  int Width = MRI.getType(SrcVec1).getNumElements();

  auto Unmerge1 = Builder.buildUnmerge(EltTy, SrcVec1);
  auto Unmerge2 = Builder.buildUnmerge(EltTy, SrcVec2);

  SmallVector<Register> Extracts;
  // Select only applicable elements from unmerged values.
  for (int Val : Shuffle.getMask()) {
    if (Val == -1)
      Extracts.push_back(Builder.buildUndef(EltTy).getReg(0));
    else if (Val < Width)
      Extracts.push_back(Unmerge1.getReg(Val));
    else
      Extracts.push_back(Unmerge2.getReg(Val - Width));
  }
  assert(Extracts.size() > 0 && "Expected at least one element in the shuffle");
  if (Extracts.size() == 1)
    Builder.buildCopy(MI.getOperand(0).getReg(), Extracts[0]);
  else
    Builder.buildBuildVector(MI.getOperand(0).getReg(), Extracts);
  MI.eraseFromParent();
}

bool CombinerHelper::matchCombineShuffleConcat(
    MachineInstr &MI, SmallVector<Register> &Ops) const {
  ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
  auto ConcatMI1 =
      dyn_cast<GConcatVectors>(MRI.getVRegDef(MI.getOperand(1).getReg()));
  auto ConcatMI2 =
      dyn_cast<GConcatVectors>(MRI.getVRegDef(MI.getOperand(2).getReg()));
  if (!ConcatMI1 || !ConcatMI2)
    return false;

  // Check that the sources of the Concat instructions have the same type
  if (MRI.getType(ConcatMI1->getSourceReg(0)) !=
      MRI.getType(ConcatMI2->getSourceReg(0)))
    return false;

  LLT ConcatSrcTy = MRI.getType(ConcatMI1->getReg(1));
  LLT ShuffleSrcTy1 = MRI.getType(MI.getOperand(1).getReg());
  unsigned ConcatSrcNumElt = ConcatSrcTy.getNumElements();
  for (unsigned i = 0; i < Mask.size(); i += ConcatSrcNumElt) {
    // Check if the index takes a whole source register from G_CONCAT_VECTORS
    // Assumes that all Sources of G_CONCAT_VECTORS are the same type
    if (Mask[i] == -1) {
      for (unsigned j = 1; j < ConcatSrcNumElt; j++) {
        if (i + j >= Mask.size())
          return false;
        if (Mask[i + j] != -1)
          return false;
      }
      if (!isLegalOrBeforeLegalizer(
              {TargetOpcode::G_IMPLICIT_DEF, {ConcatSrcTy}}))
        return false;
      Ops.push_back(0);
    } else if (Mask[i] % ConcatSrcNumElt == 0) {
      for (unsigned j = 1; j < ConcatSrcNumElt; j++) {
        if (i + j >= Mask.size())
          return false;
        if (Mask[i + j] != Mask[i] + static_cast<int>(j))
          return false;
      }
      // Retrieve the source register from its respective G_CONCAT_VECTORS
      // instruction
      if (Mask[i] < static_cast<int>(ShuffleSrcTy1.getNumElements())) {
        Ops.push_back(ConcatMI1->getSourceReg(Mask[i] / ConcatSrcNumElt));
      } else {
        Ops.push_back(ConcatMI2->getSourceReg(Mask[i] / ConcatSrcNumElt -
                                              ConcatMI1->getNumSources()));
      }
    } else {
      return false;
    }
  }

  if (!isLegalOrBeforeLegalizer(
          {TargetOpcode::G_CONCAT_VECTORS,
           {MRI.getType(MI.getOperand(0).getReg()), ConcatSrcTy}}))
    return false;

  return !Ops.empty();
}

void CombinerHelper::applyCombineShuffleConcat(
    MachineInstr &MI, SmallVector<Register> &Ops) const {
  LLT SrcTy;
  for (Register &Reg : Ops) {
    if (Reg != 0)
      SrcTy = MRI.getType(Reg);
  }
  assert(SrcTy.isValid() && "Unexpected full undef vector in concat combine");

  Register UndefReg = 0;

  for (Register &Reg : Ops) {
    if (Reg == 0) {
      if (UndefReg == 0)
        UndefReg = Builder.buildUndef(SrcTy).getReg(0);
      Reg = UndefReg;
    }
  }

  if (Ops.size() > 1)
    Builder.buildConcatVectors(MI.getOperand(0).getReg(), Ops);
  else
    Builder.buildCopy(MI.getOperand(0).getReg(), Ops[0]);
  MI.eraseFromParent();
}

bool CombinerHelper::tryCombineShuffleVector(MachineInstr &MI) const {
  SmallVector<Register, 4> Ops;
  if (matchCombineShuffleVector(MI, Ops)) {
    applyCombineShuffleVector(MI, Ops);
    return true;
  }
  return false;
}

bool CombinerHelper::matchCombineShuffleVector(
    MachineInstr &MI, SmallVectorImpl<Register> &Ops) const {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
         "Invalid instruction kind");
  LLT DstType = MRI.getType(MI.getOperand(0).getReg());
  Register Src1 = MI.getOperand(1).getReg();
  LLT SrcType = MRI.getType(Src1);
  // As bizarre as it may look, shuffle vector can actually produce
  // scalar! This is because at the IR level a <1 x ty> shuffle
  // vector is perfectly valid.
  unsigned DstNumElts = DstType.isVector() ? DstType.getNumElements() : 1;
  unsigned SrcNumElts = SrcType.isVector() ? SrcType.getNumElements() : 1;

  // If the resulting vector is smaller than the size of the source
  // vectors being concatenated, we won't be able to replace the
  // shuffle vector into a concat_vectors.
  //
  // Note: We may still be able to produce a concat_vectors fed by
  // extract_vector_elt and so on. It is less clear that would
  // be better though, so don't bother for now.
  //
  // If the destination is a scalar, the size of the sources doesn't
  // matter; we will lower the shuffle to a plain copy. This only
  // works if the source and destination have the same size. But
  // that's covered by the next condition.
  //
  // TODO: If the sizes of the source and destination don't match
  // we could still emit an extract vector element in that case.
  if (DstNumElts < 2 * SrcNumElts && DstNumElts != 1)
    return false;

  // Check that the shuffle mask can be broken evenly between the
  // different sources.
  if (DstNumElts % SrcNumElts != 0)
    return false;

  // Mask length is a multiple of the source vector length.
  // Check if the shuffle is some kind of concatenation of the input
  // vectors.
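  // E.g. with Src1 = <2 x s32>, Src2 = <2 x s32> and mask <0, 1, 2, 3>, the
  // shuffle is equivalent to G_CONCAT_VECTORS %Src1, %Src2.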
  unsigned NumConcat = DstNumElts / SrcNumElts;
  SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
  ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
  for (unsigned i = 0; i != DstNumElts; ++i) {
    int Idx = Mask[i];
    // Undef value.
    if (Idx < 0)
      continue;
    // Ensure the indices in each SrcType sized piece are sequential and that
    // the same source is used for the whole piece.
    if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
        (ConcatSrcs[i / SrcNumElts] >= 0 &&
         ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts)))
      return false;
    // Remember which source this index came from.
    ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
  }

  // The shuffle is concatenating multiple vectors together.
  // Collect the different operands for that.
  Register UndefReg;
  Register Src2 = MI.getOperand(2).getReg();
  for (auto Src : ConcatSrcs) {
    if (Src < 0) {
      if (!UndefReg) {
        Builder.setInsertPt(*MI.getParent(), MI);
        UndefReg = Builder.buildUndef(SrcType).getReg(0);
      }
      Ops.push_back(UndefReg);
    } else if (Src == 0)
      Ops.push_back(Src1);
    else
      Ops.push_back(Src2);
  }
  return true;
}

void CombinerHelper::applyCombineShuffleVector(
    MachineInstr &MI, const ArrayRef<Register> Ops) const {
  Register DstReg = MI.getOperand(0).getReg();
  Builder.setInsertPt(*MI.getParent(), MI);
  Register NewDstReg = MRI.cloneVirtualRegister(DstReg);

  if (Ops.size() == 1)
    Builder.buildCopy(NewDstReg, Ops[0]);
  else
    Builder.buildMergeLikeInstr(NewDstReg, Ops);

  replaceRegWith(MRI, DstReg, NewDstReg);
  MI.eraseFromParent();
}

bool CombinerHelper::matchShuffleToExtract(MachineInstr &MI) const {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
         "Invalid instruction kind");

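  // A single-element mask is just an extract of one element, e.g.:
  //   %d:_(s32) = G_SHUFFLE_VECTOR %a(<4 x s32>), %b(<4 x s32>), shufflemask(5)
  // reads element 5 - 4 = 1 of %b.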
  ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
  return Mask.size() == 1;
}

void CombinerHelper::applyShuffleToExtract(MachineInstr &MI) const {
  Register DstReg = MI.getOperand(0).getReg();
  Builder.setInsertPt(*MI.getParent(), MI);

  int I = MI.getOperand(3).getShuffleMask()[0];
  Register Src1 = MI.getOperand(1).getReg();
  LLT Src1Ty = MRI.getType(Src1);
  int Src1NumElts = Src1Ty.isVector() ? Src1Ty.getNumElements() : 1;
  Register SrcReg;
  if (I >= Src1NumElts) {
    SrcReg = MI.getOperand(2).getReg();
    I -= Src1NumElts;
  } else if (I >= 0)
    SrcReg = Src1;

  if (I < 0)
    Builder.buildUndef(DstReg);
  else if (!MRI.getType(SrcReg).isVector())
    Builder.buildCopy(DstReg, SrcReg);
  else
    Builder.buildExtractVectorElementConstant(DstReg, SrcReg, I);

  MI.eraseFromParent();
}

namespace {

/// Select a preference between two uses. CurrentUse is the current preference
/// while the *ForCandidate values describe the candidate under consideration.
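///
/// The resulting preference order is, from least to most preferred extend,
/// G_ANYEXT < G_ZEXT < G_SEXT, with larger types preferred over smaller ones.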
PreferredTuple ChoosePreferredUse(MachineInstr &LoadMI,
                                  PreferredTuple &CurrentUse,
                                  const LLT TyForCandidate,
                                  unsigned OpcodeForCandidate,
                                  MachineInstr *MIForCandidate) {
  if (!CurrentUse.Ty.isValid()) {
    if (CurrentUse.ExtendOpcode == OpcodeForCandidate ||
        CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT)
      return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
    return CurrentUse;
  }

  // We permit the extend to hoist through basic blocks but this is only
  // sensible if the target has extending loads. If you end up lowering back
  // into a load and extend during the legalizer then the end result is
  // hoisting the extend up to the load.

  // Prefer defined extensions to undefined extensions as these are more
  // likely to reduce the number of instructions.
  if (OpcodeForCandidate == TargetOpcode::G_ANYEXT &&
      CurrentUse.ExtendOpcode != TargetOpcode::G_ANYEXT)
    return CurrentUse;
  else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT &&
           OpcodeForCandidate != TargetOpcode::G_ANYEXT)
    return {TyForCandidate, OpcodeForCandidate, MIForCandidate};

  // Prefer sign extensions to zero extensions as sign-extensions tend to be
  // more expensive. Don't do this if the load is already a zero-extend load
  // though, otherwise we'll rewrite a zero-extend load into a sign-extend
  // later.
  if (!isa<GZExtLoad>(LoadMI) && CurrentUse.Ty == TyForCandidate) {
    if (CurrentUse.ExtendOpcode == TargetOpcode::G_SEXT &&
        OpcodeForCandidate == TargetOpcode::G_ZEXT)
      return CurrentUse;
    else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ZEXT &&
             OpcodeForCandidate == TargetOpcode::G_SEXT)
      return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
  }

  // This is potentially target specific. We've chosen the largest type
  // because G_TRUNC is usually free. One potential catch with this is that
  // some targets have a reduced number of larger registers than smaller
  // registers and this choice potentially increases the live-range for the
  // larger value.
  if (TyForCandidate.getSizeInBits() > CurrentUse.Ty.getSizeInBits()) {
    return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
  }
  return CurrentUse;
}

/// Find a suitable place to insert some instructions and insert them. This
/// function accounts for special cases like inserting before a PHI node.
/// The current strategy for inserting before PHI's is to duplicate the
/// instructions for each predecessor. However, while that's ok for G_TRUNC
/// on most targets since it generally requires no code, other targets/cases may
/// want to try harder to find a dominating block.
static void InsertInsnsWithoutSideEffectsBeforeUse(
    MachineIRBuilder &Builder, MachineInstr &DefMI, MachineOperand &UseMO,
    std::function<void(MachineBasicBlock *, MachineBasicBlock::iterator,
                       MachineOperand &UseMO)>
        Inserter) {
  MachineInstr &UseMI = *UseMO.getParent();

  MachineBasicBlock *InsertBB = UseMI.getParent();

  // If the use is a PHI then we want the predecessor block instead.
  if (UseMI.isPHI()) {
    MachineOperand *PredBB = std::next(&UseMO);
    InsertBB = PredBB->getMBB();
  }

  // If the block is the same block as the def then we want to insert just after
  // the def instead of at the start of the block.
  if (InsertBB == DefMI.getParent()) {
    MachineBasicBlock::iterator InsertPt = &DefMI;
    Inserter(InsertBB, std::next(InsertPt), UseMO);
    return;
  }

  // Otherwise we want the start of the BB
  Inserter(InsertBB, InsertBB->getFirstNonPHI(), UseMO);
}
} // end anonymous namespace

bool CombinerHelper::tryCombineExtendingLoads(MachineInstr &MI) const {
  PreferredTuple Preferred;
  if (matchCombineExtendingLoads(MI, Preferred)) {
    applyCombineExtendingLoads(MI, Preferred);
    return true;
  }
  return false;
}

static unsigned getExtLoadOpcForExtend(unsigned ExtOpc) {
  unsigned CandidateLoadOpc;
  switch (ExtOpc) {
  case TargetOpcode::G_ANYEXT:
    CandidateLoadOpc = TargetOpcode::G_LOAD;
    break;
  case TargetOpcode::G_SEXT:
    CandidateLoadOpc = TargetOpcode::G_SEXTLOAD;
    break;
  case TargetOpcode::G_ZEXT:
    CandidateLoadOpc = TargetOpcode::G_ZEXTLOAD;
    break;
  default:
    llvm_unreachable("Unexpected extend opc");
  }
  return CandidateLoadOpc;
}

bool CombinerHelper::matchCombineExtendingLoads(
    MachineInstr &MI, PreferredTuple &Preferred) const {
  // We match the loads and follow the uses to the extend instead of matching
  // the extends and following the def to the load. This is because the load
  // must remain in the same position for correctness (unless we also add code
  // to find a safe place to sink it) whereas the extend is freely movable.
  // It also prevents us from duplicating the load for the volatile case or just
  // for performance.
  GAnyLoad *LoadMI = dyn_cast<GAnyLoad>(&MI);
  if (!LoadMI)
    return false;

  Register LoadReg = LoadMI->getDstReg();

  LLT LoadValueTy = MRI.getType(LoadReg);
  if (!LoadValueTy.isScalar())
    return false;

  // Most architectures are going to legalize <s8 loads into at least a 1 byte
  // load, and the MMOs can only describe memory accesses in multiples of bytes.
  // If we try to perform extload combining on those, we can end up with
  //   %a(s8) = extload %ptr (load 1 byte from %ptr)
  // ... which is an illegal extload instruction.
  if (LoadValueTy.getSizeInBits() < 8)
    return false;

  // For non power-of-2 types, they will very likely be legalized into multiple
  // loads. Don't bother trying to match them into extending loads.
  if (!llvm::has_single_bit<uint32_t>(LoadValueTy.getSizeInBits()))
    return false;

  // Find the preferred type aside from the any-extends (unless it's the only
  // one) and non-extending ops. We'll emit an extending load to that type and
  // emit a variant of (extend (trunc X)) for the others according to the
  // relative type sizes. At the same time, pick an extend to use based on the
  // extend involved in the chosen type.
  unsigned PreferredOpcode =
      isa<GLoad>(&MI)
          ? TargetOpcode::G_ANYEXT
          : isa<GSExtLoad>(&MI) ? TargetOpcode::G_SEXT : TargetOpcode::G_ZEXT;
  Preferred = {LLT(), PreferredOpcode, nullptr};
  for (auto &UseMI : MRI.use_nodbg_instructions(LoadReg)) {
    if (UseMI.getOpcode() == TargetOpcode::G_SEXT ||
        UseMI.getOpcode() == TargetOpcode::G_ZEXT ||
        (UseMI.getOpcode() == TargetOpcode::G_ANYEXT)) {
      const auto &MMO = LoadMI->getMMO();
      // Don't do anything for atomics.
      if (MMO.isAtomic())
        continue;
      // Check for legality.
      if (!isPreLegalize()) {
        LegalityQuery::MemDesc MMDesc(MMO);
        unsigned CandidateLoadOpc = getExtLoadOpcForExtend(UseMI.getOpcode());
        LLT UseTy = MRI.getType(UseMI.getOperand(0).getReg());
        LLT SrcTy = MRI.getType(LoadMI->getPointerReg());
        if (LI->getAction({CandidateLoadOpc, {UseTy, SrcTy}, {MMDesc}})
                .Action != LegalizeActions::Legal)
          continue;
      }
      Preferred = ChoosePreferredUse(MI, Preferred,
                                     MRI.getType(UseMI.getOperand(0).getReg()),
                                     UseMI.getOpcode(), &UseMI);
    }
  }

  // There were no extends
  if (!Preferred.MI)
    return false;
  // It should be impossible to choose an extend without selecting a different
  // type since by definition the result of an extend is larger.
  assert(Preferred.Ty != LoadValueTy && "Extending to same type?");

  LLVM_DEBUG(dbgs() << "Preferred use is: " << *Preferred.MI);
  return true;
}

void CombinerHelper::applyCombineExtendingLoads(
    MachineInstr &MI, PreferredTuple &Preferred) const {
  // Rewrite the load to the chosen extending load.
  Register ChosenDstReg = Preferred.MI->getOperand(0).getReg();

  // Inserter to insert a truncate back to the original type at a given point
  // with some basic CSE to limit truncate duplication to one per BB.
  DenseMap<MachineBasicBlock *, MachineInstr *> EmittedInsns;
  auto InsertTruncAt = [&](MachineBasicBlock *InsertIntoBB,
                           MachineBasicBlock::iterator InsertBefore,
                           MachineOperand &UseMO) {
    MachineInstr *PreviouslyEmitted = EmittedInsns.lookup(InsertIntoBB);
    if (PreviouslyEmitted) {
      Observer.changingInstr(*UseMO.getParent());
      UseMO.setReg(PreviouslyEmitted->getOperand(0).getReg());
      Observer.changedInstr(*UseMO.getParent());
      return;
    }

    Builder.setInsertPt(*InsertIntoBB, InsertBefore);
    Register NewDstReg = MRI.cloneVirtualRegister(MI.getOperand(0).getReg());
    MachineInstr *NewMI = Builder.buildTrunc(NewDstReg, ChosenDstReg);
    EmittedInsns[InsertIntoBB] = NewMI;
    replaceRegOpWith(MRI, UseMO, NewDstReg);
  };

  Observer.changingInstr(MI);
  unsigned LoadOpc = getExtLoadOpcForExtend(Preferred.ExtendOpcode);
  MI.setDesc(Builder.getTII().get(LoadOpc));

  // Rewrite all the uses to fix up the types.
  auto &LoadValue = MI.getOperand(0);
  SmallVector<MachineOperand *, 4> Uses(
      llvm::make_pointer_range(MRI.use_operands(LoadValue.getReg())));

  for (auto *UseMO : Uses) {
    MachineInstr *UseMI = UseMO->getParent();

    // If the extend is compatible with the preferred extend then we should fix
    // up the type and extend so that it uses the preferred use.
    if (UseMI->getOpcode() == Preferred.ExtendOpcode ||
        UseMI->getOpcode() == TargetOpcode::G_ANYEXT) {
      Register UseDstReg = UseMI->getOperand(0).getReg();
      MachineOperand &UseSrcMO = UseMI->getOperand(1);
      const LLT UseDstTy = MRI.getType(UseDstReg);
      if (UseDstReg != ChosenDstReg) {
        if (Preferred.Ty == UseDstTy) {
          // If the use has the same type as the preferred use, then merge
          // the vregs and erase the extend. For example:
          //   %1:_(s8) = G_LOAD ...
          //   %2:_(s32) = G_SEXT %1(s8)
          //   %3:_(s32) = G_ANYEXT %1(s8)
          //   ... = ... %3(s32)
          // rewrites to:
          //   %2:_(s32) = G_SEXTLOAD ...
          //   ... = ... %2(s32)
          replaceRegWith(MRI, UseDstReg, ChosenDstReg);
          Observer.erasingInstr(*UseMO->getParent());
          UseMO->getParent()->eraseFromParent();
        } else if (Preferred.Ty.getSizeInBits() < UseDstTy.getSizeInBits()) {
          // If the preferred size is smaller, then keep the extend but extend
          // from the result of the extending load. For example:
          //   %1:_(s8) = G_LOAD ...
          //   %2:_(s32) = G_SEXT %1(s8)
          //   %3:_(s64) = G_ANYEXT %1(s8)
          //   ... = ... %3(s64)
          // rewrites to:
          //   %2:_(s32) = G_SEXTLOAD ...
          //   %3:_(s64) = G_ANYEXT %2:_(s32)
          //   ... = ... %3(s64)
          replaceRegOpWith(MRI, UseSrcMO, ChosenDstReg);
        } else {
          // If the preferred size is large, then insert a truncate. For
          // example:
          //   %1:_(s8) = G_LOAD ...
          //   %2:_(s64) = G_SEXT %1(s8)
          //   %3:_(s32) = G_ZEXT %1(s8)
          //   ... = ... %3(s32)
          // rewrites to:
          //   %2:_(s64) = G_SEXTLOAD ...
          //   %4:_(s8) = G_TRUNC %2:_(s64)
          //   %3:_(s32) = G_ZEXT %4:_(s8)
          //   ... = ... %3(s32)
          InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO,
                                                 InsertTruncAt);
        }
        continue;
      }
      // The use is (one of) the uses of the preferred use we chose earlier.
      // We're going to update the load to def this value later so just erase
      // the old extend.
      Observer.erasingInstr(*UseMO->getParent());
      UseMO->getParent()->eraseFromParent();
      continue;
    }

    // The use isn't an extend. Truncate back to the type we originally loaded.
    // This is free on many targets.
    InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO, InsertTruncAt);
  }

  MI.getOperand(0).setReg(ChosenDstReg);
  Observer.changedInstr(MI);
}

bool CombinerHelper::matchCombineLoadWithAndMask(MachineInstr &MI,
                                                 BuildFnTy &MatchInfo) const {
  assert(MI.getOpcode() == TargetOpcode::G_AND);

  // If we have the following code:
  //   %mask = G_CONSTANT 255
  //   %ld = G_LOAD %ptr, (load s16)
  //   %and = G_AND %ld, %mask
  //
  // Try to fold it into
  //   %ld = G_ZEXTLOAD %ptr, (load s8)

  Register Dst = MI.getOperand(0).getReg();
  if (MRI.getType(Dst).isVector())
    return false;

  auto MaybeMask =
      getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
  if (!MaybeMask)
    return false;

  APInt MaskVal = MaybeMask->Value;

  if (!MaskVal.isMask())
    return false;

  Register SrcReg = MI.getOperand(1).getReg();
  // Don't use getOpcodeDef() here since intermediate instructions may have
  // multiple users.
  GAnyLoad *LoadMI = dyn_cast<GAnyLoad>(MRI.getVRegDef(SrcReg));
  if (!LoadMI || !MRI.hasOneNonDBGUse(LoadMI->getDstReg()))
    return false;

  Register LoadReg = LoadMI->getDstReg();
  LLT RegTy = MRI.getType(LoadReg);
  Register PtrReg = LoadMI->getPointerReg();
  unsigned RegSize = RegTy.getSizeInBits();
  LocationSize LoadSizeBits = LoadMI->getMemSizeInBits();
  unsigned MaskSizeBits = MaskVal.countr_one();

  // The mask may not be larger than the in-memory type, as it might cover sign
  // extended bits
  if (MaskSizeBits > LoadSizeBits.getValue())
    return false;

  // If the mask covers the whole destination register, there's nothing to
  // extend
  if (MaskSizeBits >= RegSize)
    return false;

  // Most targets cannot deal with loads of size < 8 and need to re-legalize to
  // at least byte loads. Avoid creating such loads here
  if (MaskSizeBits < 8 || !isPowerOf2_32(MaskSizeBits))
    return false;

  const MachineMemOperand &MMO = LoadMI->getMMO();
  LegalityQuery::MemDesc MemDesc(MMO);

  // Don't modify the memory access size if this is atomic/volatile, but we can
  // still adjust the opcode to indicate the high bit behavior.
  if (LoadMI->isSimple())
    MemDesc.MemoryTy = LLT::scalar(MaskSizeBits);
  else if (LoadSizeBits.getValue() > MaskSizeBits ||
           LoadSizeBits.getValue() == RegSize)
    return false;

  // TODO: Could check if it's legal with the reduced or original memory size.
  if (!isLegalOrBeforeLegalizer(
          {TargetOpcode::G_ZEXTLOAD, {RegTy, MRI.getType(PtrReg)}, {MemDesc}}))
    return false;

  MatchInfo = [=](MachineIRBuilder &B) {
    B.setInstrAndDebugLoc(*LoadMI);
    auto &MF = B.getMF();
    auto PtrInfo = MMO.getPointerInfo();
    auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, MemDesc.MemoryTy);
    B.buildLoadInstr(TargetOpcode::G_ZEXTLOAD, Dst, PtrReg, *NewMMO);
    LoadMI->eraseFromParent();
  };
  return true;
}

bool CombinerHelper::isPredecessor(const MachineInstr &DefMI,
                                   const MachineInstr &UseMI) const {
  assert(!DefMI.isDebugInstr() && !UseMI.isDebugInstr() &&
         "shouldn't consider debug uses");
  assert(DefMI.getParent() == UseMI.getParent());
  if (&DefMI == &UseMI)
    return true;
  const MachineBasicBlock &MBB = *DefMI.getParent();
  auto DefOrUse = find_if(MBB, [&DefMI, &UseMI](const MachineInstr &MI) {
    return &MI == &DefMI || &MI == &UseMI;
  });
  if (DefOrUse == MBB.end())
    llvm_unreachable("Block must contain both DefMI and UseMI!");
  return &*DefOrUse == &DefMI;
}

bool CombinerHelper::dominates(const MachineInstr &DefMI,
                               const MachineInstr &UseMI) const {
  assert(!DefMI.isDebugInstr() && !UseMI.isDebugInstr() &&
         "shouldn't consider debug uses");
  if (MDT)
    return MDT->dominates(&DefMI, &UseMI);
  else if (DefMI.getParent() != UseMI.getParent())
    return false;

  return isPredecessor(DefMI, UseMI);
}

bool CombinerHelper::matchSextTruncSextLoad(MachineInstr &MI) const {
  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
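  // Fold away a sign extension that the memory access already performed, e.g.:
  //   %ld:_(s64) = G_SEXTLOAD %ptr (load 1)
  //   %t:_(s32) = G_TRUNC %ld
  //   %x:_(s32) = G_SEXT_INREG %t, 8
  // Here the load already sign-extended from 8 bits, so %x is just a copy of
  // %t.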
  Register SrcReg = MI.getOperand(1).getReg();
  Register LoadUser = SrcReg;

  if (MRI.getType(SrcReg).isVector())
    return false;

  Register TruncSrc;
  if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc))))
    LoadUser = TruncSrc;

  uint64_t SizeInBits = MI.getOperand(2).getImm();
  // If the source is a G_SEXTLOAD from the same bit width, then we don't
  // need any extend at all, just a truncate.
  if (auto *LoadMI = getOpcodeDef<GSExtLoad>(LoadUser, MRI)) {
    // If truncating more than the original extended value, abort.
    auto LoadSizeBits = LoadMI->getMemSizeInBits();
    if (TruncSrc &&
        MRI.getType(TruncSrc).getSizeInBits() < LoadSizeBits.getValue())
      return false;
    if (LoadSizeBits == SizeInBits)
      return true;
  }
  return false;
}

void CombinerHelper::applySextTruncSextLoad(MachineInstr &MI) const {
  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
  Builder.buildCopy(MI.getOperand(0).getReg(), MI.getOperand(1).getReg());
  MI.eraseFromParent();
}

bool CombinerHelper::matchSextInRegOfLoad(
    MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) const {
  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);

  Register DstReg = MI.getOperand(0).getReg();
  LLT RegTy = MRI.getType(DstReg);

  // Only supports scalars for now.
  if (RegTy.isVector())
    return false;

  Register SrcReg = MI.getOperand(1).getReg();
  auto *LoadDef = getOpcodeDef<GLoad>(SrcReg, MRI);
  if (!LoadDef || !MRI.hasOneNonDBGUse(SrcReg))
    return false;

  uint64_t MemBits = LoadDef->getMemSizeInBits().getValue();

  // If the sign extend extends from a narrower width than the load's width,
  // then we can narrow the load width when we combine to a G_SEXTLOAD.
  // Avoid widening the load at all.
  unsigned NewSizeBits = std::min((uint64_t)MI.getOperand(2).getImm(), MemBits);

  // Don't generate G_SEXTLOADs with a < 1 byte width.
  if (NewSizeBits < 8)
    return false;
  // Don't bother creating a non-power-2 sextload, it will likely be broken up
  // anyway for most targets.
  if (!isPowerOf2_32(NewSizeBits))
    return false;

  const MachineMemOperand &MMO = LoadDef->getMMO();
  LegalityQuery::MemDesc MMDesc(MMO);

  // Don't modify the memory access size if this is atomic/volatile, but we can
  // still adjust the opcode to indicate the high bit behavior.
  if (LoadDef->isSimple())
    MMDesc.MemoryTy = LLT::scalar(NewSizeBits);
  else if (MemBits > NewSizeBits || MemBits == RegTy.getSizeInBits())
    return false;

  // TODO: Could check if it's legal with the reduced or original memory size.
  if (!isLegalOrBeforeLegalizer({TargetOpcode::G_SEXTLOAD,
                                 {MRI.getType(LoadDef->getDstReg()),
                                  MRI.getType(LoadDef->getPointerReg())},
                                 {MMDesc}}))
    return false;

  MatchInfo = std::make_tuple(LoadDef->getDstReg(), NewSizeBits);
  return true;
}

void CombinerHelper::applySextInRegOfLoad(
    MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) const {
  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
  Register LoadReg;
  unsigned ScalarSizeBits;
  std::tie(LoadReg, ScalarSizeBits) = MatchInfo;
  GLoad *LoadDef = cast<GLoad>(MRI.getVRegDef(LoadReg));

  // If we have the following:
  //   %ld = G_LOAD %ptr, (load 2)
  //   %ext = G_SEXT_INREG %ld, 8
  // ==>
  //   %ld = G_SEXTLOAD %ptr (load 1)

  auto &MMO = LoadDef->getMMO();
  Builder.setInstrAndDebugLoc(*LoadDef);
  auto &MF = Builder.getMF();
  auto PtrInfo = MMO.getPointerInfo();
  auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, ScalarSizeBits / 8);
  Builder.buildLoadInstr(TargetOpcode::G_SEXTLOAD, MI.getOperand(0).getReg(),
                         LoadDef->getPointerReg(), *NewMMO);
  MI.eraseFromParent();

  // Not all loads can be deleted, so make sure the old one is removed.
  LoadDef->eraseFromParent();
}

/// Return true if 'MI' is a load or a store that may fold its address
/// operand into the load / store addressing mode.
static bool canFoldInAddressingMode(GLoadStore *MI, const TargetLowering &TLI,
                                    MachineRegisterInfo &MRI) {
  TargetLowering::AddrMode AM;
  auto *MF = MI->getMF();
  auto *Addr = getOpcodeDef<GPtrAdd>(MI->getPointerReg(), MRI);
  if (!Addr)
    return false;

  AM.HasBaseReg = true;
  if (auto CstOff = getIConstantVRegVal(Addr->getOffsetReg(), MRI))
    AM.BaseOffs = CstOff->getSExtValue(); // [reg +/- imm]
  else
    AM.Scale = 1; // [reg +/- reg]

  return TLI.isLegalAddressingMode(
      MF->getDataLayout(), AM,
      getTypeForLLT(MI->getMMO().getMemoryType(),
                    MF->getFunction().getContext()),
      MI->getMMO().getAddrSpace());
}

static unsigned getIndexedOpc(unsigned LdStOpc) {
  switch (LdStOpc) {
  case TargetOpcode::G_LOAD:
    return TargetOpcode::G_INDEXED_LOAD;
  case TargetOpcode::G_STORE:
    return TargetOpcode::G_INDEXED_STORE;
  case TargetOpcode::G_ZEXTLOAD:
    return TargetOpcode::G_INDEXED_ZEXTLOAD;
  case TargetOpcode::G_SEXTLOAD:
    return TargetOpcode::G_INDEXED_SEXTLOAD;
  default:
    llvm_unreachable("Unexpected opcode");
  }
}

bool CombinerHelper::isIndexedLoadStoreLegal(GLoadStore &LdSt) const {
  // Check for legality.
  LLT PtrTy = MRI.getType(LdSt.getPointerReg());
  LLT Ty = MRI.getType(LdSt.getReg(0));
  LLT MemTy = LdSt.getMMO().getMemoryType();
  SmallVector<LegalityQuery::MemDesc, 2> MemDescrs(
      {{MemTy, MemTy.getSizeInBits().getKnownMinValue(),
        AtomicOrdering::NotAtomic}});
  unsigned IndexedOpc = getIndexedOpc(LdSt.getOpcode());
  SmallVector<LLT> OpTys;
  if (IndexedOpc == TargetOpcode::G_INDEXED_STORE)
    OpTys = {PtrTy, Ty, Ty};
  else
    OpTys = {Ty, PtrTy}; // For G_INDEXED_LOAD, G_INDEXED_[SZ]EXTLOAD

  LegalityQuery Q(IndexedOpc, OpTys, MemDescrs);
  return isLegal(Q);
}

static cl::opt<unsigned> PostIndexUseThreshold(
    "post-index-use-threshold", cl::Hidden, cl::init(32),
    cl::desc("Number of uses of a base pointer to check before it is no longer "
             "considered for post-indexing."));

bool CombinerHelper::findPostIndexCandidate(GLoadStore &LdSt, Register &Addr,
                                            Register &Base, Register &Offset,
                                            bool &RematOffset) const {
  // We're looking for the following pattern, for either load or store:
  //   %baseptr:_(p0) = ...
  //   G_STORE %val(s64), %baseptr(p0)
  //   %offset:_(s64) = G_CONSTANT i64 -256
  //   %new_addr:_(p0) = G_PTR_ADD %baseptr, %offset(s64)
  const auto &TLI = getTargetLowering();

  Register Ptr = LdSt.getPointerReg();
  // If the store is the only use, don't bother.
  if (MRI.hasOneNonDBGUse(Ptr))
    return false;

  if (!isIndexedLoadStoreLegal(LdSt))
    return false;

  if (getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Ptr, MRI))
    return false;

  MachineInstr *StoredValDef = getDefIgnoringCopies(LdSt.getReg(0), MRI);
  auto *PtrDef = MRI.getVRegDef(Ptr);

  unsigned NumUsesChecked = 0;
  for (auto &Use : MRI.use_nodbg_instructions(Ptr)) {
    if (++NumUsesChecked > PostIndexUseThreshold)
      return false; // Try to avoid exploding compile time.

    auto *PtrAdd = dyn_cast<GPtrAdd>(&Use);
    // The use itself might be dead. This can happen during combines if DCE
    // hasn't had a chance to run yet. Don't allow it to form an indexed op.
    if (!PtrAdd || MRI.use_nodbg_empty(PtrAdd->getReg(0)))
      continue;

    // Check the user of this isn't the store, otherwise we'd be generating an
    // indexed store defining its own use.
    if (StoredValDef == &Use)
      continue;

    Offset = PtrAdd->getOffsetReg();
    if (!ForceLegalIndexing &&
        !TLI.isIndexingLegal(LdSt, PtrAdd->getBaseReg(), Offset,
                             /*IsPre*/ false, MRI))
      continue;

    // Make sure the offset calculation is before the potentially indexed op.
    MachineInstr *OffsetDef = MRI.getVRegDef(Offset);
    RematOffset = false;
    if (!dominates(*OffsetDef, LdSt)) {
      // If the offset however is just a G_CONSTANT, we can always just
      // rematerialize it where we need it.
      if (OffsetDef->getOpcode() != TargetOpcode::G_CONSTANT)
        continue;
      RematOffset = true;
    }

    for (auto &BasePtrUse : MRI.use_nodbg_instructions(PtrAdd->getBaseReg())) {
      if (&BasePtrUse == PtrDef)
        continue;

      // If the user is a later load/store that can be post-indexed, then don't
      // combine this one.
      auto *BasePtrLdSt = dyn_cast<GLoadStore>(&BasePtrUse);
      if (BasePtrLdSt && BasePtrLdSt != &LdSt &&
          dominates(LdSt, *BasePtrLdSt) &&
          isIndexedLoadStoreLegal(*BasePtrLdSt))
        return false;

      // Now we're looking for the key G_PTR_ADD instruction, which contains
      // the offset add that we want to fold.
      if (auto *BasePtrUseDef = dyn_cast<GPtrAdd>(&BasePtrUse)) {
        Register PtrAddDefReg = BasePtrUseDef->getReg(0);
        for (auto &BaseUseUse : MRI.use_nodbg_instructions(PtrAddDefReg)) {
          // If the use is in a different block, then we may produce worse code
          // due to the extra register pressure.
          if (BaseUseUse.getParent() != LdSt.getParent())
            return false;

          if (auto *UseUseLdSt = dyn_cast<GLoadStore>(&BaseUseUse))
            if (canFoldInAddressingMode(UseUseLdSt, TLI, MRI))
              return false;
        }
        if (!dominates(LdSt, BasePtrUse))
          return false; // All uses must be dominated by the load/store.
      }
    }

    Addr = PtrAdd->getReg(0);
    Base = PtrAdd->getBaseReg();
    return true;
  }

  return false;
}

bool CombinerHelper::findPreIndexCandidate(GLoadStore &LdSt, Register &Addr,
                                           Register &Base,
                                           Register &Offset) const {
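  // Here we look for a pre-indexable pattern, where the address of the
  // load/store is itself the result of a pointer add, e.g.:
  //   %addr:_(p0) = G_PTR_ADD %base, %offset
  //   G_STORE %val(s64), %addr(p0)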
  auto &MF = *LdSt.getParent()->getParent();
  const auto &TLI = *MF.getSubtarget().getTargetLowering();

  Addr = LdSt.getPointerReg();
  if (!mi_match(Addr, MRI, m_GPtrAdd(m_Reg(Base), m_Reg(Offset))) ||
      MRI.hasOneNonDBGUse(Addr))
    return false;

  if (!ForceLegalIndexing &&
      !TLI.isIndexingLegal(LdSt, Base, Offset, /*IsPre*/ true, MRI))
    return false;

  if (!isIndexedLoadStoreLegal(LdSt))
    return false;

  MachineInstr *BaseDef = getDefIgnoringCopies(Base, MRI);
  if (BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
    return false;

  if (auto *St = dyn_cast<GStore>(&LdSt)) {
    // Would require a copy.
    if (Base == St->getValueReg())
      return false;

    // We're expecting one use of Addr in MI, but it could also be the
    // value stored, which isn't actually dominated by the instruction.
    if (St->getValueReg() == Addr)
      return false;
  }

  // Avoid increasing cross-block register pressure.
  for (auto &AddrUse : MRI.use_nodbg_instructions(Addr))
    if (AddrUse.getParent() != LdSt.getParent())
      return false;

  // FIXME: check whether all uses of the base pointer are constant PtrAdds.
  // That might allow us to end base's liveness here by adjusting the constant.
  bool RealUse = false;
  for (auto &AddrUse : MRI.use_nodbg_instructions(Addr)) {
    if (!dominates(LdSt, AddrUse))
      return false; // All uses must be dominated by the load/store.

    // If Ptr may be folded in addressing mode of other use, then it's
    // not profitable to do this transformation.
    if (auto *UseLdSt = dyn_cast<GLoadStore>(&AddrUse)) {
      if (!canFoldInAddressingMode(UseLdSt, TLI, MRI))
        RealUse = true;
    } else {
      RealUse = true;
    }
  }
  return RealUse;
}

bool CombinerHelper::matchCombineExtractedVectorLoad(
    MachineInstr &MI, BuildFnTy &MatchInfo) const {
  assert(MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT);

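  // Try to narrow a load feeding an extract, e.g.:
  //   %v:_(<4 x s32>) = G_LOAD %ptr
  //   %e:_(s32) = G_EXTRACT_VECTOR_ELT %v, %idx
  // into a scalar load of just the selected element:
  //   %eptr:_(p0) = <pointer to element %idx of %ptr>
  //   %e:_(s32) = G_LOAD %eptr
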
1392 // Check if there is a load that defines the vector being extracted from.
1393 auto *LoadMI = getOpcodeDef<GLoad>(Reg: MI.getOperand(i: 1).getReg(), MRI);
1394 if (!LoadMI)
1395 return false;
1396
1397 Register Vector = MI.getOperand(i: 1).getReg();
1398 LLT VecEltTy = MRI.getType(Reg: Vector).getElementType();
1399
1400 assert(MRI.getType(MI.getOperand(0).getReg()) == VecEltTy);
1401
1402 // Checking whether we should reduce the load width.
1403 if (!MRI.hasOneNonDBGUse(RegNo: Vector))
1404 return false;
1405
1406 // Check if the defining load is simple.
1407 if (!LoadMI->isSimple())
1408 return false;
1409
1410 // If the vector element type is not a multiple of a byte then we are unable
1411 // to correctly compute an address to load only the extracted element as a
1412 // scalar.
1413 if (!VecEltTy.isByteSized())
1414 return false;
1415
1416 // Check for load fold barriers between the extraction and the load.
1417 if (MI.getParent() != LoadMI->getParent())
1418 return false;
1419 const unsigned MaxIter = 20;
1420 unsigned Iter = 0;
1421 for (auto II = LoadMI->getIterator(), IE = MI.getIterator(); II != IE; ++II) {
1422 if (II->isLoadFoldBarrier())
1423 return false;
1424 if (Iter++ == MaxIter)
1425 return false;
1426 }
1427
1428 // Check if the new load that we are going to create is legal
1429 // if we are in the post-legalization phase.
1430 MachineMemOperand MMO = LoadMI->getMMO();
1431 Align Alignment = MMO.getAlign();
1432 MachinePointerInfo PtrInfo;
1433 uint64_t Offset;
1434
1435 // Finding the appropriate PtrInfo if offset is a known constant.
1436 // This is required to create the memory operand for the narrowed load.
1437 // This machine memory operand object helps us infer about legality
1438 // before we proceed to combine the instruction.
1439 if (auto CVal = getIConstantVRegVal(VReg: Vector, MRI)) {
1440 int Elt = CVal->getZExtValue();
1441 // FIXME: should be (ABI size)*Elt.
1442 Offset = VecEltTy.getSizeInBits() * Elt / 8;
1443 PtrInfo = MMO.getPointerInfo().getWithOffset(O: Offset);
1444 } else {
1445 // Discard the pointer info except the address space because the memory
1446 // operand can't represent this new access since the offset is variable.
1447 Offset = VecEltTy.getSizeInBits() / 8;
1448 PtrInfo = MachinePointerInfo(MMO.getPointerInfo().getAddrSpace());
1449 }
1450
1451 Alignment = commonAlignment(A: Alignment, Offset);
1452
1453 Register VecPtr = LoadMI->getPointerReg();
1454 LLT PtrTy = MRI.getType(Reg: VecPtr);
1455
1456 MachineFunction &MF = *MI.getMF();
1457 auto *NewMMO = MF.getMachineMemOperand(MMO: &MMO, PtrInfo, Ty: VecEltTy);
1458
1459 LegalityQuery::MemDesc MMDesc(*NewMMO);
1460
1461 if (!isLegalOrBeforeLegalizer(
1462 Query: {TargetOpcode::G_LOAD, {VecEltTy, PtrTy}, {MMDesc}}))
1463 return false;
1464
1465 // Load must be allowed and fast on the target.
1466 LLVMContext &C = MF.getFunction().getContext();
1467 auto &DL = MF.getDataLayout();
1468 unsigned Fast = 0;
1469 if (!getTargetLowering().allowsMemoryAccess(Context&: C, DL, Ty: VecEltTy, MMO: *NewMMO,
1470 Fast: &Fast) ||
1471 !Fast)
1472 return false;
1473
1474 Register Result = MI.getOperand(i: 0).getReg();
1475 Register Index = MI.getOperand(i: 2).getReg();
1476
1477 MatchInfo = [=](MachineIRBuilder &B) {
1478 GISelObserverWrapper DummyObserver;
1479 LegalizerHelper Helper(B.getMF(), DummyObserver, B);
1480 // Get the pointer to the vector element.
1481 Register finalPtr = Helper.getVectorElementPointer(
1482 VecPtr: LoadMI->getPointerReg(), VecTy: MRI.getType(Reg: LoadMI->getOperand(i: 0).getReg()),
1483 Index);
1484 // New G_LOAD instruction.
1485 B.buildLoad(Res: Result, Addr: finalPtr, PtrInfo, Alignment);
1486 // Remove the original G_LOAD instruction.
1487 LoadMI->eraseFromParent();
1488 };
1489
1490 return true;
1491}
1492
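/// Try to fold a pointer increment into a pre- or post-indexed load/store. A
/// pre-indexed store sketch in hypothetical MIR (the trailing immediate is
/// the IsPre flag):
///   %addr:_(p0) = G_PTR_ADD %base, %offset
///   G_STORE %val(s32), %addr(p0) :: (store (s32))
/// -->
///   %addr:_(p0) = G_INDEXED_STORE %val, %base, %offset, 1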
1493bool CombinerHelper::matchCombineIndexedLoadStore(
1494 MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) const {
1495 auto &LdSt = cast<GLoadStore>(Val&: MI);
1496
1497 if (LdSt.isAtomic())
1498 return false;
1499
1500 MatchInfo.IsPre = findPreIndexCandidate(LdSt, Addr&: MatchInfo.Addr, Base&: MatchInfo.Base,
1501 Offset&: MatchInfo.Offset);
1502 if (!MatchInfo.IsPre &&
1503 !findPostIndexCandidate(LdSt, Addr&: MatchInfo.Addr, Base&: MatchInfo.Base,
1504 Offset&: MatchInfo.Offset, RematOffset&: MatchInfo.RematOffset))
1505 return false;
1506
1507 return true;
1508}
1509
1510void CombinerHelper::applyCombineIndexedLoadStore(
1511 MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) const {
1512 MachineInstr &AddrDef = *MRI.getUniqueVRegDef(Reg: MatchInfo.Addr);
1513 unsigned Opcode = MI.getOpcode();
1514 bool IsStore = Opcode == TargetOpcode::G_STORE;
1515 unsigned NewOpcode = getIndexedOpc(LdStOpc: Opcode);
1516
1517 // If the offset constant didn't happen to dominate the load/store, we can
1518 // just clone it as needed.
1519 if (MatchInfo.RematOffset) {
1520 auto *OldCst = MRI.getVRegDef(Reg: MatchInfo.Offset);
1521 auto NewCst = Builder.buildConstant(Res: MRI.getType(Reg: MatchInfo.Offset),
1522 Val: *OldCst->getOperand(i: 1).getCImm());
1523 MatchInfo.Offset = NewCst.getReg(Idx: 0);
1524 }
1525
1526 auto MIB = Builder.buildInstr(Opcode: NewOpcode);
1527 if (IsStore) {
1528 MIB.addDef(RegNo: MatchInfo.Addr);
1529 MIB.addUse(RegNo: MI.getOperand(i: 0).getReg());
1530 } else {
1531 MIB.addDef(RegNo: MI.getOperand(i: 0).getReg());
1532 MIB.addDef(RegNo: MatchInfo.Addr);
1533 }
1534
1535 MIB.addUse(RegNo: MatchInfo.Base);
1536 MIB.addUse(RegNo: MatchInfo.Offset);
1537 MIB.addImm(Val: MatchInfo.IsPre);
1538 MIB->cloneMemRefs(MF&: *MI.getMF(), MI);
1539 MI.eraseFromParent();
1540 AddrDef.eraseFromParent();
1541
1542 LLVM_DEBUG(dbgs() << " Combined to indexed operation\n");
1543}
1544
1545bool CombinerHelper::matchCombineDivRem(MachineInstr &MI,
1546 MachineInstr *&OtherMI) const {
1547 unsigned Opcode = MI.getOpcode();
1548 bool IsDiv, IsSigned;
1549
1550 switch (Opcode) {
1551 default:
1552 llvm_unreachable("Unexpected opcode!");
1553 case TargetOpcode::G_SDIV:
1554 case TargetOpcode::G_UDIV: {
1555 IsDiv = true;
1556 IsSigned = Opcode == TargetOpcode::G_SDIV;
1557 break;
1558 }
1559 case TargetOpcode::G_SREM:
1560 case TargetOpcode::G_UREM: {
1561 IsDiv = false;
1562 IsSigned = Opcode == TargetOpcode::G_SREM;
1563 break;
1564 }
1565 }
1566
1567 Register Src1 = MI.getOperand(i: 1).getReg();
1568 unsigned DivOpcode, RemOpcode, DivremOpcode;
1569 if (IsSigned) {
1570 DivOpcode = TargetOpcode::G_SDIV;
1571 RemOpcode = TargetOpcode::G_SREM;
1572 DivremOpcode = TargetOpcode::G_SDIVREM;
1573 } else {
1574 DivOpcode = TargetOpcode::G_UDIV;
1575 RemOpcode = TargetOpcode::G_UREM;
1576 DivremOpcode = TargetOpcode::G_UDIVREM;
1577 }
1578
1579 if (!isLegalOrBeforeLegalizer(Query: {DivremOpcode, {MRI.getType(Reg: Src1)}}))
1580 return false;
1581
1582 // Combine:
1583 // %div:_ = G_[SU]DIV %src1:_, %src2:_
1584 // %rem:_ = G_[SU]REM %src1:_, %src2:_
1585 // into:
1586 // %div:_, %rem:_ = G_[SU]DIVREM %src1:_, %src2:_
1587
1588 // Combine:
1589 // %rem:_ = G_[SU]REM %src1:_, %src2:_
1590 // %div:_ = G_[SU]DIV %src1:_, %src2:_
1591 // into:
1592 // %div:_, %rem:_ = G_[SU]DIVREM %src1:_, %src2:_
1593
1594 for (auto &UseMI : MRI.use_nodbg_instructions(Reg: Src1)) {
1595 if (MI.getParent() == UseMI.getParent() &&
1596 ((IsDiv && UseMI.getOpcode() == RemOpcode) ||
1597 (!IsDiv && UseMI.getOpcode() == DivOpcode)) &&
1598 matchEqualDefs(MOP1: MI.getOperand(i: 2), MOP2: UseMI.getOperand(i: 2)) &&
1599 matchEqualDefs(MOP1: MI.getOperand(i: 1), MOP2: UseMI.getOperand(i: 1))) {
1600 OtherMI = &UseMI;
1601 return true;
1602 }
1603 }
1604
1605 return false;
1606}
1607
1608void CombinerHelper::applyCombineDivRem(MachineInstr &MI,
1609 MachineInstr *&OtherMI) const {
1610 unsigned Opcode = MI.getOpcode();
1611 assert(OtherMI && "OtherMI shouldn't be null.");
1612
1613 Register DestDivReg, DestRemReg;
1614 if (Opcode == TargetOpcode::G_SDIV || Opcode == TargetOpcode::G_UDIV) {
1615 DestDivReg = MI.getOperand(i: 0).getReg();
1616 DestRemReg = OtherMI->getOperand(i: 0).getReg();
1617 } else {
1618 DestDivReg = OtherMI->getOperand(i: 0).getReg();
1619 DestRemReg = MI.getOperand(i: 0).getReg();
1620 }
1621
1622 bool IsSigned =
1623 Opcode == TargetOpcode::G_SDIV || Opcode == TargetOpcode::G_SREM;
1624
1625 // Check which instruction is first in the block so we don't break def-use
1626 // deps by "moving" the instruction incorrectly. Also keep track of which
1627 // instruction is first so we pick its operands, avoiding use-before-def
1628 // bugs.
1629 MachineInstr *FirstInst = dominates(DefMI: MI, UseMI: *OtherMI) ? &MI : OtherMI;
1630 Builder.setInstrAndDebugLoc(*FirstInst);
1631
1632 Builder.buildInstr(Opc: IsSigned ? TargetOpcode::G_SDIVREM
1633 : TargetOpcode::G_UDIVREM,
1634 DstOps: {DestDivReg, DestRemReg},
1635 SrcOps: {FirstInst->getOperand(i: 1), FirstInst->getOperand(i: 2)});
1636 MI.eraseFromParent();
1637 OtherMI->eraseFromParent();
1638}
1639
1640bool CombinerHelper::matchOptBrCondByInvertingCond(
1641 MachineInstr &MI, MachineInstr *&BrCond) const {
1642 assert(MI.getOpcode() == TargetOpcode::G_BR);
1643
1644 // Try to match the following:
1645 // bb1:
1646 // G_BRCOND %c1, %bb2
1647 // G_BR %bb3
1648 // bb2:
1649 // ...
1650 // bb3:
1651
1652 // The above pattern does not have a fall through to the successor bb2, always
1653 // resulting in a branch no matter which path is taken. Here we try to find
1654 // and replace that pattern with a conditional branch to bb3, otherwise
1655 // falling through to bb2. This is generally better for branch predictors.
1656
1657 MachineBasicBlock *MBB = MI.getParent();
1658 MachineBasicBlock::iterator BrIt(MI);
1659 if (BrIt == MBB->begin())
1660 return false;
1661 assert(std::next(BrIt) == MBB->end() && "expected G_BR to be a terminator");
1662
1663 BrCond = &*std::prev(x: BrIt);
1664 if (BrCond->getOpcode() != TargetOpcode::G_BRCOND)
1665 return false;
1666
1667 // Check that the next block is the conditional branch target. Also make sure
1668 // that it isn't the same as the G_BR's target (otherwise, this will loop.)
1669 MachineBasicBlock *BrCondTarget = BrCond->getOperand(i: 1).getMBB();
1670 return BrCondTarget != MI.getOperand(i: 0).getMBB() &&
1671 MBB->isLayoutSuccessor(MBB: BrCondTarget);
1672}
1673
1674void CombinerHelper::applyOptBrCondByInvertingCond(
1675 MachineInstr &MI, MachineInstr *&BrCond) const {
1676 MachineBasicBlock *BrTarget = MI.getOperand(i: 0).getMBB();
1677 Builder.setInstrAndDebugLoc(*BrCond);
1678 LLT Ty = MRI.getType(Reg: BrCond->getOperand(i: 0).getReg());
1679 // FIXME: Does int/fp matter for this? If so, we might need to restrict
1680 // this to i1 only since we might not know for sure what kind of
1681 // compare generated the condition value.
1682 auto True = Builder.buildConstant(
1683 Res: Ty, Val: getICmpTrueVal(TLI: getTargetLowering(), IsVector: false, IsFP: false));
1684 auto Xor = Builder.buildXor(Dst: Ty, Src0: BrCond->getOperand(i: 0), Src1: True);
1685
1686 auto *FallthroughBB = BrCond->getOperand(i: 1).getMBB();
1687 Observer.changingInstr(MI);
1688 MI.getOperand(i: 0).setMBB(FallthroughBB);
1689 Observer.changedInstr(MI);
1690
1691 // Change the conditional branch to use the inverted condition and
1692 // new target block.
1693 Observer.changingInstr(MI&: *BrCond);
1694 BrCond->getOperand(i: 0).setReg(Xor.getReg(Idx: 0));
1695 BrCond->getOperand(i: 1).setMBB(BrTarget);
1696 Observer.changedInstr(MI&: *BrCond);
1697}
1698
1699bool CombinerHelper::tryEmitMemcpyInline(MachineInstr &MI) const {
1700 MachineIRBuilder HelperBuilder(MI);
1701 GISelObserverWrapper DummyObserver;
1702 LegalizerHelper Helper(HelperBuilder.getMF(), DummyObserver, HelperBuilder);
1703 return Helper.lowerMemcpyInline(MI) ==
1704 LegalizerHelper::LegalizeResult::Legalized;
1705}
1706
1707bool CombinerHelper::tryCombineMemCpyFamily(MachineInstr &MI,
1708 unsigned MaxLen) const {
1709 MachineIRBuilder HelperBuilder(MI);
1710 GISelObserverWrapper DummyObserver;
1711 LegalizerHelper Helper(HelperBuilder.getMF(), DummyObserver, HelperBuilder);
1712 return Helper.lowerMemCpyFamily(MI, MaxLen) ==
1713 LegalizerHelper::LegalizeResult::Legalized;
1714}
1715
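/// Constant-fold a unary FP opcode applied to a constant operand, e.g. (a
/// sketch with an invented value):
///   %c:_(s32) = G_FCONSTANT float 2.0
///   %r:_(s32) = G_FNEG %c
/// -->
///   %r:_(s32) = G_FCONSTANT float -2.0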
1716static APFloat constantFoldFpUnary(const MachineInstr &MI,
1717 const MachineRegisterInfo &MRI,
1718 const APFloat &Val) {
1719 APFloat Result(Val);
1720 switch (MI.getOpcode()) {
1721 default:
1722 llvm_unreachable("Unexpected opcode!");
1723 case TargetOpcode::G_FNEG: {
1724 Result.changeSign();
1725 return Result;
1726 }
1727 case TargetOpcode::G_FABS: {
1728 Result.clearSign();
1729 return Result;
1730 }
1731 case TargetOpcode::G_FPTRUNC: {
1732 bool Unused;
1733 LLT DstTy = MRI.getType(Reg: MI.getOperand(i: 0).getReg());
1734 Result.convert(ToSemantics: getFltSemanticForLLT(Ty: DstTy), RM: APFloat::rmNearestTiesToEven,
1735 losesInfo: &Unused);
1736 return Result;
1737 }
1738 case TargetOpcode::G_FSQRT: {
1739 bool Unused;
1740 Result.convert(ToSemantics: APFloat::IEEEdouble(), RM: APFloat::rmNearestTiesToEven,
1741 losesInfo: &Unused);
1742 Result = APFloat(sqrt(x: Result.convertToDouble()));
1743 break;
1744 }
1745 case TargetOpcode::G_FLOG2: {
1746 bool Unused;
1747 Result.convert(ToSemantics: APFloat::IEEEdouble(), RM: APFloat::rmNearestTiesToEven,
1748 losesInfo: &Unused);
1749 Result = APFloat(log2(x: Result.convertToDouble()));
1750 break;
1751 }
1752 }
1753 // Convert the result back to the semantics of the original constant;
1754 // otherwise `buildFConstant` will assert on a size mismatch. Only `G_FSQRT`
1755 // and `G_FLOG2` reach here.
1756 bool Unused;
1757 Result.convert(ToSemantics: Val.getSemantics(), RM: APFloat::rmNearestTiesToEven, losesInfo: &Unused);
1758 return Result;
1759}
1760
1761void CombinerHelper::applyCombineConstantFoldFpUnary(
1762 MachineInstr &MI, const ConstantFP *Cst) const {
1763 APFloat Folded = constantFoldFpUnary(MI, MRI, Val: Cst->getValue());
1764 const ConstantFP *NewCst = ConstantFP::get(Context&: Builder.getContext(), V: Folded);
1765 Builder.buildFConstant(Res: MI.getOperand(i: 0), Val: *NewCst);
1766 MI.eraseFromParent();
1767}
1768
1769bool CombinerHelper::matchPtrAddImmedChain(MachineInstr &MI,
1770 PtrAddChain &MatchInfo) const {
1771 // We're trying to match the following pattern:
1772 // %t1 = G_PTR_ADD %base, G_CONSTANT imm1
1773 // %root = G_PTR_ADD %t1, G_CONSTANT imm2
1774 // -->
1775 // %root = G_PTR_ADD %base, G_CONSTANT (imm1 + imm2)
1776
1777 if (MI.getOpcode() != TargetOpcode::G_PTR_ADD)
1778 return false;
1779
1780 Register Add2 = MI.getOperand(i: 1).getReg();
1781 Register Imm1 = MI.getOperand(i: 2).getReg();
1782 auto MaybeImmVal = getIConstantVRegValWithLookThrough(VReg: Imm1, MRI);
1783 if (!MaybeImmVal)
1784 return false;
1785
1786 MachineInstr *Add2Def = MRI.getVRegDef(Reg: Add2);
1787 if (!Add2Def || Add2Def->getOpcode() != TargetOpcode::G_PTR_ADD)
1788 return false;
1789
1790 Register Base = Add2Def->getOperand(i: 1).getReg();
1791 Register Imm2 = Add2Def->getOperand(i: 2).getReg();
1792 auto MaybeImm2Val = getIConstantVRegValWithLookThrough(VReg: Imm2, MRI);
1793 if (!MaybeImm2Val)
1794 return false;
1795
1796 // Check if the new combined immediate forms an illegal addressing mode.
1797 // Do not combine if it was legal before but would become illegal.
1798 // To do so, we need to find a load/store user of the pointer to get
1799 // the access type.
1800 Type *AccessTy = nullptr;
1801 auto &MF = *MI.getMF();
1802 for (auto &UseMI : MRI.use_nodbg_instructions(Reg: MI.getOperand(i: 0).getReg())) {
1803 if (auto *LdSt = dyn_cast<GLoadStore>(Val: &UseMI)) {
1804 AccessTy = getTypeForLLT(Ty: MRI.getType(Reg: LdSt->getReg(Idx: 0)),
1805 C&: MF.getFunction().getContext());
1806 break;
1807 }
1808 }
1809 TargetLoweringBase::AddrMode AMNew;
1810 APInt CombinedImm = MaybeImmVal->Value + MaybeImm2Val->Value;
1811 AMNew.BaseOffs = CombinedImm.getSExtValue();
1812 if (AccessTy) {
1813 AMNew.HasBaseReg = true;
1814 TargetLoweringBase::AddrMode AMOld;
1815 AMOld.BaseOffs = MaybeImmVal->Value.getSExtValue();
1816 AMOld.HasBaseReg = true;
1817 unsigned AS = MRI.getType(Reg: Add2).getAddressSpace();
1818 const auto &TLI = *MF.getSubtarget().getTargetLowering();
1819 if (TLI.isLegalAddressingMode(DL: MF.getDataLayout(), AM: AMOld, Ty: AccessTy, AddrSpace: AS) &&
1820 !TLI.isLegalAddressingMode(DL: MF.getDataLayout(), AM: AMNew, Ty: AccessTy, AddrSpace: AS))
1821 return false;
1822 }
1823
1824 // Pass the combined immediate to the apply function.
1825 MatchInfo.Imm = AMNew.BaseOffs;
1826 MatchInfo.Base = Base;
1827 MatchInfo.Bank = getRegBank(Reg: Imm2);
1828 return true;
1829}
1830
1831void CombinerHelper::applyPtrAddImmedChain(MachineInstr &MI,
1832 PtrAddChain &MatchInfo) const {
1833 assert(MI.getOpcode() == TargetOpcode::G_PTR_ADD && "Expected G_PTR_ADD");
1834 MachineIRBuilder MIB(MI);
1835 LLT OffsetTy = MRI.getType(Reg: MI.getOperand(i: 2).getReg());
1836 auto NewOffset = MIB.buildConstant(Res: OffsetTy, Val: MatchInfo.Imm);
1837 setRegBank(Reg: NewOffset.getReg(Idx: 0), RegBank: MatchInfo.Bank);
1838 Observer.changingInstr(MI);
1839 MI.getOperand(i: 1).setReg(MatchInfo.Base);
1840 MI.getOperand(i: 2).setReg(NewOffset.getReg(Idx: 0));
1841 Observer.changedInstr(MI);
1842}
1843
1844bool CombinerHelper::matchShiftImmedChain(MachineInstr &MI,
1845 RegisterImmPair &MatchInfo) const {
1846 // We're trying to match the following pattern with any of
1847 // G_SHL/G_ASHR/G_LSHR/G_SSHLSAT/G_USHLSAT shift instructions:
1848 // %t1 = SHIFT %base, G_CONSTANT imm1
1849 // %root = SHIFT %t1, G_CONSTANT imm2
1850 // -->
1851 // %root = SHIFT %base, G_CONSTANT (imm1 + imm2)
1852
1853 unsigned Opcode = MI.getOpcode();
1854 assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
1855 Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_SSHLSAT ||
1856 Opcode == TargetOpcode::G_USHLSAT) &&
1857 "Expected G_SHL, G_ASHR, G_LSHR, G_SSHLSAT or G_USHLSAT");
1858
1859 Register Shl2 = MI.getOperand(i: 1).getReg();
1860 Register Imm1 = MI.getOperand(i: 2).getReg();
1861 auto MaybeImmVal = getIConstantVRegValWithLookThrough(VReg: Imm1, MRI);
1862 if (!MaybeImmVal)
1863 return false;
1864
1865 MachineInstr *Shl2Def = MRI.getUniqueVRegDef(Reg: Shl2);
1866 if (Shl2Def->getOpcode() != Opcode)
1867 return false;
1868
1869 Register Base = Shl2Def->getOperand(i: 1).getReg();
1870 Register Imm2 = Shl2Def->getOperand(i: 2).getReg();
1871 auto MaybeImm2Val = getIConstantVRegValWithLookThrough(VReg: Imm2, MRI);
1872 if (!MaybeImm2Val)
1873 return false;
1874
1875 // Pass the combined immediate to the apply function.
1876 MatchInfo.Imm =
1877 (MaybeImmVal->Value.getZExtValue() + MaybeImm2Val->Value).getZExtValue();
1878 MatchInfo.Reg = Base;
1879
1880 // There is no simple replacement for a saturating unsigned left shift that
1881 // exceeds the scalar size.
1882 if (Opcode == TargetOpcode::G_USHLSAT &&
1883 MatchInfo.Imm >= MRI.getType(Reg: Shl2).getScalarSizeInBits())
1884 return false;
1885
1886 return true;
1887}
1888
1889void CombinerHelper::applyShiftImmedChain(MachineInstr &MI,
1890 RegisterImmPair &MatchInfo) const {
1891 unsigned Opcode = MI.getOpcode();
1892 assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
1893 Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_SSHLSAT ||
1894 Opcode == TargetOpcode::G_USHLSAT) &&
1895 "Expected G_SHL, G_ASHR, G_LSHR, G_SSHLSAT or G_USHLSAT");
1896
1897 LLT Ty = MRI.getType(Reg: MI.getOperand(i: 1).getReg());
1898 unsigned const ScalarSizeInBits = Ty.getScalarSizeInBits();
1899 auto Imm = MatchInfo.Imm;
1900
1901 if (Imm >= ScalarSizeInBits) {
1902 // Any logical shift that exceeds scalar size will produce zero.
1903 if (Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_LSHR) {
1904 Builder.buildConstant(Res: MI.getOperand(i: 0), Val: 0);
1905 MI.eraseFromParent();
1906 return;
1907 }
1908 // Arithmetic shift and saturating signed left shift have no effect beyond
1909 // scalar size.
1910 Imm = ScalarSizeInBits - 1;
1911 }
1912
1913 LLT ImmTy = MRI.getType(Reg: MI.getOperand(i: 2).getReg());
1914 Register NewImm = Builder.buildConstant(Res: ImmTy, Val: Imm).getReg(Idx: 0);
1915 Observer.changingInstr(MI);
1916 MI.getOperand(i: 1).setReg(MatchInfo.Reg);
1917 MI.getOperand(i: 2).setReg(NewImm);
1918 Observer.changedInstr(MI);
1919}
1920
1921bool CombinerHelper::matchShiftOfShiftedLogic(
1922 MachineInstr &MI, ShiftOfShiftedLogic &MatchInfo) const {
1923 // We're trying to match the following pattern with any of
1924 // G_SHL/G_ASHR/G_LSHR/G_USHLSAT/G_SSHLSAT shift instructions in combination
1925 // with any of G_AND/G_OR/G_XOR logic instructions.
1926 // %t1 = SHIFT %X, G_CONSTANT C0
1927 // %t2 = LOGIC %t1, %Y
1928 // %root = SHIFT %t2, G_CONSTANT C1
1929 // -->
1930 // %t3 = SHIFT %X, G_CONSTANT (C0+C1)
1931 // %t4 = SHIFT %Y, G_CONSTANT C1
1932 // %root = LOGIC %t3, %t4
1933 unsigned ShiftOpcode = MI.getOpcode();
1934 assert((ShiftOpcode == TargetOpcode::G_SHL ||
1935 ShiftOpcode == TargetOpcode::G_ASHR ||
1936 ShiftOpcode == TargetOpcode::G_LSHR ||
1937 ShiftOpcode == TargetOpcode::G_USHLSAT ||
1938 ShiftOpcode == TargetOpcode::G_SSHLSAT) &&
1939 "Expected G_SHL, G_ASHR, G_LSHR, G_USHLSAT and G_SSHLSAT");
1940
1941 // Match a one-use bitwise logic op.
1942 Register LogicDest = MI.getOperand(i: 1).getReg();
1943 if (!MRI.hasOneNonDBGUse(RegNo: LogicDest))
1944 return false;
1945
1946 MachineInstr *LogicMI = MRI.getUniqueVRegDef(Reg: LogicDest);
1947 unsigned LogicOpcode = LogicMI->getOpcode();
1948 if (LogicOpcode != TargetOpcode::G_AND && LogicOpcode != TargetOpcode::G_OR &&
1949 LogicOpcode != TargetOpcode::G_XOR)
1950 return false;
1951
1952 // Find a matching one-use shift by constant.
1953 const Register C1 = MI.getOperand(i: 2).getReg();
1954 auto MaybeImmVal = getIConstantVRegValWithLookThrough(VReg: C1, MRI);
1955 if (!MaybeImmVal || MaybeImmVal->Value == 0)
1956 return false;
1957
1958 const uint64_t C1Val = MaybeImmVal->Value.getZExtValue();
1959
1960 auto matchFirstShift = [&](const MachineInstr *MI, uint64_t &ShiftVal) {
1961 // Shift should match the previous one and have only one use.
1962 if (MI->getOpcode() != ShiftOpcode ||
1963 !MRI.hasOneNonDBGUse(RegNo: MI->getOperand(i: 0).getReg()))
1964 return false;
1965
1966 // Must be a constant.
1967 auto MaybeImmVal =
1968 getIConstantVRegValWithLookThrough(VReg: MI->getOperand(i: 2).getReg(), MRI);
1969 if (!MaybeImmVal)
1970 return false;
1971
1972 ShiftVal = MaybeImmVal->Value.getSExtValue();
1973 return true;
1974 };
1975
1976 // Logic ops are commutative, so check each operand for a match.
1977 Register LogicMIReg1 = LogicMI->getOperand(i: 1).getReg();
1978 MachineInstr *LogicMIOp1 = MRI.getUniqueVRegDef(Reg: LogicMIReg1);
1979 Register LogicMIReg2 = LogicMI->getOperand(i: 2).getReg();
1980 MachineInstr *LogicMIOp2 = MRI.getUniqueVRegDef(Reg: LogicMIReg2);
1981 uint64_t C0Val;
1982
1983 if (matchFirstShift(LogicMIOp1, C0Val)) {
1984 MatchInfo.LogicNonShiftReg = LogicMIReg2;
1985 MatchInfo.Shift2 = LogicMIOp1;
1986 } else if (matchFirstShift(LogicMIOp2, C0Val)) {
1987 MatchInfo.LogicNonShiftReg = LogicMIReg1;
1988 MatchInfo.Shift2 = LogicMIOp2;
1989 } else
1990 return false;
1991
1992 MatchInfo.ValSum = C0Val + C1Val;
1993
1994 // The fold is not valid if the sum of the shift values meets or exceeds the bitwidth.
1995 if (MatchInfo.ValSum >= MRI.getType(Reg: LogicDest).getScalarSizeInBits())
1996 return false;
1997
1998 MatchInfo.Logic = LogicMI;
1999 return true;
2000}
2001
2002void CombinerHelper::applyShiftOfShiftedLogic(
2003 MachineInstr &MI, ShiftOfShiftedLogic &MatchInfo) const {
2004 unsigned Opcode = MI.getOpcode();
2005 assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
2006 Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_USHLSAT ||
2007 Opcode == TargetOpcode::G_SSHLSAT) &&
2008 "Expected G_SHL, G_ASHR, G_LSHR, G_USHLSAT and G_SSHLSAT");
2009
2010 LLT ShlType = MRI.getType(Reg: MI.getOperand(i: 2).getReg());
2011 LLT DestType = MRI.getType(Reg: MI.getOperand(i: 0).getReg());
2012
2013 Register Const = Builder.buildConstant(Res: ShlType, Val: MatchInfo.ValSum).getReg(Idx: 0);
2014
2015 Register Shift1Base = MatchInfo.Shift2->getOperand(i: 1).getReg();
2016 Register Shift1 =
2017 Builder.buildInstr(Opc: Opcode, DstOps: {DestType}, SrcOps: {Shift1Base, Const}).getReg(Idx: 0);
2018
2019 // If LogicNonShiftReg is the same as Shift1Base, and the shift1 constant is
2020 // the same as the MatchInfo.Shift2 constant, CSEMIRBuilder will reuse the old
2021 // shift1 when building shift2. So if we erased MatchInfo.Shift2 at the end,
2022 // we would actually remove the old shift1, causing a crash later. Erase it
2023 // earlier to avoid the crash.
2024 MatchInfo.Shift2->eraseFromParent();
2025
2026 Register Shift2Const = MI.getOperand(i: 2).getReg();
2027 Register Shift2 = Builder
2028 .buildInstr(Opc: Opcode, DstOps: {DestType},
2029 SrcOps: {MatchInfo.LogicNonShiftReg, Shift2Const})
2030 .getReg(Idx: 0);
2031
2032 Register Dest = MI.getOperand(i: 0).getReg();
2033 Builder.buildInstr(Opc: MatchInfo.Logic->getOpcode(), DstOps: {Dest}, SrcOps: {Shift1, Shift2});
2034
2035 // This was one use so it's safe to remove it.
2036 MatchInfo.Logic->eraseFromParent();
2037
2038 MI.eraseFromParent();
2039}
2040
2041bool CombinerHelper::matchCommuteShift(MachineInstr &MI,
2042 BuildFnTy &MatchInfo) const {
2043 assert(MI.getOpcode() == TargetOpcode::G_SHL && "Expected G_SHL");
2044 // Combine (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
2045 // Combine (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
2046 auto &Shl = cast<GenericMachineInstr>(Val&: MI);
2047 Register DstReg = Shl.getReg(Idx: 0);
2048 Register SrcReg = Shl.getReg(Idx: 1);
2049 Register ShiftReg = Shl.getReg(Idx: 2);
2050 Register X, C1;
2051
2052 if (!getTargetLowering().isDesirableToCommuteWithShift(MI, IsAfterLegal: !isPreLegalize()))
2053 return false;
2054
2055 if (!mi_match(R: SrcReg, MRI,
2056 P: m_OneNonDBGUse(SP: m_any_of(preds: m_GAdd(L: m_Reg(R&: X), R: m_Reg(R&: C1)),
2057 preds: m_GOr(L: m_Reg(R&: X), R: m_Reg(R&: C1))))))
2058 return false;
2059
2060 APInt C1Val, C2Val;
2061 if (!mi_match(R: C1, MRI, P: m_ICstOrSplat(Cst&: C1Val)) ||
2062 !mi_match(R: ShiftReg, MRI, P: m_ICstOrSplat(Cst&: C2Val)))
2063 return false;
2064
2065 auto *SrcDef = MRI.getVRegDef(Reg: SrcReg);
2066 assert((SrcDef->getOpcode() == TargetOpcode::G_ADD ||
2067 SrcDef->getOpcode() == TargetOpcode::G_OR) && "Unexpected op");
2068 LLT SrcTy = MRI.getType(Reg: SrcReg);
2069 MatchInfo = [=](MachineIRBuilder &B) {
2070 auto S1 = B.buildShl(Dst: SrcTy, Src0: X, Src1: ShiftReg);
2071 auto S2 = B.buildShl(Dst: SrcTy, Src0: C1, Src1: ShiftReg);
2072 B.buildInstr(Opc: SrcDef->getOpcode(), DstOps: {DstReg}, SrcOps: {S1, S2});
2073 };
2074 return true;
2075}
2076
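/// Match a multiplication by a power of two so it can be rewritten as a
/// shift, e.g. (sketch):
///   %r:_(s64) = G_MUL %x, 8
/// -->
///   %r:_(s64) = G_SHL %x, 3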
2077bool CombinerHelper::matchCombineMulToShl(MachineInstr &MI,
2078 unsigned &ShiftVal) const {
2079 assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
2080 auto MaybeImmVal =
2081 getIConstantVRegValWithLookThrough(VReg: MI.getOperand(i: 2).getReg(), MRI);
2082 if (!MaybeImmVal)
2083 return false;
2084
2085 ShiftVal = MaybeImmVal->Value.exactLogBase2();
2086 return (static_cast<int32_t>(ShiftVal) != -1);
2087}
2088
2089void CombinerHelper::applyCombineMulToShl(MachineInstr &MI,
2090 unsigned &ShiftVal) const {
2091 assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
2092 MachineIRBuilder MIB(MI);
2093 LLT ShiftTy = MRI.getType(Reg: MI.getOperand(i: 0).getReg());
2094 auto ShiftCst = MIB.buildConstant(Res: ShiftTy, Val: ShiftVal);
2095 Observer.changingInstr(MI);
2096 MI.setDesc(MIB.getTII().get(Opcode: TargetOpcode::G_SHL));
2097 MI.getOperand(i: 2).setReg(ShiftCst.getReg(Idx: 0));
2098 if (ShiftVal == ShiftTy.getScalarSizeInBits() - 1)
2099 MI.clearFlag(Flag: MachineInstr::MIFlag::NoSWrap);
2100 Observer.changedInstr(MI);
2101}
2102
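/// Rewrite a subtraction of a constant as an addition of its negation, e.g.
/// (sketch):
///   %r:_(s32) = G_SUB %x, 5
/// -->
///   %r:_(s32) = G_ADD %x, -5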
2103bool CombinerHelper::matchCombineSubToAdd(MachineInstr &MI,
2104 BuildFnTy &MatchInfo) const {
2105 GSub &Sub = cast<GSub>(Val&: MI);
2106
2107 LLT Ty = MRI.getType(Reg: Sub.getReg(Idx: 0));
2108
2109 if (!isLegalOrBeforeLegalizer(Query: {TargetOpcode::G_ADD, {Ty}}))
2110 return false;
2111
2112 if (!isConstantLegalOrBeforeLegalizer(Ty))
2113 return false;
2114
2115 APInt Imm = getIConstantFromReg(VReg: Sub.getRHSReg(), MRI);
2116
2117 MatchInfo = [=, &MI](MachineIRBuilder &B) {
2118 auto NegCst = B.buildConstant(Res: Ty, Val: -Imm);
2119 Observer.changingInstr(MI);
2120 MI.setDesc(B.getTII().get(Opcode: TargetOpcode::G_ADD));
2121 MI.getOperand(i: 2).setReg(NegCst.getReg(Idx: 0));
2122 MI.clearFlag(Flag: MachineInstr::MIFlag::NoUWrap);
2123 if (Imm.isMinSignedValue())
2124 MI.clearFlag(Flag: MachineInstr::MIFlag::NoSWrap);
2125 Observer.changedInstr(MI);
2126 };
2127 return true;
2128}
2129
2130// shl ([sza]ext x), y => zext (shl x, y), if shift does not overflow source
2131bool CombinerHelper::matchCombineShlOfExtend(MachineInstr &MI,
2132 RegisterImmPair &MatchData) const {
2133 assert(MI.getOpcode() == TargetOpcode::G_SHL && VT);
2134 if (!getTargetLowering().isDesirableToPullExtFromShl(MI))
2135 return false;
2136
2137 Register LHS = MI.getOperand(i: 1).getReg();
2138
2139 Register ExtSrc;
2140 if (!mi_match(R: LHS, MRI, P: m_GAnyExt(Src: m_Reg(R&: ExtSrc))) &&
2141 !mi_match(R: LHS, MRI, P: m_GZExt(Src: m_Reg(R&: ExtSrc))) &&
2142 !mi_match(R: LHS, MRI, P: m_GSExt(Src: m_Reg(R&: ExtSrc))))
2143 return false;
2144
2145 Register RHS = MI.getOperand(i: 2).getReg();
2146 MachineInstr *MIShiftAmt = MRI.getVRegDef(Reg: RHS);
2147 auto MaybeShiftAmtVal = isConstantOrConstantSplatVector(MI&: *MIShiftAmt, MRI);
2148 if (!MaybeShiftAmtVal)
2149 return false;
2150
2151 if (LI) {
2152 LLT SrcTy = MRI.getType(Reg: ExtSrc);
2153
2154 // We only really care about the legality with the shifted value. We can
2155 // pick any type for the constant shift amount, so ask the target what to
2156 // use. Otherwise we would have to guess and hope it is reported as legal.
2157 LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(ShiftValueTy: SrcTy);
2158 if (!isLegalOrBeforeLegalizer(Query: {TargetOpcode::G_SHL, {SrcTy, ShiftAmtTy}}))
2159 return false;
2160 }
2161
2162 int64_t ShiftAmt = MaybeShiftAmtVal->getSExtValue();
2163 MatchData.Reg = ExtSrc;
2164 MatchData.Imm = ShiftAmt;
2165
2166 unsigned MinLeadingZeros = VT->getKnownZeroes(R: ExtSrc).countl_one();
2167 unsigned SrcTySize = MRI.getType(Reg: ExtSrc).getScalarSizeInBits();
2168 return MinLeadingZeros >= ShiftAmt && ShiftAmt < SrcTySize;
2169}
2170
2171void CombinerHelper::applyCombineShlOfExtend(
2172 MachineInstr &MI, const RegisterImmPair &MatchData) const {
2173 Register ExtSrcReg = MatchData.Reg;
2174 int64_t ShiftAmtVal = MatchData.Imm;
2175
2176 LLT ExtSrcTy = MRI.getType(Reg: ExtSrcReg);
2177 auto ShiftAmt = Builder.buildConstant(Res: ExtSrcTy, Val: ShiftAmtVal);
2178 auto NarrowShift =
2179 Builder.buildShl(Dst: ExtSrcTy, Src0: ExtSrcReg, Src1: ShiftAmt, Flags: MI.getFlags());
2180 Builder.buildZExt(Res: MI.getOperand(i: 0), Op: NarrowShift);
2181 MI.eraseFromParent();
2182}
2183
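/// Fold a merge whose sources are exactly the results of an unmerge back to
/// the unmerged register, e.g. (sketch):
///   %a:_(s32), %b:_(s32) = G_UNMERGE_VALUES %x(s64)
///   %y:_(s64) = G_MERGE_VALUES %a(s32), %b(s32)
/// -->
///   replace all uses of %y with %x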
2184bool CombinerHelper::matchCombineMergeUnmerge(MachineInstr &MI,
2185 Register &MatchInfo) const {
2186 GMerge &Merge = cast<GMerge>(Val&: MI);
2187 SmallVector<Register, 16> MergedValues;
2188 for (unsigned I = 0; I < Merge.getNumSources(); ++I)
2189 MergedValues.emplace_back(Args: Merge.getSourceReg(I));
2190
2191 auto *Unmerge = getOpcodeDef<GUnmerge>(Reg: MergedValues[0], MRI);
2192 if (!Unmerge || Unmerge->getNumDefs() != Merge.getNumSources())
2193 return false;
2194
2195 for (unsigned I = 0; I < MergedValues.size(); ++I)
2196 if (MergedValues[I] != Unmerge->getReg(Idx: I))
2197 return false;
2198
2199 MatchInfo = Unmerge->getSourceReg();
2200 return true;
2201}
2202
2203static Register peekThroughBitcast(Register Reg,
2204 const MachineRegisterInfo &MRI) {
2205 while (mi_match(R: Reg, MRI, P: m_GBitcast(Src: m_Reg(R&: Reg))))
2206 ;
2207
2208 return Reg;
2209}
2210
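/// Fold an unmerge of a merge-like instruction into the merge's sources,
/// e.g. (sketch):
///   %m:_(s64) = G_MERGE_VALUES %a(s32), %b(s32)
///   %x:_(s32), %y:_(s32) = G_UNMERGE_VALUES %m(s64)
/// -->
///   replace %x with %a and %y with %b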
2211bool CombinerHelper::matchCombineUnmergeMergeToPlainValues(
2212 MachineInstr &MI, SmallVectorImpl<Register> &Operands) const {
2213 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
2214 "Expected an unmerge");
2215 auto &Unmerge = cast<GUnmerge>(Val&: MI);
2216 Register SrcReg = peekThroughBitcast(Reg: Unmerge.getSourceReg(), MRI);
2217
2218 auto *SrcInstr = getOpcodeDef<GMergeLikeInstr>(Reg: SrcReg, MRI);
2219 if (!SrcInstr)
2220 return false;
2221
2222 // Check the source type of the merge.
2223 LLT SrcMergeTy = MRI.getType(Reg: SrcInstr->getSourceReg(I: 0));
2224 LLT Dst0Ty = MRI.getType(Reg: Unmerge.getReg(Idx: 0));
2225 bool SameSize = Dst0Ty.getSizeInBits() == SrcMergeTy.getSizeInBits();
2226 if (SrcMergeTy != Dst0Ty && !SameSize)
2227 return false;
2228 // They are the same now (modulo a bitcast).
2229 // We can collect all the src registers.
2230 for (unsigned Idx = 0; Idx < SrcInstr->getNumSources(); ++Idx)
2231 Operands.push_back(Elt: SrcInstr->getSourceReg(I: Idx));
2232 return true;
2233}
2234
2235void CombinerHelper::applyCombineUnmergeMergeToPlainValues(
2236 MachineInstr &MI, SmallVectorImpl<Register> &Operands) const {
2237 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
2238 "Expected an unmerge");
2239 assert((MI.getNumOperands() - 1 == Operands.size()) &&
2240 "Not enough operands to replace all defs");
2241 unsigned NumElems = MI.getNumOperands() - 1;
2242
2243 LLT SrcTy = MRI.getType(Reg: Operands[0]);
2244 LLT DstTy = MRI.getType(Reg: MI.getOperand(i: 0).getReg());
2245 bool CanReuseInputDirectly = DstTy == SrcTy;
2246 for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
2247 Register DstReg = MI.getOperand(i: Idx).getReg();
2248 Register SrcReg = Operands[Idx];
2249
2250 // This combine may run after RegBankSelect, so we need to be aware of
2251 // register banks.
2252 const auto &DstCB = MRI.getRegClassOrRegBank(Reg: DstReg);
2253 if (!DstCB.isNull() && DstCB != MRI.getRegClassOrRegBank(Reg: SrcReg)) {
2254 SrcReg = Builder.buildCopy(Res: MRI.getType(Reg: SrcReg), Op: SrcReg).getReg(Idx: 0);
2255 MRI.setRegClassOrRegBank(Reg: SrcReg, RCOrRB: DstCB);
2256 }
2257
2258 if (CanReuseInputDirectly)
2259 replaceRegWith(MRI, FromReg: DstReg, ToReg: SrcReg);
2260 else
2261 Builder.buildCast(Dst: DstReg, Src: SrcReg);
2262 }
2263 MI.eraseFromParent();
2264}
2265
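/// Break an unmerge of a constant into one constant per definition, where the
/// first definition receives the low bits, e.g. (sketch):
///   %c:_(s16) = G_CONSTANT i16 0x1234
///   %lo:_(s8), %hi:_(s8) = G_UNMERGE_VALUES %c(s16)
/// -->
///   %lo:_(s8) = G_CONSTANT i8 0x34
///   %hi:_(s8) = G_CONSTANT i8 0x12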
2266bool CombinerHelper::matchCombineUnmergeConstant(
2267 MachineInstr &MI, SmallVectorImpl<APInt> &Csts) const {
2268 unsigned SrcIdx = MI.getNumOperands() - 1;
2269 Register SrcReg = MI.getOperand(i: SrcIdx).getReg();
2270 MachineInstr *SrcInstr = MRI.getVRegDef(Reg: SrcReg);
2271 if (SrcInstr->getOpcode() != TargetOpcode::G_CONSTANT &&
2272 SrcInstr->getOpcode() != TargetOpcode::G_FCONSTANT)
2273 return false;
2274 // Break down the big constant into smaller ones.
2275 const MachineOperand &CstVal = SrcInstr->getOperand(i: 1);
2276 APInt Val = SrcInstr->getOpcode() == TargetOpcode::G_CONSTANT
2277 ? CstVal.getCImm()->getValue()
2278 : CstVal.getFPImm()->getValueAPF().bitcastToAPInt();
2279
2280 LLT Dst0Ty = MRI.getType(Reg: MI.getOperand(i: 0).getReg());
2281 unsigned ShiftAmt = Dst0Ty.getSizeInBits();
2282 // Unmerge a constant.
2283 for (unsigned Idx = 0; Idx != SrcIdx; ++Idx) {
2284 Csts.emplace_back(Args: Val.trunc(width: ShiftAmt));
2285 Val = Val.lshr(shiftAmt: ShiftAmt);
2286 }
2287
2288 return true;
2289}
2290
2291void CombinerHelper::applyCombineUnmergeConstant(
2292 MachineInstr &MI, SmallVectorImpl<APInt> &Csts) const {
2293 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
2294 "Expected an unmerge");
2295 assert((MI.getNumOperands() - 1 == Csts.size()) &&
2296 "Not enough operands to replace all defs");
2297 unsigned NumElems = MI.getNumOperands() - 1;
2298 for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
2299 Register DstReg = MI.getOperand(i: Idx).getReg();
2300 Builder.buildConstant(Res: DstReg, Val: Csts[Idx]);
2301 }
2302
2303 MI.eraseFromParent();
2304}
2305
2306bool CombinerHelper::matchCombineUnmergeUndef(
2307 MachineInstr &MI,
2308 std::function<void(MachineIRBuilder &)> &MatchInfo) const {
2309 unsigned SrcIdx = MI.getNumOperands() - 1;
2310 Register SrcReg = MI.getOperand(i: SrcIdx).getReg();
2311 MatchInfo = [&MI](MachineIRBuilder &B) {
2312 unsigned NumElems = MI.getNumOperands() - 1;
2313 for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
2314 Register DstReg = MI.getOperand(i: Idx).getReg();
2315 B.buildUndef(Res: DstReg);
2316 }
2317 };
2318 return isa<GImplicitDef>(Val: MRI.getVRegDef(Reg: SrcReg));
2319}
2320
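/// If only the first (low) definition of an unmerge is used, a truncate of
/// the source suffices, e.g. (sketch):
///   %lo:_(s32), %hi:_(s32) = G_UNMERGE_VALUES %x(s64) ; %hi is dead
/// -->
///   %lo:_(s32) = G_TRUNC %x(s64)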
2321bool CombinerHelper::matchCombineUnmergeWithDeadLanesToTrunc(
2322 MachineInstr &MI) const {
2323 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
2324 "Expected an unmerge");
2325 if (MRI.getType(Reg: MI.getOperand(i: 0).getReg()).isVector() ||
2326 MRI.getType(Reg: MI.getOperand(i: MI.getNumDefs()).getReg()).isVector())
2327 return false;
2328 // Check that all the lanes are dead except the first one.
2329 for (unsigned Idx = 1, EndIdx = MI.getNumDefs(); Idx != EndIdx; ++Idx) {
2330 if (!MRI.use_nodbg_empty(RegNo: MI.getOperand(i: Idx).getReg()))
2331 return false;
2332 }
2333 return true;
2334}
2335
2336void CombinerHelper::applyCombineUnmergeWithDeadLanesToTrunc(
2337 MachineInstr &MI) const {
2338 Register SrcReg = MI.getOperand(i: MI.getNumDefs()).getReg();
2339 Register Dst0Reg = MI.getOperand(i: 0).getReg();
2340 Builder.buildTrunc(Res: Dst0Reg, Op: SrcReg);
2341 MI.eraseFromParent();
2342}
2343
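/// Fold an unmerge of a zero-extended value, e.g. (a sketch where the zext
/// source exactly fills the first definition):
///   %z:_(s64) = G_ZEXT %x(s32)
///   %lo:_(s32), %hi:_(s32) = G_UNMERGE_VALUES %z(s64)
/// -->
///   replace %lo with %x; %hi:_(s32) = G_CONSTANT i32 0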
2344bool CombinerHelper::matchCombineUnmergeZExtToZExt(MachineInstr &MI) const {
2345 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
2346 "Expected an unmerge");
2347 Register Dst0Reg = MI.getOperand(i: 0).getReg();
2348 LLT Dst0Ty = MRI.getType(Reg: Dst0Reg);
2349 // G_ZEXT on a vector applies to each lane, so it will
2350 // affect all destinations. Therefore we won't be able
2351 // to simplify the unmerge to just the first definition.
2352 if (Dst0Ty.isVector())
2353 return false;
2354 Register SrcReg = MI.getOperand(i: MI.getNumDefs()).getReg();
2355 LLT SrcTy = MRI.getType(Reg: SrcReg);
2356 if (SrcTy.isVector())
2357 return false;
2358
2359 Register ZExtSrcReg;
2360 if (!mi_match(R: SrcReg, MRI, P: m_GZExt(Src: m_Reg(R&: ZExtSrcReg))))
2361 return false;
2362
2363 // Finally we can replace the first definition with
2364 // a zext of the source if the definition is big enough to hold
2365 // all of ZExtSrc's bits.
2366 LLT ZExtSrcTy = MRI.getType(Reg: ZExtSrcReg);
2367 return ZExtSrcTy.getSizeInBits() <= Dst0Ty.getSizeInBits();
2368}
2369
2370void CombinerHelper::applyCombineUnmergeZExtToZExt(MachineInstr &MI) const {
2371 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
2372 "Expected an unmerge");
2373
2374 Register Dst0Reg = MI.getOperand(i: 0).getReg();
2375
2376 MachineInstr *ZExtInstr =
2377 MRI.getVRegDef(Reg: MI.getOperand(i: MI.getNumDefs()).getReg());
2378 assert(ZExtInstr && ZExtInstr->getOpcode() == TargetOpcode::G_ZEXT &&
2379 "Expecting a G_ZEXT");
2380
2381 Register ZExtSrcReg = ZExtInstr->getOperand(i: 1).getReg();
2382 LLT Dst0Ty = MRI.getType(Reg: Dst0Reg);
2383 LLT ZExtSrcTy = MRI.getType(Reg: ZExtSrcReg);
2384
2385 if (Dst0Ty.getSizeInBits() > ZExtSrcTy.getSizeInBits()) {
2386 Builder.buildZExt(Res: Dst0Reg, Op: ZExtSrcReg);
2387 } else {
2388 assert(Dst0Ty.getSizeInBits() == ZExtSrcTy.getSizeInBits() &&
2389 "ZExt src doesn't fit in destination");
2390 replaceRegWith(MRI, FromReg: Dst0Reg, ToReg: ZExtSrcReg);
2391 }
2392
2393 Register ZeroReg;
2394 for (unsigned Idx = 1, EndIdx = MI.getNumDefs(); Idx != EndIdx; ++Idx) {
2395 if (!ZeroReg)
2396 ZeroReg = Builder.buildConstant(Res: Dst0Ty, Val: 0).getReg(Idx: 0);
2397 replaceRegWith(MRI, FromReg: MI.getOperand(i: Idx).getReg(), ToReg: ZeroReg);
2398 }
2399 MI.eraseFromParent();
2400}
2401
2402bool CombinerHelper::matchCombineShiftToUnmerge(MachineInstr &MI,
2403 unsigned TargetShiftSize,
2404 unsigned &ShiftVal) const {
2405 assert((MI.getOpcode() == TargetOpcode::G_SHL ||
2406 MI.getOpcode() == TargetOpcode::G_LSHR ||
2407 MI.getOpcode() == TargetOpcode::G_ASHR) && "Expected a shift");
2408
2409 LLT Ty = MRI.getType(Reg: MI.getOperand(i: 0).getReg());
2410 if (Ty.isVector()) // TODO: Handle vector types.
2411 return false;
2412
2413 // Don't narrow further than the requested size.
2414 unsigned Size = Ty.getSizeInBits();
2415 if (Size <= TargetShiftSize)
2416 return false;
2417
2418 auto MaybeImmVal =
2419 getIConstantVRegValWithLookThrough(VReg: MI.getOperand(i: 2).getReg(), MRI);
2420 if (!MaybeImmVal)
2421 return false;
2422
2423 ShiftVal = MaybeImmVal->Value.getSExtValue();
2424 return ShiftVal >= Size / 2 && ShiftVal < Size;
2425}
2426
2427void CombinerHelper::applyCombineShiftToUnmerge(
2428 MachineInstr &MI, const unsigned &ShiftVal) const {
2429 Register DstReg = MI.getOperand(i: 0).getReg();
2430 Register SrcReg = MI.getOperand(i: 1).getReg();
2431 LLT Ty = MRI.getType(Reg: SrcReg);
2432 unsigned Size = Ty.getSizeInBits();
2433 unsigned HalfSize = Size / 2;
2434 assert(ShiftVal >= HalfSize);
2435
2436 LLT HalfTy = LLT::scalar(SizeInBits: HalfSize);
2437
2438 auto Unmerge = Builder.buildUnmerge(Res: HalfTy, Op: SrcReg);
2439 unsigned NarrowShiftAmt = ShiftVal - HalfSize;
2440
2441 if (MI.getOpcode() == TargetOpcode::G_LSHR) {
2442 Register Narrowed = Unmerge.getReg(Idx: 1);
2443
2444 // dst = G_LSHR s64:x, C for C >= 32
2445 // =>
2446 // lo, hi = G_UNMERGE_VALUES x
2447 // dst = G_MERGE_VALUES (G_LSHR hi, C - 32), 0
2448
2449 if (NarrowShiftAmt != 0) {
2450 Narrowed = Builder.buildLShr(Dst: HalfTy, Src0: Narrowed,
2451 Src1: Builder.buildConstant(Res: HalfTy, Val: NarrowShiftAmt)).getReg(Idx: 0);
2452 }
2453
2454 auto Zero = Builder.buildConstant(Res: HalfTy, Val: 0);
2455 Builder.buildMergeLikeInstr(Res: DstReg, Ops: {Narrowed, Zero});
2456 } else if (MI.getOpcode() == TargetOpcode::G_SHL) {
2457 Register Narrowed = Unmerge.getReg(Idx: 0);
2458 // dst = G_SHL s64:x, C for C >= 32
2459 // =>
2460 // lo, hi = G_UNMERGE_VALUES x
2461 // dst = G_MERGE_VALUES 0, (G_SHL hi, C - 32)
2462 if (NarrowShiftAmt != 0) {
2463 Narrowed = Builder.buildShl(Dst: HalfTy, Src0: Narrowed,
2464 Src1: Builder.buildConstant(Res: HalfTy, Val: NarrowShiftAmt)).getReg(Idx: 0);
2465 }
2466
2467 auto Zero = Builder.buildConstant(Res: HalfTy, Val: 0);
2468 Builder.buildMergeLikeInstr(Res: DstReg, Ops: {Zero, Narrowed});
2469 } else {
2470 assert(MI.getOpcode() == TargetOpcode::G_ASHR);
2471 auto Hi = Builder.buildAShr(
2472 Dst: HalfTy, Src0: Unmerge.getReg(Idx: 1),
2473 Src1: Builder.buildConstant(Res: HalfTy, Val: HalfSize - 1));
2474
2475 if (ShiftVal == HalfSize) {
2476 // (G_ASHR i64:x, 32) ->
2477 // G_MERGE_VALUES hi_32(x), (G_ASHR hi_32(x), 31)
2478 Builder.buildMergeLikeInstr(Res: DstReg, Ops: {Unmerge.getReg(Idx: 1), Hi});
2479 } else if (ShiftVal == Size - 1) {
2480 // Don't need a second shift.
2481 // (G_ASHR i64:x, 63) ->
2482 // %narrowed = (G_ASHR hi_32(x), 31)
2483 // G_MERGE_VALUES %narrowed, %narrowed
2484 Builder.buildMergeLikeInstr(Res: DstReg, Ops: {Hi, Hi});
2485 } else {
2486 auto Lo = Builder.buildAShr(
2487 Dst: HalfTy, Src0: Unmerge.getReg(Idx: 1),
2488 Src1: Builder.buildConstant(Res: HalfTy, Val: ShiftVal - HalfSize));
2489
2490 // (G_ASHR i64:x, C) ->, for C >= 32
2491 // G_MERGE_VALUES (G_ASHR hi_32(x), C - 32), (G_ASHR hi_32(x), 31)
2492 Builder.buildMergeLikeInstr(Res: DstReg, Ops: {Lo, Hi});
2493 }
2494 }
2495
2496 MI.eraseFromParent();
2497}
2498
2499bool CombinerHelper::tryCombineShiftToUnmerge(
2500 MachineInstr &MI, unsigned TargetShiftAmount) const {
2501 unsigned ShiftAmt;
2502 if (matchCombineShiftToUnmerge(MI, TargetShiftSize: TargetShiftAmount, ShiftVal&: ShiftAmt)) {
2503 applyCombineShiftToUnmerge(MI, ShiftVal: ShiftAmt);
2504 return true;
2505 }
2506
2507 return false;
2508}
2509
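/// Fold an inttoptr of a ptrtoint back to the original pointer when the
/// integer type matches the pointer width, e.g. (sketch):
///   %i:_(s64) = G_PTRTOINT %p(p0)
///   %q:_(p0) = G_INTTOPTR %i(s64)
/// -->
///   %q:_(p0) = COPY %p(p0)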
2510bool CombinerHelper::matchCombineI2PToP2I(MachineInstr &MI,
2511 Register &Reg) const {
2512 assert(MI.getOpcode() == TargetOpcode::G_INTTOPTR && "Expected a G_INTTOPTR");
2513 Register DstReg = MI.getOperand(i: 0).getReg();
2514 LLT DstTy = MRI.getType(Reg: DstReg);
2515 Register SrcReg = MI.getOperand(i: 1).getReg();
2516 return mi_match(R: SrcReg, MRI,
2517 P: m_GPtrToInt(Src: m_all_of(preds: m_SpecificType(Ty: DstTy), preds: m_Reg(R&: Reg))));
2518}
2519
2520void CombinerHelper::applyCombineI2PToP2I(MachineInstr &MI,
2521 Register &Reg) const {
2522 assert(MI.getOpcode() == TargetOpcode::G_INTTOPTR && "Expected a G_INTTOPTR");
2523 Register DstReg = MI.getOperand(i: 0).getReg();
2524 Builder.buildCopy(Res: DstReg, Op: Reg);
2525 MI.eraseFromParent();
2526}
2527
2528void CombinerHelper::applyCombineP2IToI2P(MachineInstr &MI,
2529 Register &Reg) const {
2530 assert(MI.getOpcode() == TargetOpcode::G_PTRTOINT && "Expected a G_PTRTOINT");
2531 Register DstReg = MI.getOperand(i: 0).getReg();
2532 Builder.buildZExtOrTrunc(Res: DstReg, Op: Reg);
2533 MI.eraseFromParent();
2534}
2535
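/// Push a ptrtoint past an integer add so the arithmetic happens in the
/// pointer domain, e.g. (sketch):
///   %i:_(s64) = G_PTRTOINT %p(p0)
///   %r:_(s64) = G_ADD %i, %x
/// -->
///   %a:_(p0) = G_PTR_ADD %p, %x
///   %r:_(s64) = G_PTRTOINT %a(p0)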
2536bool CombinerHelper::matchCombineAddP2IToPtrAdd(
2537 MachineInstr &MI, std::pair<Register, bool> &PtrReg) const {
2538 assert(MI.getOpcode() == TargetOpcode::G_ADD);
2539 Register LHS = MI.getOperand(i: 1).getReg();
2540 Register RHS = MI.getOperand(i: 2).getReg();
2541 LLT IntTy = MRI.getType(Reg: LHS);
2542
2543 // G_PTR_ADD always has the pointer in the LHS, so we may need to commute the
2544 // instruction.
2545 PtrReg.second = false;
2546 for (Register SrcReg : {LHS, RHS}) {
2547 if (mi_match(R: SrcReg, MRI, P: m_GPtrToInt(Src: m_Reg(R&: PtrReg.first)))) {
2548 // Don't handle cases where the integer is implicitly converted to the
2549 // pointer width.
2550 LLT PtrTy = MRI.getType(Reg: PtrReg.first);
2551 if (PtrTy.getScalarSizeInBits() == IntTy.getScalarSizeInBits())
2552 return true;
2553 }
2554
2555 PtrReg.second = true;
2556 }
2557
2558 return false;
2559}
2560
2561void CombinerHelper::applyCombineAddP2IToPtrAdd(
2562 MachineInstr &MI, std::pair<Register, bool> &PtrReg) const {
2563 Register Dst = MI.getOperand(i: 0).getReg();
2564 Register LHS = MI.getOperand(i: 1).getReg();
2565 Register RHS = MI.getOperand(i: 2).getReg();
2566
2567 const bool DoCommute = PtrReg.second;
2568 if (DoCommute)
2569 std::swap(a&: LHS, b&: RHS);
2570 LHS = PtrReg.first;
2571
2572 LLT PtrTy = MRI.getType(Reg: LHS);
2573
2574 auto PtrAdd = Builder.buildPtrAdd(Res: PtrTy, Op0: LHS, Op1: RHS);
2575 Builder.buildPtrToInt(Dst, Src: PtrAdd);
2576 MI.eraseFromParent();
2577}
2578
2579bool CombinerHelper::matchCombineConstPtrAddToI2P(MachineInstr &MI,
2580 APInt &NewCst) const {
2581 auto &PtrAdd = cast<GPtrAdd>(Val&: MI);
2582 Register LHS = PtrAdd.getBaseReg();
2583 Register RHS = PtrAdd.getOffsetReg();
2584 MachineRegisterInfo &MRI = Builder.getMF().getRegInfo();
2585
2586 if (auto RHSCst = getIConstantVRegVal(VReg: RHS, MRI)) {
2587 APInt Cst;
2588 if (mi_match(R: LHS, MRI, P: m_GIntToPtr(Src: m_ICst(Cst)))) {
2589 auto DstTy = MRI.getType(Reg: PtrAdd.getReg(Idx: 0));
2590 // G_INTTOPTR uses zero-extension
2591 NewCst = Cst.zextOrTrunc(width: DstTy.getSizeInBits());
2592 NewCst += RHSCst->sextOrTrunc(width: DstTy.getSizeInBits());
2593 return true;
2594 }
2595 }
2596
2597 return false;
2598}
2599
2600void CombinerHelper::applyCombineConstPtrAddToI2P(MachineInstr &MI,
2601 APInt &NewCst) const {
2602 auto &PtrAdd = cast<GPtrAdd>(Val&: MI);
2603 Register Dst = PtrAdd.getReg(Idx: 0);
2604
2605 Builder.buildConstant(Res: Dst, Val: NewCst);
2606 PtrAdd.eraseFromParent();
2607}
2608
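/// Fold an anyext of a trunc back to the original register when the outer
/// type matches, e.g. (sketch):
///   %t:_(s32) = G_TRUNC %x(s64)
///   %e:_(s64) = G_ANYEXT %t(s32)
/// -->
///   replace all uses of %e with %x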
2609bool CombinerHelper::matchCombineAnyExtTrunc(MachineInstr &MI,
2610 Register &Reg) const {
2611 assert(MI.getOpcode() == TargetOpcode::G_ANYEXT && "Expected a G_ANYEXT");
2612 Register DstReg = MI.getOperand(i: 0).getReg();
2613 Register SrcReg = MI.getOperand(i: 1).getReg();
2614 Register OriginalSrcReg = getSrcRegIgnoringCopies(Reg: SrcReg, MRI);
2615 if (OriginalSrcReg.isValid())
2616 SrcReg = OriginalSrcReg;
2617 LLT DstTy = MRI.getType(Reg: DstReg);
2618 return mi_match(R: SrcReg, MRI,
2619 P: m_GTrunc(Src: m_all_of(preds: m_Reg(R&: Reg), preds: m_SpecificType(Ty: DstTy)))) &&
2620 canReplaceReg(DstReg, SrcReg: Reg, MRI);
2621}
2622
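/// Fold a zext of a trunc to the original register when the known leading
/// zeros make the zext a no-op, e.g. (sketch):
///   %t:_(s32) = G_TRUNC %x(s64)   ; high 32 bits of %x known zero
///   %z:_(s64) = G_ZEXT %t(s32)
/// -->
///   replace all uses of %z with %x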
2623bool CombinerHelper::matchCombineZextTrunc(MachineInstr &MI,
2624 Register &Reg) const {
2625 assert(MI.getOpcode() == TargetOpcode::G_ZEXT && "Expected a G_ZEXT");
2626 Register DstReg = MI.getOperand(i: 0).getReg();
2627 Register SrcReg = MI.getOperand(i: 1).getReg();
2628 LLT DstTy = MRI.getType(Reg: DstReg);
2629 if (mi_match(R: SrcReg, MRI,
2630 P: m_GTrunc(Src: m_all_of(preds: m_Reg(R&: Reg), preds: m_SpecificType(Ty: DstTy)))) &&
2631 canReplaceReg(DstReg, SrcReg: Reg, MRI)) {
2632 unsigned DstSize = DstTy.getScalarSizeInBits();
2633 unsigned SrcSize = MRI.getType(Reg: SrcReg).getScalarSizeInBits();
2634 return VT->getKnownBits(R: Reg).countMinLeadingZeros() >= DstSize - SrcSize;
2635 }
2636 return false;
2637}
2638
2639static LLT getMidVTForTruncRightShiftCombine(LLT ShiftTy, LLT TruncTy) {
2640 const unsigned ShiftSize = ShiftTy.getScalarSizeInBits();
2641 const unsigned TruncSize = TruncTy.getScalarSizeInBits();
2642
2643 // ShiftTy > 32 > TruncTy -> 32
2644 if (ShiftSize > 32 && TruncSize < 32)
2645 return ShiftTy.changeElementSize(NewEltSize: 32);
2646
2647 // TODO: We could also reduce to 16 bits, but that's more target-dependent.
2648 // Some targets like it, some don't, some only like it under certain
2649 // conditions/processor versions, etc.
2650 // A TL hook might be needed for this.
2651
2652 // Don't combine
2653 return ShiftTy;
2654}
2655
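/// Narrow a shift whose only user is a truncate, e.g. for a left shift
/// (sketch):
///   %s:_(s64) = G_SHL %x, %amt
///   %t:_(s32) = G_TRUNC %s(s64)
/// -->
///   %nx:_(s32) = G_TRUNC %x(s64)
///   %t:_(s32) = G_SHL %nx, %amt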
2656bool CombinerHelper::matchCombineTruncOfShift(
2657 MachineInstr &MI, std::pair<MachineInstr *, LLT> &MatchInfo) const {
2658 assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");
2659 Register DstReg = MI.getOperand(i: 0).getReg();
2660 Register SrcReg = MI.getOperand(i: 1).getReg();
2661
2662 if (!MRI.hasOneNonDBGUse(RegNo: SrcReg))
2663 return false;
2664
2665 LLT SrcTy = MRI.getType(Reg: SrcReg);
2666 LLT DstTy = MRI.getType(Reg: DstReg);
2667
2668 MachineInstr *SrcMI = getDefIgnoringCopies(Reg: SrcReg, MRI);
2669 const auto &TL = getTargetLowering();
2670
2671 LLT NewShiftTy;
2672 switch (SrcMI->getOpcode()) {
2673 default:
2674 return false;
2675 case TargetOpcode::G_SHL: {
2676 NewShiftTy = DstTy;
2677
2678 // Make sure new shift amount is legal.
2679 KnownBits Known = VT->getKnownBits(R: SrcMI->getOperand(i: 2).getReg());
2680 if (Known.getMaxValue().uge(RHS: NewShiftTy.getScalarSizeInBits()))
2681 return false;
2682 break;
2683 }
2684 case TargetOpcode::G_LSHR:
2685 case TargetOpcode::G_ASHR: {
2686 // For right shifts, we conservatively do not do the transform if the TRUNC
2687 // has any STORE users. The reason is that if we change the type of the
2688 // shift, we may break the truncstore combine.
2689 //
2690 // TODO: Fix truncstore combine to handle (trunc(lshr (trunc x), k)).
2691 for (auto &User : MRI.use_instructions(Reg: DstReg))
2692 if (User.getOpcode() == TargetOpcode::G_STORE)
2693 return false;
2694
2695 NewShiftTy = getMidVTForTruncRightShiftCombine(ShiftTy: SrcTy, TruncTy: DstTy);
2696 if (NewShiftTy == SrcTy)
2697 return false;
2698
2699 // Make sure we won't lose information by truncating the high bits.
2700 KnownBits Known = VT->getKnownBits(R: SrcMI->getOperand(i: 2).getReg());
2701 if (Known.getMaxValue().ugt(RHS: NewShiftTy.getScalarSizeInBits() -
2702 DstTy.getScalarSizeInBits()))
2703 return false;
2704 break;
2705 }
2706 }
2707
2708 if (!isLegalOrBeforeLegalizer(
2709 Query: {SrcMI->getOpcode(),
2710 {NewShiftTy, TL.getPreferredShiftAmountTy(ShiftValueTy: NewShiftTy)}}))
2711 return false;
2712
2713 MatchInfo = std::make_pair(x&: SrcMI, y&: NewShiftTy);
2714 return true;
2715}
2716
2717void CombinerHelper::applyCombineTruncOfShift(
2718 MachineInstr &MI, std::pair<MachineInstr *, LLT> &MatchInfo) const {
2719 MachineInstr *ShiftMI = MatchInfo.first;
2720 LLT NewShiftTy = MatchInfo.second;
2721
2722 Register Dst = MI.getOperand(i: 0).getReg();
2723 LLT DstTy = MRI.getType(Reg: Dst);
2724
2725 Register ShiftAmt = ShiftMI->getOperand(i: 2).getReg();
2726 Register ShiftSrc = ShiftMI->getOperand(i: 1).getReg();
2727 ShiftSrc = Builder.buildTrunc(Res: NewShiftTy, Op: ShiftSrc).getReg(Idx: 0);
2728
2729 Register NewShift =
2730 Builder
2731 .buildInstr(Opc: ShiftMI->getOpcode(), DstOps: {NewShiftTy}, SrcOps: {ShiftSrc, ShiftAmt})
2732 .getReg(Idx: 0);
2733
2734 if (NewShiftTy == DstTy)
2735 replaceRegWith(MRI, FromReg: Dst, ToReg: NewShift);
2736 else
2737 Builder.buildTrunc(Res: Dst, Op: NewShift);
2738
2739 eraseInst(MI);
2740}
2741
2742bool CombinerHelper::matchAnyExplicitUseIsUndef(MachineInstr &MI) const {
2743 return any_of(Range: MI.explicit_uses(), P: [this](const MachineOperand &MO) {
2744 return MO.isReg() &&
2745 getOpcodeDef(Opcode: TargetOpcode::G_IMPLICIT_DEF, Reg: MO.getReg(), MRI);
2746 });
2747}
2748
2749bool CombinerHelper::matchAllExplicitUsesAreUndef(MachineInstr &MI) const {
2750 return all_of(Range: MI.explicit_uses(), P: [this](const MachineOperand &MO) {
2751 return !MO.isReg() ||
2752 getOpcodeDef(Opcode: TargetOpcode::G_IMPLICIT_DEF, Reg: MO.getReg(), MRI);
2753 });
2754}
2755
2756bool CombinerHelper::matchUndefShuffleVectorMask(MachineInstr &MI) const {
2757 assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
2758 ArrayRef<int> Mask = MI.getOperand(i: 3).getShuffleMask();
2759 return all_of(Range&: Mask, P: [](int Elt) { return Elt < 0; });
2760}
2761
2762bool CombinerHelper::matchUndefStore(MachineInstr &MI) const {
2763 assert(MI.getOpcode() == TargetOpcode::G_STORE);
2764 return getOpcodeDef(Opcode: TargetOpcode::G_IMPLICIT_DEF, Reg: MI.getOperand(i: 0).getReg(),
2765 MRI);
2766}
2767
2768bool CombinerHelper::matchUndefSelectCmp(MachineInstr &MI) const {
2769 assert(MI.getOpcode() == TargetOpcode::G_SELECT);
2770 return getOpcodeDef(Opcode: TargetOpcode::G_IMPLICIT_DEF, Reg: MI.getOperand(i: 1).getReg(),
2771 MRI);
2772}
2773
2774bool CombinerHelper::matchInsertExtractVecEltOutOfBounds(
2775 MachineInstr &MI) const {
2776 assert((MI.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT ||
2777 MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT) &&
2778 "Expected an insert/extract element op");
2779 LLT VecTy = MRI.getType(Reg: MI.getOperand(i: 1).getReg());
2780 if (VecTy.isScalableVector())
2781 return false;
2782
2783 unsigned IdxIdx =
2784 MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT ? 2 : 3;
2785 auto Idx = getIConstantVRegVal(VReg: MI.getOperand(i: IdxIdx).getReg(), MRI);
2786 if (!Idx)
2787 return false;
2788 return Idx->getZExtValue() >= VecTy.getNumElements();
2789}
2790
2791bool CombinerHelper::matchConstantSelectCmp(MachineInstr &MI,
2792 unsigned &OpIdx) const {
2793 GSelect &SelMI = cast<GSelect>(Val&: MI);
2794 auto Cst =
2795 isConstantOrConstantSplatVector(MI&: *MRI.getVRegDef(Reg: SelMI.getCondReg()), MRI);
2796 if (!Cst)
2797 return false;
2798 OpIdx = Cst->isZero() ? 3 : 2;
2799 return true;
2800}
2801
2802void CombinerHelper::eraseInst(MachineInstr &MI) const { MI.eraseFromParent(); }
2803
2804bool CombinerHelper::matchEqualDefs(const MachineOperand &MOP1,
2805 const MachineOperand &MOP2) const {
2806 if (!MOP1.isReg() || !MOP2.isReg())
2807 return false;
2808 auto InstAndDef1 = getDefSrcRegIgnoringCopies(Reg: MOP1.getReg(), MRI);
2809 if (!InstAndDef1)
2810 return false;
2811 auto InstAndDef2 = getDefSrcRegIgnoringCopies(Reg: MOP2.getReg(), MRI);
2812 if (!InstAndDef2)
2813 return false;
2814 MachineInstr *I1 = InstAndDef1->MI;
2815 MachineInstr *I2 = InstAndDef2->MI;
2816
2817 // Handle a case like this:
2818 //
2819 // %0:_(s64), %1:_(s64) = G_UNMERGE_VALUES %2:_(<2 x s64>)
2820 //
2821 // Even though %0 and %1 are produced by the same instruction they are not
2822 // the same values.
2823 if (I1 == I2)
2824 return MOP1.getReg() == MOP2.getReg();
2825
2826 // If we have an instruction which loads or stores, we can't guarantee that
2827 // it is identical.
2828 //
2829 // For example, we may have
2830 //
2831 // %x1 = G_LOAD %addr (load N from @somewhere)
2832 // ...
2833 // call @foo
2834 // ...
2835 // %x2 = G_LOAD %addr (load N from @somewhere)
2836 // ...
2837 // %or = G_OR %x1, %x2
2838 //
2839 // It's possible that @foo will modify whatever lives at the address we're
2840 // loading from. To be safe, let's just assume that all loads and stores
2841 // are different (unless we have something which is guaranteed to not
2842 // change.)
2843 if (I1->mayLoadOrStore() && !I1->isDereferenceableInvariantLoad())
2844 return false;
2845
2846 // If both instructions are loads or stores, they are equal only if both
2847 // are dereferenceable invariant loads with the same number of bits.
2848 if (I1->mayLoadOrStore() && I2->mayLoadOrStore()) {
2849 GLoadStore *LS1 = dyn_cast<GLoadStore>(Val: I1);
2850 GLoadStore *LS2 = dyn_cast<GLoadStore>(Val: I2);
2851 if (!LS1 || !LS2)
2852 return false;
2853
2854 if (!I2->isDereferenceableInvariantLoad() ||
2855 (LS1->getMemSizeInBits() != LS2->getMemSizeInBits()))
2856 return false;
2857 }
2858
2859 // Check for physical registers on the instructions first to avoid cases
2860 // like this:
2861 //
2862 // %a = COPY $physreg
2863 // ...
2864 // SOMETHING implicit-def $physreg
2865 // ...
2866 // %b = COPY $physreg
2867 //
2868 // These copies are not equivalent.
2869 if (any_of(Range: I1->uses(), P: [](const MachineOperand &MO) {
2870 return MO.isReg() && MO.getReg().isPhysical();
2871 })) {
2872 // Check if we have a case like this:
2873 //
2874 // %a = COPY $physreg
2875 // %b = COPY %a
2876 //
2877 // In this case, I1 and I2 will both be equal to %a = COPY $physreg.
2878 // From that, we know that they must have the same value, since they must
2879 // have come from the same COPY.
2880 return I1->isIdenticalTo(Other: *I2);
2881 }
2882
2883 // We don't have any physical registers, so we don't necessarily need the
2884 // same vreg defs.
2885 //
2886 // On the off-chance that there's some target instruction feeding into the
2887 // instruction, let's use produceSameValue instead of isIdenticalTo.
2888 if (Builder.getTII().produceSameValue(MI0: *I1, MI1: *I2, MRI: &MRI)) {
2889 // Handle instructions with multiple defs that produce the same values.
2890 // Values are the same for operands with the same index.
2891 // %0:_(s8), %1:_(s8), %2:_(s8), %3:_(s8) = G_UNMERGE_VALUES %4:_(<4 x s8>)
2892 // %5:_(s8), %6:_(s8), %7:_(s8), %8:_(s8) = G_UNMERGE_VALUES %4:_(<4 x s8>)
2893 // I1 and I2 are different instructions but produce the same values;
2894 // %1 and %6 are the same value, while %1 and %7 are not.
2895 return I1->findRegisterDefOperandIdx(Reg: InstAndDef1->Reg, /*TRI=*/nullptr) ==
2896 I2->findRegisterDefOperandIdx(Reg: InstAndDef2->Reg, /*TRI=*/nullptr);
2897 }
2898 return false;
2899}
2900
2901bool CombinerHelper::matchConstantOp(const MachineOperand &MOP,
2902 int64_t C) const {
2903 if (!MOP.isReg())
2904 return false;
2905 auto *MI = MRI.getVRegDef(Reg: MOP.getReg());
2906 auto MaybeCst = isConstantOrConstantSplatVector(MI&: *MI, MRI);
2907 return MaybeCst && MaybeCst->getBitWidth() <= 64 &&
2908 MaybeCst->getSExtValue() == C;
2909}
2910
2911bool CombinerHelper::matchConstantFPOp(const MachineOperand &MOP,
2912 double C) const {
2913 if (!MOP.isReg())
2914 return false;
2915 std::optional<FPValueAndVReg> MaybeCst;
2916 if (!mi_match(R: MOP.getReg(), MRI, P: m_GFCstOrSplat(FPValReg&: MaybeCst)))
2917 return false;
2918
2919 return MaybeCst->Value.isExactlyValue(V: C);
2920}
2921
2922void CombinerHelper::replaceSingleDefInstWithOperand(MachineInstr &MI,
2923 unsigned OpIdx) const {
2924 assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?");
2925 Register OldReg = MI.getOperand(i: 0).getReg();
2926 Register Replacement = MI.getOperand(i: OpIdx).getReg();
2927 assert(canReplaceReg(OldReg, Replacement, MRI) && "Cannot replace register?");
2928 replaceRegWith(MRI, FromReg: OldReg, ToReg: Replacement);
2929 MI.eraseFromParent();
2930}
2931
2932void CombinerHelper::replaceSingleDefInstWithReg(MachineInstr &MI,
2933 Register Replacement) const {
2934 assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?");
2935 Register OldReg = MI.getOperand(i: 0).getReg();
2936 assert(canReplaceReg(OldReg, Replacement, MRI) && "Cannot replace register?");
2937 replaceRegWith(MRI, FromReg: OldReg, ToReg: Replacement);
2938 MI.eraseFromParent();
2939}
2940
2941bool CombinerHelper::matchConstantLargerBitWidth(MachineInstr &MI,
2942 unsigned ConstIdx) const {
2943 Register ConstReg = MI.getOperand(i: ConstIdx).getReg();
2944 LLT DstTy = MRI.getType(Reg: MI.getOperand(i: 0).getReg());
2945
2946 // Get the shift amount
2947 auto VRegAndVal = getIConstantVRegValWithLookThrough(VReg: ConstReg, MRI);
2948 if (!VRegAndVal)
2949 return false;
2950
  // Return true if the shift amount is >= the bit width.
2952 return (VRegAndVal->Value.uge(RHS: DstTy.getSizeInBits()));
2953}
2954
2955void CombinerHelper::applyFunnelShiftConstantModulo(MachineInstr &MI) const {
2956 assert((MI.getOpcode() == TargetOpcode::G_FSHL ||
2957 MI.getOpcode() == TargetOpcode::G_FSHR) &&
2958 "This is not a funnel shift operation");
2959
2960 Register ConstReg = MI.getOperand(i: 3).getReg();
2961 LLT ConstTy = MRI.getType(Reg: ConstReg);
2962 LLT DstTy = MRI.getType(Reg: MI.getOperand(i: 0).getReg());
2963
2964 auto VRegAndVal = getIConstantVRegValWithLookThrough(VReg: ConstReg, MRI);
2965 assert((VRegAndVal) && "Value is not a constant");
2966
2967 // Calculate the new Shift Amount = Old Shift Amount % BitWidth
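  // E.g. for G_FSHL on s32 operands with a shift amount of 40, the rebuilt
  // constant is 40 % 32 == 8.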
2968 APInt NewConst = VRegAndVal->Value.urem(
2969 RHS: APInt(ConstTy.getSizeInBits(), DstTy.getScalarSizeInBits()));
2970
2971 auto NewConstInstr = Builder.buildConstant(Res: ConstTy, Val: NewConst.getZExtValue());
2972 Builder.buildInstr(
2973 Opc: MI.getOpcode(), DstOps: {MI.getOperand(i: 0)},
2974 SrcOps: {MI.getOperand(i: 1), MI.getOperand(i: 2), NewConstInstr.getReg(Idx: 0)});
2975
2976 MI.eraseFromParent();
2977}
2978
2979bool CombinerHelper::matchSelectSameVal(MachineInstr &MI) const {
2980 assert(MI.getOpcode() == TargetOpcode::G_SELECT);
2981 // Match (cond ? x : x)
2982 return matchEqualDefs(MOP1: MI.getOperand(i: 2), MOP2: MI.getOperand(i: 3)) &&
2983 canReplaceReg(DstReg: MI.getOperand(i: 0).getReg(), SrcReg: MI.getOperand(i: 2).getReg(),
2984 MRI);
2985}
2986
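// Match a binary op whose two source operands have equal defs, e.g.
// (G_OR %x, %x), so the result can be replaced with the operand directly.
// This is only valid for opcodes where x op x == x (such as G_AND/G_OR).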
2987bool CombinerHelper::matchBinOpSameVal(MachineInstr &MI) const {
2988 return matchEqualDefs(MOP1: MI.getOperand(i: 1), MOP2: MI.getOperand(i: 2)) &&
2989 canReplaceReg(DstReg: MI.getOperand(i: 0).getReg(), SrcReg: MI.getOperand(i: 1).getReg(),
2990 MRI);
2991}
2992
2993bool CombinerHelper::matchOperandIsZero(MachineInstr &MI,
2994 unsigned OpIdx) const {
2995 return matchConstantOp(MOP: MI.getOperand(i: OpIdx), C: 0) &&
2996 canReplaceReg(DstReg: MI.getOperand(i: 0).getReg(), SrcReg: MI.getOperand(i: OpIdx).getReg(),
2997 MRI);
2998}
2999
3000bool CombinerHelper::matchOperandIsUndef(MachineInstr &MI,
3001 unsigned OpIdx) const {
3002 MachineOperand &MO = MI.getOperand(i: OpIdx);
3003 return MO.isReg() &&
3004 getOpcodeDef(Opcode: TargetOpcode::G_IMPLICIT_DEF, Reg: MO.getReg(), MRI);
3005}
3006
3007bool CombinerHelper::matchOperandIsKnownToBeAPowerOfTwo(MachineInstr &MI,
3008 unsigned OpIdx) const {
3009 MachineOperand &MO = MI.getOperand(i: OpIdx);
3010 return isKnownToBeAPowerOfTwo(Val: MO.getReg(), MRI, ValueTracking: VT);
3011}
3012
3013void CombinerHelper::replaceInstWithFConstant(MachineInstr &MI,
3014 double C) const {
3015 assert(MI.getNumDefs() == 1 && "Expected only one def?");
3016 Builder.buildFConstant(Res: MI.getOperand(i: 0), Val: C);
3017 MI.eraseFromParent();
3018}
3019
3020void CombinerHelper::replaceInstWithConstant(MachineInstr &MI,
3021 int64_t C) const {
3022 assert(MI.getNumDefs() == 1 && "Expected only one def?");
3023 Builder.buildConstant(Res: MI.getOperand(i: 0), Val: C);
3024 MI.eraseFromParent();
3025}
3026
3027void CombinerHelper::replaceInstWithConstant(MachineInstr &MI, APInt C) const {
3028 assert(MI.getNumDefs() == 1 && "Expected only one def?");
3029 Builder.buildConstant(Res: MI.getOperand(i: 0), Val: C);
3030 MI.eraseFromParent();
3031}
3032
3033void CombinerHelper::replaceInstWithFConstant(MachineInstr &MI,
3034 ConstantFP *CFP) const {
3035 assert(MI.getNumDefs() == 1 && "Expected only one def?");
3036 Builder.buildFConstant(Res: MI.getOperand(i: 0), Val: CFP->getValueAPF());
3037 MI.eraseFromParent();
3038}
3039
3040void CombinerHelper::replaceInstWithUndef(MachineInstr &MI) const {
3041 assert(MI.getNumDefs() == 1 && "Expected only one def?");
3042 Builder.buildUndef(Res: MI.getOperand(i: 0));
3043 MI.eraseFromParent();
3044}
3045
3046bool CombinerHelper::matchSimplifyAddToSub(
3047 MachineInstr &MI, std::tuple<Register, Register> &MatchInfo) const {
3048 Register LHS = MI.getOperand(i: 1).getReg();
3049 Register RHS = MI.getOperand(i: 2).getReg();
3050 Register &NewLHS = std::get<0>(t&: MatchInfo);
3051 Register &NewRHS = std::get<1>(t&: MatchInfo);
3052
3053 // Helper lambda to check for opportunities for
3054 // ((0-A) + B) -> B - A
3055 // (A + (0-B)) -> A - B
3056 auto CheckFold = [&](Register &MaybeSub, Register &MaybeNewLHS) {
3057 if (!mi_match(R: MaybeSub, MRI, P: m_Neg(Src: m_Reg(R&: NewRHS))))
3058 return false;
3059 NewLHS = MaybeNewLHS;
3060 return true;
3061 };
3062
3063 return CheckFold(LHS, RHS) || CheckFold(RHS, LHS);
3064}
3065
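// Fold a chain of G_INSERT_VECTOR_ELTs with constant indices into a single
// G_BUILD_VECTOR, e.g.
//   %u:_(<2 x s32>) = G_IMPLICIT_DEF
//   %v1:_(<2 x s32>) = G_INSERT_VECTOR_ELT %u, %a(s32), 0
//   %v2:_(<2 x s32>) = G_INSERT_VECTOR_ELT %v1, %b(s32), 1
// ==>
//   %v2:_(<2 x s32>) = G_BUILD_VECTOR %a(s32), %b(s32)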
3066bool CombinerHelper::matchCombineInsertVecElts(
3067 MachineInstr &MI, SmallVectorImpl<Register> &MatchInfo) const {
3068 assert(MI.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT &&
3069 "Invalid opcode");
3070 Register DstReg = MI.getOperand(i: 0).getReg();
3071 LLT DstTy = MRI.getType(Reg: DstReg);
3072 assert(DstTy.isVector() && "Invalid G_INSERT_VECTOR_ELT?");
3073
3074 if (DstTy.isScalableVector())
3075 return false;
3076
3077 unsigned NumElts = DstTy.getNumElements();
3078 // If this MI is part of a sequence of insert_vec_elts, then
3079 // don't do the combine in the middle of the sequence.
3080 if (MRI.hasOneUse(RegNo: DstReg) && MRI.use_instr_begin(RegNo: DstReg)->getOpcode() ==
3081 TargetOpcode::G_INSERT_VECTOR_ELT)
3082 return false;
3083 MachineInstr *CurrInst = &MI;
3084 MachineInstr *TmpInst;
3085 int64_t IntImm;
3086 Register TmpReg;
3087 MatchInfo.resize(N: NumElts);
3088 while (mi_match(
3089 R: CurrInst->getOperand(i: 0).getReg(), MRI,
3090 P: m_GInsertVecElt(Src0: m_MInstr(MI&: TmpInst), Src1: m_Reg(R&: TmpReg), Src2: m_ICst(Cst&: IntImm)))) {
3091 if (IntImm >= NumElts || IntImm < 0)
3092 return false;
3093 if (!MatchInfo[IntImm])
3094 MatchInfo[IntImm] = TmpReg;
3095 CurrInst = TmpInst;
3096 }
3097 // Variable index.
3098 if (CurrInst->getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT)
3099 return false;
3100 if (TmpInst->getOpcode() == TargetOpcode::G_BUILD_VECTOR) {
3101 for (unsigned I = 1; I < TmpInst->getNumOperands(); ++I) {
3102 if (!MatchInfo[I - 1].isValid())
3103 MatchInfo[I - 1] = TmpInst->getOperand(i: I).getReg();
3104 }
3105 return true;
3106 }
3107 // If we didn't end in a G_IMPLICIT_DEF and the source is not fully
3108 // overwritten, bail out.
3109 return TmpInst->getOpcode() == TargetOpcode::G_IMPLICIT_DEF ||
3110 all_of(Range&: MatchInfo, P: [](Register Reg) { return !!Reg; });
3111}
3112
3113void CombinerHelper::applyCombineInsertVecElts(
3114 MachineInstr &MI, SmallVectorImpl<Register> &MatchInfo) const {
3115 Register UndefReg;
3116 auto GetUndef = [&]() {
3117 if (UndefReg)
3118 return UndefReg;
3119 LLT DstTy = MRI.getType(Reg: MI.getOperand(i: 0).getReg());
3120 UndefReg = Builder.buildUndef(Res: DstTy.getScalarType()).getReg(Idx: 0);
3121 return UndefReg;
3122 };
3123 for (Register &Reg : MatchInfo) {
3124 if (!Reg)
3125 Reg = GetUndef();
3126 }
3127 Builder.buildBuildVector(Res: MI.getOperand(i: 0).getReg(), Ops: MatchInfo);
3128 MI.eraseFromParent();
3129}
3130
3131void CombinerHelper::applySimplifyAddToSub(
3132 MachineInstr &MI, std::tuple<Register, Register> &MatchInfo) const {
3133 Register SubLHS, SubRHS;
3134 std::tie(args&: SubLHS, args&: SubRHS) = MatchInfo;
3135 Builder.buildSub(Dst: MI.getOperand(i: 0).getReg(), Src0: SubLHS, Src1: SubRHS);
3136 MI.eraseFromParent();
3137}
3138
3139bool CombinerHelper::matchHoistLogicOpWithSameOpcodeHands(
3140 MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) const {
3141 // Matches: logic (hand x, ...), (hand y, ...) -> hand (logic x, y), ...
3142 //
  // Creates the new hand + logic instructions (but does not insert them).
3144 //
3145 // On success, MatchInfo is populated with the new instructions. These are
3146 // inserted in applyHoistLogicOpWithSameOpcodeHands.
3147 unsigned LogicOpcode = MI.getOpcode();
3148 assert(LogicOpcode == TargetOpcode::G_AND ||
3149 LogicOpcode == TargetOpcode::G_OR ||
3150 LogicOpcode == TargetOpcode::G_XOR);
3151 MachineIRBuilder MIB(MI);
3152 Register Dst = MI.getOperand(i: 0).getReg();
3153 Register LHSReg = MI.getOperand(i: 1).getReg();
3154 Register RHSReg = MI.getOperand(i: 2).getReg();
3155
3156 // Don't recompute anything.
3157 if (!MRI.hasOneNonDBGUse(RegNo: LHSReg) || !MRI.hasOneNonDBGUse(RegNo: RHSReg))
3158 return false;
3159
3160 // Make sure we have (hand x, ...), (hand y, ...)
3161 MachineInstr *LeftHandInst = getDefIgnoringCopies(Reg: LHSReg, MRI);
3162 MachineInstr *RightHandInst = getDefIgnoringCopies(Reg: RHSReg, MRI);
3163 if (!LeftHandInst || !RightHandInst)
3164 return false;
3165 unsigned HandOpcode = LeftHandInst->getOpcode();
3166 if (HandOpcode != RightHandInst->getOpcode())
3167 return false;
3168 if (LeftHandInst->getNumOperands() < 2 ||
3169 !LeftHandInst->getOperand(i: 1).isReg() ||
3170 RightHandInst->getNumOperands() < 2 ||
3171 !RightHandInst->getOperand(i: 1).isReg())
3172 return false;
3173
3174 // Make sure the types match up, and if we're doing this post-legalization,
3175 // we end up with legal types.
3176 Register X = LeftHandInst->getOperand(i: 1).getReg();
3177 Register Y = RightHandInst->getOperand(i: 1).getReg();
3178 LLT XTy = MRI.getType(Reg: X);
3179 LLT YTy = MRI.getType(Reg: Y);
3180 if (!XTy.isValid() || XTy != YTy)
3181 return false;
3182
3183 // Optional extra source register.
3184 Register ExtraHandOpSrcReg;
3185 switch (HandOpcode) {
3186 default:
3187 return false;
3188 case TargetOpcode::G_ANYEXT:
3189 case TargetOpcode::G_SEXT:
3190 case TargetOpcode::G_ZEXT: {
3191 // Match: logic (ext X), (ext Y) --> ext (logic X, Y)
3192 break;
3193 }
3194 case TargetOpcode::G_TRUNC: {
3195 // Match: logic (trunc X), (trunc Y) -> trunc (logic X, Y)
3196 const MachineFunction *MF = MI.getMF();
3197 LLVMContext &Ctx = MF->getFunction().getContext();
3198
3199 LLT DstTy = MRI.getType(Reg: Dst);
3200 const TargetLowering &TLI = getTargetLowering();
3201
    // Be extra careful sinking a truncate. If the truncate is free, there's
    // no benefit in widening a binop.
3204 if (TLI.isZExtFree(FromTy: DstTy, ToTy: XTy, Ctx) && TLI.isTruncateFree(FromTy: XTy, ToTy: DstTy, Ctx))
3205 return false;
3206 break;
3207 }
3208 case TargetOpcode::G_AND:
3209 case TargetOpcode::G_ASHR:
3210 case TargetOpcode::G_LSHR:
3211 case TargetOpcode::G_SHL: {
3212 // Match: logic (binop x, z), (binop y, z) -> binop (logic x, y), z
3213 MachineOperand &ZOp = LeftHandInst->getOperand(i: 2);
3214 if (!matchEqualDefs(MOP1: ZOp, MOP2: RightHandInst->getOperand(i: 2)))
3215 return false;
3216 ExtraHandOpSrcReg = ZOp.getReg();
3217 break;
3218 }
3219 }
3220
3221 if (!isLegalOrBeforeLegalizer(Query: {LogicOpcode, {XTy, YTy}}))
3222 return false;
3223
3224 // Record the steps to build the new instructions.
3225 //
3226 // Steps to build (logic x, y)
3227 auto NewLogicDst = MRI.createGenericVirtualRegister(Ty: XTy);
3228 OperandBuildSteps LogicBuildSteps = {
3229 [=](MachineInstrBuilder &MIB) { MIB.addDef(RegNo: NewLogicDst); },
3230 [=](MachineInstrBuilder &MIB) { MIB.addReg(RegNo: X); },
3231 [=](MachineInstrBuilder &MIB) { MIB.addReg(RegNo: Y); }};
3232 InstructionBuildSteps LogicSteps(LogicOpcode, LogicBuildSteps);
3233
3234 // Steps to build hand (logic x, y), ...z
3235 OperandBuildSteps HandBuildSteps = {
3236 [=](MachineInstrBuilder &MIB) { MIB.addDef(RegNo: Dst); },
3237 [=](MachineInstrBuilder &MIB) { MIB.addReg(RegNo: NewLogicDst); }};
3238 if (ExtraHandOpSrcReg.isValid())
3239 HandBuildSteps.push_back(
3240 Elt: [=](MachineInstrBuilder &MIB) { MIB.addReg(RegNo: ExtraHandOpSrcReg); });
3241 InstructionBuildSteps HandSteps(HandOpcode, HandBuildSteps);
3242
3243 MatchInfo = InstructionStepsMatchInfo({LogicSteps, HandSteps});
3244 return true;
3245}
3246
3247void CombinerHelper::applyBuildInstructionSteps(
3248 MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) const {
3249 assert(MatchInfo.InstrsToBuild.size() &&
3250 "Expected at least one instr to build?");
3251 for (auto &InstrToBuild : MatchInfo.InstrsToBuild) {
3252 assert(InstrToBuild.Opcode && "Expected a valid opcode?");
3253 assert(InstrToBuild.OperandFns.size() && "Expected at least one operand?");
3254 MachineInstrBuilder Instr = Builder.buildInstr(Opcode: InstrToBuild.Opcode);
3255 for (auto &OperandFn : InstrToBuild.OperandFns)
3256 OperandFn(Instr);
3257 }
3258 MI.eraseFromParent();
3259}
3260
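// Fold (ashr (shl x, C), C) -> (sext_inreg x, Width - C), e.g. on s32:
//   %shl:_(s32) = G_SHL %x, 24
//   %res:_(s32) = G_ASHR %shl, 24
// ==>
//   %res:_(s32) = G_SEXT_INREG %x, 8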
3261bool CombinerHelper::matchAshrShlToSextInreg(
3262 MachineInstr &MI, std::tuple<Register, int64_t> &MatchInfo) const {
3263 assert(MI.getOpcode() == TargetOpcode::G_ASHR);
3264 int64_t ShlCst, AshrCst;
3265 Register Src;
3266 if (!mi_match(R: MI.getOperand(i: 0).getReg(), MRI,
3267 P: m_GAShr(L: m_GShl(L: m_Reg(R&: Src), R: m_ICstOrSplat(Cst&: ShlCst)),
3268 R: m_ICstOrSplat(Cst&: AshrCst))))
3269 return false;
3270 if (ShlCst != AshrCst)
3271 return false;
3272 if (!isLegalOrBeforeLegalizer(
3273 Query: {TargetOpcode::G_SEXT_INREG, {MRI.getType(Reg: Src)}}))
3274 return false;
3275 MatchInfo = std::make_tuple(args&: Src, args&: ShlCst);
3276 return true;
3277}
3278
3279void CombinerHelper::applyAshShlToSextInreg(
3280 MachineInstr &MI, std::tuple<Register, int64_t> &MatchInfo) const {
3281 assert(MI.getOpcode() == TargetOpcode::G_ASHR);
3282 Register Src;
3283 int64_t ShiftAmt;
3284 std::tie(args&: Src, args&: ShiftAmt) = MatchInfo;
3285 unsigned Size = MRI.getType(Reg: Src).getScalarSizeInBits();
3286 Builder.buildSExtInReg(Res: MI.getOperand(i: 0).getReg(), Op: Src, ImmOp: Size - ShiftAmt);
3287 MI.eraseFromParent();
3288}
3289
3290/// and(and(x, C1), C2) -> C1&C2 ? and(x, C1&C2) : 0
3291bool CombinerHelper::matchOverlappingAnd(
3292 MachineInstr &MI,
3293 std::function<void(MachineIRBuilder &)> &MatchInfo) const {
3294 assert(MI.getOpcode() == TargetOpcode::G_AND);
3295
3296 Register Dst = MI.getOperand(i: 0).getReg();
3297 LLT Ty = MRI.getType(Reg: Dst);
3298
3299 Register R;
3300 int64_t C1;
3301 int64_t C2;
3302 if (!mi_match(
3303 R: Dst, MRI,
3304 P: m_GAnd(L: m_GAnd(L: m_Reg(R), R: m_ICst(Cst&: C1)), R: m_ICst(Cst&: C2))))
3305 return false;
3306
3307 MatchInfo = [=](MachineIRBuilder &B) {
3308 if (C1 & C2) {
3309 B.buildAnd(Dst, Src0: R, Src1: B.buildConstant(Res: Ty, Val: C1 & C2));
3310 return;
3311 }
3312 auto Zero = B.buildConstant(Res: Ty, Val: 0);
3313 replaceRegWith(MRI, FromReg: Dst, ToReg: Zero->getOperand(i: 0).getReg());
3314 };
3315 return true;
3316}
3317
3318bool CombinerHelper::matchRedundantAnd(MachineInstr &MI,
3319 Register &Replacement) const {
3320 // Given
3321 //
3322 // %y:_(sN) = G_SOMETHING
3323 // %x:_(sN) = G_SOMETHING
3324 // %res:_(sN) = G_AND %x, %y
3325 //
3326 // Eliminate the G_AND when it is known that x & y == x or x & y == y.
3327 //
3328 // Patterns like this can appear as a result of legalization. E.g.
3329 //
3330 // %cmp:_(s32) = G_ICMP intpred(pred), %x(s32), %y
3331 // %one:_(s32) = G_CONSTANT i32 1
3332 // %and:_(s32) = G_AND %cmp, %one
3333 //
3334 // In this case, G_ICMP only produces a single bit, so x & 1 == x.
3335 assert(MI.getOpcode() == TargetOpcode::G_AND);
3336 if (!VT)
3337 return false;
3338
3339 Register AndDst = MI.getOperand(i: 0).getReg();
3340 Register LHS = MI.getOperand(i: 1).getReg();
3341 Register RHS = MI.getOperand(i: 2).getReg();
3342
3343 // Check the RHS (maybe a constant) first, and if we have no KnownBits there,
3344 // we can't do anything. If we do, then it depends on whether we have
3345 // KnownBits on the LHS.
3346 KnownBits RHSBits = VT->getKnownBits(R: RHS);
3347 if (RHSBits.isUnknown())
3348 return false;
3349
3350 KnownBits LHSBits = VT->getKnownBits(R: LHS);
3351
3352 // Check that x & Mask == x.
3353 // x & 1 == x, always
3354 // x & 0 == x, only if x is also 0
3355 // Meaning Mask has no effect if every bit is either one in Mask or zero in x.
3356 //
3357 // Check if we can replace AndDst with the LHS of the G_AND
3358 if (canReplaceReg(DstReg: AndDst, SrcReg: LHS, MRI) &&
3359 (LHSBits.Zero | RHSBits.One).isAllOnes()) {
3360 Replacement = LHS;
3361 return true;
3362 }
3363
3364 // Check if we can replace AndDst with the RHS of the G_AND
3365 if (canReplaceReg(DstReg: AndDst, SrcReg: RHS, MRI) &&
3366 (LHSBits.One | RHSBits.Zero).isAllOnes()) {
3367 Replacement = RHS;
3368 return true;
3369 }
3370
3371 return false;
3372}
3373
3374bool CombinerHelper::matchRedundantOr(MachineInstr &MI,
3375 Register &Replacement) const {
3376 // Given
3377 //
3378 // %y:_(sN) = G_SOMETHING
3379 // %x:_(sN) = G_SOMETHING
3380 // %res:_(sN) = G_OR %x, %y
3381 //
3382 // Eliminate the G_OR when it is known that x | y == x or x | y == y.
3383 assert(MI.getOpcode() == TargetOpcode::G_OR);
3384 if (!VT)
3385 return false;
3386
3387 Register OrDst = MI.getOperand(i: 0).getReg();
3388 Register LHS = MI.getOperand(i: 1).getReg();
3389 Register RHS = MI.getOperand(i: 2).getReg();
3390
3391 KnownBits LHSBits = VT->getKnownBits(R: LHS);
3392 KnownBits RHSBits = VT->getKnownBits(R: RHS);
3393
3394 // Check that x | Mask == x.
3395 // x | 0 == x, always
3396 // x | 1 == x, only if x is also 1
3397 // Meaning Mask has no effect if every bit is either zero in Mask or one in x.
3398 //
3399 // Check if we can replace OrDst with the LHS of the G_OR
3400 if (canReplaceReg(DstReg: OrDst, SrcReg: LHS, MRI) &&
3401 (LHSBits.One | RHSBits.Zero).isAllOnes()) {
3402 Replacement = LHS;
3403 return true;
3404 }
3405
3406 // Check if we can replace OrDst with the RHS of the G_OR
3407 if (canReplaceReg(DstReg: OrDst, SrcReg: RHS, MRI) &&
3408 (LHSBits.Zero | RHSBits.One).isAllOnes()) {
3409 Replacement = RHS;
3410 return true;
3411 }
3412
3413 return false;
3414}
3415
3416bool CombinerHelper::matchRedundantSExtInReg(MachineInstr &MI) const {
3417 // If the input is already sign extended, just drop the extension.
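  // E.g. for an s32 source with ExtBits == 8, the G_SEXT_INREG is redundant
  // when the source is known to have at least 32 - 8 + 1 == 25 sign bits.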
3418 Register Src = MI.getOperand(i: 1).getReg();
3419 unsigned ExtBits = MI.getOperand(i: 2).getImm();
3420 unsigned TypeSize = MRI.getType(Reg: Src).getScalarSizeInBits();
3421 return VT->computeNumSignBits(R: Src) >= (TypeSize - ExtBits + 1);
3422}
3423
3424static bool isConstValidTrue(const TargetLowering &TLI, unsigned ScalarSizeBits,
3425 int64_t Cst, bool IsVector, bool IsFP) {
3426 // For i1, Cst will always be -1 regardless of boolean contents.
3427 return (ScalarSizeBits == 1 && Cst == -1) ||
3428 isConstTrueVal(TLI, Val: Cst, IsVector, IsFP);
3429}
3430
// This combine tries to reduce the number of scalarised G_TRUNC instructions
// by using vector truncates instead.
//
// EXAMPLE:
//  %a(s32), %b(s32) = G_UNMERGE_VALUES %src(<2 x s32>)
//  %T_a(s16) = G_TRUNC %a(s32)
//  %T_b(s16) = G_TRUNC %b(s32)
//  %Undef(s16) = G_IMPLICIT_DEF(s16)
//  %dst(<4 x s16>) = G_BUILD_VECTOR %T_a(s16), %T_b(s16), %Undef(s16), %Undef(s16)
//
// ===>
//  %Undef(<2 x s32>) = G_IMPLICIT_DEF(<2 x s32>)
//  %Mid(<4 x s32>) = G_CONCAT_VECTORS %src(<2 x s32>), %Undef(<2 x s32>)
//  %dst(<4 x s16>) = G_TRUNC %Mid(<4 x s32>)
//
// Only matches sources made up of G_TRUNCs followed by G_IMPLICIT_DEFs.
3447bool CombinerHelper::matchUseVectorTruncate(MachineInstr &MI,
3448 Register &MatchInfo) const {
3449 auto BuildMI = cast<GBuildVector>(Val: &MI);
3450 unsigned NumOperands = BuildMI->getNumSources();
3451 LLT DstTy = MRI.getType(Reg: BuildMI->getReg(Idx: 0));
3452
3453 // Check the G_BUILD_VECTOR sources
3454 unsigned I;
3455 MachineInstr *UnmergeMI = nullptr;
3456
3457 // Check all source TRUNCs come from the same UNMERGE instruction
3458 for (I = 0; I < NumOperands; ++I) {
3459 auto SrcMI = MRI.getVRegDef(Reg: BuildMI->getSourceReg(I));
3460 auto SrcMIOpc = SrcMI->getOpcode();
3461
3462 // Check if the G_TRUNC instructions all come from the same MI
3463 if (SrcMIOpc == TargetOpcode::G_TRUNC) {
3464 if (!UnmergeMI) {
3465 UnmergeMI = MRI.getVRegDef(Reg: SrcMI->getOperand(i: 1).getReg());
3466 if (UnmergeMI->getOpcode() != TargetOpcode::G_UNMERGE_VALUES)
3467 return false;
3468 } else {
3469 auto UnmergeSrcMI = MRI.getVRegDef(Reg: SrcMI->getOperand(i: 1).getReg());
3470 if (UnmergeMI != UnmergeSrcMI)
3471 return false;
3472 }
3473 } else {
3474 break;
3475 }
3476 }
3477 if (I < 2)
3478 return false;
3479
3480 // Check the remaining source elements are only G_IMPLICIT_DEF
3481 for (; I < NumOperands; ++I) {
3482 auto SrcMI = MRI.getVRegDef(Reg: BuildMI->getSourceReg(I));
3483 auto SrcMIOpc = SrcMI->getOpcode();
3484
3485 if (SrcMIOpc != TargetOpcode::G_IMPLICIT_DEF)
3486 return false;
3487 }
3488
3489 // Check the size of unmerge source
3490 MatchInfo = cast<GUnmerge>(Val: UnmergeMI)->getSourceReg();
3491 LLT UnmergeSrcTy = MRI.getType(Reg: MatchInfo);
3492 if (!DstTy.getElementCount().isKnownMultipleOf(RHS: UnmergeSrcTy.getNumElements()))
3493 return false;
3494
3495 // Check the unmerge source and destination element types match
3496 LLT UnmergeSrcEltTy = UnmergeSrcTy.getElementType();
3497 Register UnmergeDstReg = UnmergeMI->getOperand(i: 0).getReg();
3498 LLT UnmergeDstEltTy = MRI.getType(Reg: UnmergeDstReg);
3499 if (UnmergeSrcEltTy != UnmergeDstEltTy)
3500 return false;
3501
3502 // Only generate legal instructions post-legalizer
3503 if (!IsPreLegalize) {
3504 LLT MidTy = DstTy.changeElementType(NewEltTy: UnmergeSrcTy.getScalarType());
3505
3506 if (DstTy.getElementCount() != UnmergeSrcTy.getElementCount() &&
3507 !isLegal(Query: {TargetOpcode::G_CONCAT_VECTORS, {MidTy, UnmergeSrcTy}}))
3508 return false;
3509
3510 if (!isLegal(Query: {TargetOpcode::G_TRUNC, {DstTy, MidTy}}))
3511 return false;
3512 }
3513
3514 return true;
3515}
3516
3517void CombinerHelper::applyUseVectorTruncate(MachineInstr &MI,
3518 Register &MatchInfo) const {
3519 Register MidReg;
3520 auto BuildMI = cast<GBuildVector>(Val: &MI);
3521 Register DstReg = BuildMI->getReg(Idx: 0);
3522 LLT DstTy = MRI.getType(Reg: DstReg);
3523 LLT UnmergeSrcTy = MRI.getType(Reg: MatchInfo);
3524 unsigned DstTyNumElt = DstTy.getNumElements();
3525 unsigned UnmergeSrcTyNumElt = UnmergeSrcTy.getNumElements();
3526
3527 // No need to pad vector if only G_TRUNC is needed
3528 if (DstTyNumElt / UnmergeSrcTyNumElt == 1) {
3529 MidReg = MatchInfo;
3530 } else {
3531 Register UndefReg = Builder.buildUndef(Res: UnmergeSrcTy).getReg(Idx: 0);
3532 SmallVector<Register> ConcatRegs = {MatchInfo};
3533 for (unsigned I = 1; I < DstTyNumElt / UnmergeSrcTyNumElt; ++I)
3534 ConcatRegs.push_back(Elt: UndefReg);
3535
3536 auto MidTy = DstTy.changeElementType(NewEltTy: UnmergeSrcTy.getScalarType());
3537 MidReg = Builder.buildConcatVectors(Res: MidTy, Ops: ConcatRegs).getReg(Idx: 0);
3538 }
3539
3540 Builder.buildTrunc(Res: DstReg, Op: MidReg);
3541 MI.eraseFromParent();
3542}
3543
3544bool CombinerHelper::matchNotCmp(
3545 MachineInstr &MI, SmallVectorImpl<Register> &RegsToNegate) const {
3546 assert(MI.getOpcode() == TargetOpcode::G_XOR);
3547 LLT Ty = MRI.getType(Reg: MI.getOperand(i: 0).getReg());
3548 const auto &TLI = *Builder.getMF().getSubtarget().getTargetLowering();
3549 Register XorSrc;
3550 Register CstReg;
3551 // We match xor(src, true) here.
3552 if (!mi_match(R: MI.getOperand(i: 0).getReg(), MRI,
3553 P: m_GXor(L: m_Reg(R&: XorSrc), R: m_Reg(R&: CstReg))))
3554 return false;
3555
3556 if (!MRI.hasOneNonDBGUse(RegNo: XorSrc))
3557 return false;
3558
3559 // Check that XorSrc is the root of a tree of comparisons combined with ANDs
  // and ORs. The suffix of RegsToNegate starting from index I is used as a
  // work list of tree nodes to visit.
3562 RegsToNegate.push_back(Elt: XorSrc);
3563 // Remember whether the comparisons are all integer or all floating point.
3564 bool IsInt = false;
3565 bool IsFP = false;
3566 for (unsigned I = 0; I < RegsToNegate.size(); ++I) {
3567 Register Reg = RegsToNegate[I];
3568 if (!MRI.hasOneNonDBGUse(RegNo: Reg))
3569 return false;
3570 MachineInstr *Def = MRI.getVRegDef(Reg);
3571 switch (Def->getOpcode()) {
3572 default:
3573 // Don't match if the tree contains anything other than ANDs, ORs and
3574 // comparisons.
3575 return false;
3576 case TargetOpcode::G_ICMP:
3577 if (IsFP)
3578 return false;
3579 IsInt = true;
3580 // When we apply the combine we will invert the predicate.
3581 break;
3582 case TargetOpcode::G_FCMP:
3583 if (IsInt)
3584 return false;
3585 IsFP = true;
3586 // When we apply the combine we will invert the predicate.
3587 break;
3588 case TargetOpcode::G_AND:
3589 case TargetOpcode::G_OR:
3590 // Implement De Morgan's laws:
3591 // ~(x & y) -> ~x | ~y
3592 // ~(x | y) -> ~x & ~y
3593 // When we apply the combine we will change the opcode and recursively
3594 // negate the operands.
3595 RegsToNegate.push_back(Elt: Def->getOperand(i: 1).getReg());
3596 RegsToNegate.push_back(Elt: Def->getOperand(i: 2).getReg());
3597 break;
3598 }
3599 }
3600
  // Now that we know whether the comparisons are integer or floating point,
  // check the constant in the xor.
3603 int64_t Cst;
3604 if (Ty.isVector()) {
3605 MachineInstr *CstDef = MRI.getVRegDef(Reg: CstReg);
3606 auto MaybeCst = getIConstantSplatSExtVal(MI: *CstDef, MRI);
3607 if (!MaybeCst)
3608 return false;
3609 if (!isConstValidTrue(TLI, ScalarSizeBits: Ty.getScalarSizeInBits(), Cst: *MaybeCst, IsVector: true, IsFP))
3610 return false;
3611 } else {
3612 if (!mi_match(R: CstReg, MRI, P: m_ICst(Cst)))
3613 return false;
3614 if (!isConstValidTrue(TLI, ScalarSizeBits: Ty.getSizeInBits(), Cst, IsVector: false, IsFP))
3615 return false;
3616 }
3617
3618 return true;
3619}
3620
3621void CombinerHelper::applyNotCmp(
3622 MachineInstr &MI, SmallVectorImpl<Register> &RegsToNegate) const {
3623 for (Register Reg : RegsToNegate) {
3624 MachineInstr *Def = MRI.getVRegDef(Reg);
3625 Observer.changingInstr(MI&: *Def);
3626 // For each comparison, invert the opcode. For each AND and OR, change the
3627 // opcode.
3628 switch (Def->getOpcode()) {
3629 default:
3630 llvm_unreachable("Unexpected opcode");
3631 case TargetOpcode::G_ICMP:
3632 case TargetOpcode::G_FCMP: {
3633 MachineOperand &PredOp = Def->getOperand(i: 1);
3634 CmpInst::Predicate NewP = CmpInst::getInversePredicate(
3635 pred: (CmpInst::Predicate)PredOp.getPredicate());
3636 PredOp.setPredicate(NewP);
3637 break;
3638 }
3639 case TargetOpcode::G_AND:
3640 Def->setDesc(Builder.getTII().get(Opcode: TargetOpcode::G_OR));
3641 break;
3642 case TargetOpcode::G_OR:
3643 Def->setDesc(Builder.getTII().get(Opcode: TargetOpcode::G_AND));
3644 break;
3645 }
3646 Observer.changedInstr(MI&: *Def);
3647 }
3648
3649 replaceRegWith(MRI, FromReg: MI.getOperand(i: 0).getReg(), ToReg: MI.getOperand(i: 1).getReg());
3650 MI.eraseFromParent();
3651}
3652
3653bool CombinerHelper::matchXorOfAndWithSameReg(
3654 MachineInstr &MI, std::pair<Register, Register> &MatchInfo) const {
3655 // Match (xor (and x, y), y) (or any of its commuted cases)
3656 assert(MI.getOpcode() == TargetOpcode::G_XOR);
3657 Register &X = MatchInfo.first;
3658 Register &Y = MatchInfo.second;
3659 Register AndReg = MI.getOperand(i: 1).getReg();
3660 Register SharedReg = MI.getOperand(i: 2).getReg();
3661
3662 // Find a G_AND on either side of the G_XOR.
3663 // Look for one of
3664 //
3665 // (xor (and x, y), SharedReg)
3666 // (xor SharedReg, (and x, y))
3667 if (!mi_match(R: AndReg, MRI, P: m_GAnd(L: m_Reg(R&: X), R: m_Reg(R&: Y)))) {
3668 std::swap(a&: AndReg, b&: SharedReg);
3669 if (!mi_match(R: AndReg, MRI, P: m_GAnd(L: m_Reg(R&: X), R: m_Reg(R&: Y))))
3670 return false;
3671 }
3672
3673 // Only do this if we'll eliminate the G_AND.
3674 if (!MRI.hasOneNonDBGUse(RegNo: AndReg))
3675 return false;
3676
3677 // We can combine if SharedReg is the same as either the LHS or RHS of the
3678 // G_AND.
3679 if (Y != SharedReg)
3680 std::swap(a&: X, b&: Y);
3681 return Y == SharedReg;
3682}
3683
3684void CombinerHelper::applyXorOfAndWithSameReg(
3685 MachineInstr &MI, std::pair<Register, Register> &MatchInfo) const {
3686 // Fold (xor (and x, y), y) -> (and (not x), y)
3687 Register X, Y;
3688 std::tie(args&: X, args&: Y) = MatchInfo;
3689 auto Not = Builder.buildNot(Dst: MRI.getType(Reg: X), Src0: X);
3690 Observer.changingInstr(MI);
3691 MI.setDesc(Builder.getTII().get(Opcode: TargetOpcode::G_AND));
3692 MI.getOperand(i: 1).setReg(Not->getOperand(i: 0).getReg());
3693 MI.getOperand(i: 2).setReg(Y);
3694 Observer.changedInstr(MI);
3695}
3696
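// Fold (ptr_add 0, x) -> (inttoptr x). Skipped for non-integral address
// spaces, where casting an integer offset to a pointer isn't meaningful.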
3697bool CombinerHelper::matchPtrAddZero(MachineInstr &MI) const {
3698 auto &PtrAdd = cast<GPtrAdd>(Val&: MI);
3699 Register DstReg = PtrAdd.getReg(Idx: 0);
3700 LLT Ty = MRI.getType(Reg: DstReg);
3701 const DataLayout &DL = Builder.getMF().getDataLayout();
3702
3703 if (DL.isNonIntegralAddressSpace(AddrSpace: Ty.getScalarType().getAddressSpace()))
3704 return false;
3705
3706 if (Ty.isPointer()) {
3707 auto ConstVal = getIConstantVRegVal(VReg: PtrAdd.getBaseReg(), MRI);
3708 return ConstVal && *ConstVal == 0;
3709 }
3710
3711 assert(Ty.isVector() && "Expecting a vector type");
3712 const MachineInstr *VecMI = MRI.getVRegDef(Reg: PtrAdd.getBaseReg());
3713 return isBuildVectorAllZeros(MI: *VecMI, MRI);
3714}
3715
3716void CombinerHelper::applyPtrAddZero(MachineInstr &MI) const {
3717 auto &PtrAdd = cast<GPtrAdd>(Val&: MI);
3718 Builder.buildIntToPtr(Dst: PtrAdd.getReg(Idx: 0), Src: PtrAdd.getOffsetReg());
3719 PtrAdd.eraseFromParent();
3720}
3721
3722/// The second source operand is known to be a power of 2.
3723void CombinerHelper::applySimplifyURemByPow2(MachineInstr &MI) const {
3724 Register DstReg = MI.getOperand(i: 0).getReg();
3725 Register Src0 = MI.getOperand(i: 1).getReg();
3726 Register Pow2Src1 = MI.getOperand(i: 2).getReg();
3727 LLT Ty = MRI.getType(Reg: DstReg);
3728
3729 // Fold (urem x, pow2) -> (and x, pow2-1)
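  // e.g. (urem x, 8) -> (and x, 7)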
3730 auto NegOne = Builder.buildConstant(Res: Ty, Val: -1);
3731 auto Add = Builder.buildAdd(Dst: Ty, Src0: Pow2Src1, Src1: NegOne);
3732 Builder.buildAnd(Dst: DstReg, Src0, Src1: Add);
3733 MI.eraseFromParent();
3734}
3735
3736bool CombinerHelper::matchFoldBinOpIntoSelect(MachineInstr &MI,
3737 unsigned &SelectOpNo) const {
3738 Register LHS = MI.getOperand(i: 1).getReg();
3739 Register RHS = MI.getOperand(i: 2).getReg();
3740
3741 Register OtherOperandReg = RHS;
3742 SelectOpNo = 1;
3743 MachineInstr *Select = MRI.getVRegDef(Reg: LHS);
3744
3745 // Don't do this unless the old select is going away. We want to eliminate the
3746 // binary operator, not replace a binop with a select.
3747 if (Select->getOpcode() != TargetOpcode::G_SELECT ||
3748 !MRI.hasOneNonDBGUse(RegNo: LHS)) {
3749 OtherOperandReg = LHS;
3750 SelectOpNo = 2;
3751 Select = MRI.getVRegDef(Reg: RHS);
3752 if (Select->getOpcode() != TargetOpcode::G_SELECT ||
3753 !MRI.hasOneNonDBGUse(RegNo: RHS))
3754 return false;
3755 }
3756
3757 MachineInstr *SelectLHS = MRI.getVRegDef(Reg: Select->getOperand(i: 2).getReg());
3758 MachineInstr *SelectRHS = MRI.getVRegDef(Reg: Select->getOperand(i: 3).getReg());
3759
3760 if (!isConstantOrConstantVector(MI: *SelectLHS, MRI,
3761 /*AllowFP*/ true,
3762 /*AllowOpaqueConstants*/ false))
3763 return false;
3764 if (!isConstantOrConstantVector(MI: *SelectRHS, MRI,
3765 /*AllowFP*/ true,
3766 /*AllowOpaqueConstants*/ false))
3767 return false;
3768
3769 unsigned BinOpcode = MI.getOpcode();
3770
3771 // We know that one of the operands is a select of constants. Now verify that
3772 // the other binary operator operand is either a constant, or we can handle a
3773 // variable.
3774 bool CanFoldNonConst =
3775 (BinOpcode == TargetOpcode::G_AND || BinOpcode == TargetOpcode::G_OR) &&
3776 (isNullOrNullSplat(MI: *SelectLHS, MRI) ||
3777 isAllOnesOrAllOnesSplat(MI: *SelectLHS, MRI)) &&
3778 (isNullOrNullSplat(MI: *SelectRHS, MRI) ||
3779 isAllOnesOrAllOnesSplat(MI: *SelectRHS, MRI));
3780 if (CanFoldNonConst)
3781 return true;
3782
3783 return isConstantOrConstantVector(MI: *MRI.getVRegDef(Reg: OtherOperandReg), MRI,
3784 /*AllowFP*/ true,
3785 /*AllowOpaqueConstants*/ false);
3786}
3787
3788/// \p SelectOperand is the operand in binary operator \p MI that is the select
3789/// to fold.
3790void CombinerHelper::applyFoldBinOpIntoSelect(
3791 MachineInstr &MI, const unsigned &SelectOperand) const {
3792 Register Dst = MI.getOperand(i: 0).getReg();
3793 Register LHS = MI.getOperand(i: 1).getReg();
3794 Register RHS = MI.getOperand(i: 2).getReg();
3795 MachineInstr *Select = MRI.getVRegDef(Reg: MI.getOperand(i: SelectOperand).getReg());
3796
3797 Register SelectCond = Select->getOperand(i: 1).getReg();
3798 Register SelectTrue = Select->getOperand(i: 2).getReg();
3799 Register SelectFalse = Select->getOperand(i: 3).getReg();
3800
3801 LLT Ty = MRI.getType(Reg: Dst);
3802 unsigned BinOpcode = MI.getOpcode();
3803
3804 Register FoldTrue, FoldFalse;
3805
3806 // We have a select-of-constants followed by a binary operator with a
3807 // constant. Eliminate the binop by pulling the constant math into the select.
3808 // Example: add (select Cond, CT, CF), CBO --> select Cond, CT + CBO, CF + CBO
3809 if (SelectOperand == 1) {
3810 // TODO: SelectionDAG verifies this actually constant folds before
3811 // committing to the combine.
3812
3813 FoldTrue = Builder.buildInstr(Opc: BinOpcode, DstOps: {Ty}, SrcOps: {SelectTrue, RHS}).getReg(Idx: 0);
3814 FoldFalse =
3815 Builder.buildInstr(Opc: BinOpcode, DstOps: {Ty}, SrcOps: {SelectFalse, RHS}).getReg(Idx: 0);
3816 } else {
3817 FoldTrue = Builder.buildInstr(Opc: BinOpcode, DstOps: {Ty}, SrcOps: {LHS, SelectTrue}).getReg(Idx: 0);
3818 FoldFalse =
3819 Builder.buildInstr(Opc: BinOpcode, DstOps: {Ty}, SrcOps: {LHS, SelectFalse}).getReg(Idx: 0);
3820 }
3821
3822 Builder.buildSelect(Res: Dst, Tst: SelectCond, Op0: FoldTrue, Op1: FoldFalse, Flags: MI.getFlags());
3823 MI.eraseFromParent();
3824}
3825
3826std::optional<SmallVector<Register, 8>>
3827CombinerHelper::findCandidatesForLoadOrCombine(const MachineInstr *Root) const {
3828 assert(Root->getOpcode() == TargetOpcode::G_OR && "Expected G_OR only!");
3829 // We want to detect if Root is part of a tree which represents a bunch
3830 // of loads being merged into a larger load. We'll try to recognize patterns
3831 // like, for example:
3832 //
3833 // Reg Reg
3834 // \ /
3835 // OR_1 Reg
3836 // \ /
3837 // OR_2
3838 // \ Reg
3839 // .. /
3840 // Root
3841 //
3842 // Reg Reg Reg Reg
3843 // \ / \ /
3844 // OR_1 OR_2
3845 // \ /
3846 // \ /
3847 // ...
3848 // Root
3849 //
3850 // Each "Reg" may have been produced by a load + some arithmetic. This
3851 // function will save each of them.
3852 SmallVector<Register, 8> RegsToVisit;
3853 SmallVector<const MachineInstr *, 7> Ors = {Root};
3854
3855 // In the "worst" case, we're dealing with a load for each byte. So, there
3856 // are at most #bytes - 1 ORs.
3857 const unsigned MaxIter =
3858 MRI.getType(Reg: Root->getOperand(i: 0).getReg()).getSizeInBytes() - 1;
3859 for (unsigned Iter = 0; Iter < MaxIter; ++Iter) {
3860 if (Ors.empty())
3861 break;
3862 const MachineInstr *Curr = Ors.pop_back_val();
3863 Register OrLHS = Curr->getOperand(i: 1).getReg();
3864 Register OrRHS = Curr->getOperand(i: 2).getReg();
3865
    // In the combine, we want to eliminate the entire tree.
3867 if (!MRI.hasOneNonDBGUse(RegNo: OrLHS) || !MRI.hasOneNonDBGUse(RegNo: OrRHS))
3868 return std::nullopt;
3869
3870 // If it's a G_OR, save it and continue to walk. If it's not, then it's
3871 // something that may be a load + arithmetic.
3872 if (const MachineInstr *Or = getOpcodeDef(Opcode: TargetOpcode::G_OR, Reg: OrLHS, MRI))
3873 Ors.push_back(Elt: Or);
3874 else
3875 RegsToVisit.push_back(Elt: OrLHS);
3876 if (const MachineInstr *Or = getOpcodeDef(Opcode: TargetOpcode::G_OR, Reg: OrRHS, MRI))
3877 Ors.push_back(Elt: Or);
3878 else
3879 RegsToVisit.push_back(Elt: OrRHS);
3880 }
3881
3882 // We're going to try and merge each register into a wider power-of-2 type,
3883 // so we ought to have an even number of registers.
3884 if (RegsToVisit.empty() || RegsToVisit.size() % 2 != 0)
3885 return std::nullopt;
3886 return RegsToVisit;
3887}
3888
3889/// Helper function for findLoadOffsetsForLoadOrCombine.
3890///
3891/// Check if \p Reg is the result of loading a \p MemSizeInBits wide value,
3892/// and then moving that value into a specific byte offset.
3893///
3894/// e.g. x[i] << 24
3895///
3896/// \returns The load instruction and the byte offset it is moved into.
3897static std::optional<std::pair<GZExtLoad *, int64_t>>
3898matchLoadAndBytePosition(Register Reg, unsigned MemSizeInBits,
3899 const MachineRegisterInfo &MRI) {
3900 assert(MRI.hasOneNonDBGUse(Reg) &&
3901 "Expected Reg to only have one non-debug use?");
3902 Register MaybeLoad;
3903 int64_t Shift;
3904 if (!mi_match(R: Reg, MRI,
3905 P: m_OneNonDBGUse(SP: m_GShl(L: m_Reg(R&: MaybeLoad), R: m_ICst(Cst&: Shift))))) {
3906 Shift = 0;
3907 MaybeLoad = Reg;
3908 }
3909
3910 if (Shift % MemSizeInBits != 0)
3911 return std::nullopt;
3912
3913 // TODO: Handle other types of loads.
3914 auto *Load = getOpcodeDef<GZExtLoad>(Reg: MaybeLoad, MRI);
3915 if (!Load)
3916 return std::nullopt;
3917
3918 if (!Load->isUnordered() || Load->getMemSizeInBits() != MemSizeInBits)
3919 return std::nullopt;
3920
3921 return std::make_pair(x&: Load, y: Shift / MemSizeInBits);
3922}
3923
3924std::optional<std::tuple<GZExtLoad *, int64_t, GZExtLoad *>>
3925CombinerHelper::findLoadOffsetsForLoadOrCombine(
3926 SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx,
3927 const SmallVector<Register, 8> &RegsToVisit,
3928 const unsigned MemSizeInBits) const {
3929
3930 // Each load found for the pattern. There should be one for each RegsToVisit.
3931 SmallSetVector<const MachineInstr *, 8> Loads;
3932
3933 // The lowest index used in any load. (The lowest "i" for each x[i].)
3934 int64_t LowestIdx = INT64_MAX;
3935
3936 // The load which uses the lowest index.
3937 GZExtLoad *LowestIdxLoad = nullptr;
3938
3939 // Keeps track of the load indices we see. We shouldn't see any indices twice.
3940 SmallSet<int64_t, 8> SeenIdx;
3941
3942 // Ensure each load is in the same MBB.
3943 // TODO: Support multiple MachineBasicBlocks.
3944 MachineBasicBlock *MBB = nullptr;
3945 const MachineMemOperand *MMO = nullptr;
3946
3947 // Earliest instruction-order load in the pattern.
3948 GZExtLoad *EarliestLoad = nullptr;
3949
3950 // Latest instruction-order load in the pattern.
3951 GZExtLoad *LatestLoad = nullptr;
3952
3953 // Base pointer which every load should share.
3954 Register BasePtr;
3955
3956 // We want to find a load for each register. Each load should have some
3957 // appropriate bit twiddling arithmetic. During this loop, we will also keep
3958 // track of the load which uses the lowest index. Later, we will check if we
3959 // can use its pointer in the final, combined load.
3960 for (auto Reg : RegsToVisit) {
    // Find the load, and find the byte position its value will end up in
    // (e.g. after being shifted).
3963 auto LoadAndPos = matchLoadAndBytePosition(Reg, MemSizeInBits, MRI);
3964 if (!LoadAndPos)
3965 return std::nullopt;
3966 GZExtLoad *Load;
3967 int64_t DstPos;
3968 std::tie(args&: Load, args&: DstPos) = *LoadAndPos;
3969
3970 // TODO: Handle multiple MachineBasicBlocks. Currently not handled because
3971 // it is difficult to check for stores/calls/etc between loads.
3972 MachineBasicBlock *LoadMBB = Load->getParent();
3973 if (!MBB)
3974 MBB = LoadMBB;
3975 if (LoadMBB != MBB)
3976 return std::nullopt;
3977
3978 // Make sure that the MachineMemOperands of every seen load are compatible.
3979 auto &LoadMMO = Load->getMMO();
3980 if (!MMO)
3981 MMO = &LoadMMO;
3982 if (MMO->getAddrSpace() != LoadMMO.getAddrSpace())
3983 return std::nullopt;
3984
3985 // Find out what the base pointer and index for the load is.
3986 Register LoadPtr;
3987 int64_t Idx;
3988 if (!mi_match(R: Load->getOperand(i: 1).getReg(), MRI,
3989 P: m_GPtrAdd(L: m_Reg(R&: LoadPtr), R: m_ICst(Cst&: Idx)))) {
3990 LoadPtr = Load->getOperand(i: 1).getReg();
3991 Idx = 0;
3992 }
3993
3994 // Don't combine things like a[i], a[i] -> a bigger load.
3995 if (!SeenIdx.insert(V: Idx).second)
3996 return std::nullopt;
3997
3998 // Every load must share the same base pointer; don't combine things like:
3999 //
4000 // a[i], b[i + 1] -> a bigger load.
4001 if (!BasePtr.isValid())
4002 BasePtr = LoadPtr;
4003 if (BasePtr != LoadPtr)
4004 return std::nullopt;
4005
4006 if (Idx < LowestIdx) {
4007 LowestIdx = Idx;
4008 LowestIdxLoad = Load;
4009 }
4010
4011 // Keep track of the byte offset that this load ends up at. If we have seen
4012 // the byte offset, then stop here. We do not want to combine:
4013 //
4014 // a[i] << 16, a[i + k] << 16 -> a bigger load.
4015 if (!MemOffset2Idx.try_emplace(Key: DstPos, Args&: Idx).second)
4016 return std::nullopt;
4017 Loads.insert(X: Load);
4018
4019 // Keep track of the position of the earliest/latest loads in the pattern.
4020 // We will check that there are no load fold barriers between them later
4021 // on.
4022 //
4023 // FIXME: Is there a better way to check for load fold barriers?
4024 if (!EarliestLoad || dominates(DefMI: *Load, UseMI: *EarliestLoad))
4025 EarliestLoad = Load;
4026 if (!LatestLoad || dominates(DefMI: *LatestLoad, UseMI: *Load))
4027 LatestLoad = Load;
4028 }
4029
4030 // We found a load for each register. Let's check if each load satisfies the
4031 // pattern.
4032 assert(Loads.size() == RegsToVisit.size() &&
4033 "Expected to find a load for each register?");
4034 assert(EarliestLoad != LatestLoad && EarliestLoad &&
4035 LatestLoad && "Expected at least two loads?");
4036
4037 // Check if there are any stores, calls, etc. between any of the loads. If
4038 // there are, then we can't safely perform the combine.
4039 //
  // MaxIter is chosen based on the (worst case) number of iterations it
  // typically takes to succeed in the LLVM test suite, plus some padding.
4042 //
4043 // FIXME: Is there a better way to check for load fold barriers?
4044 const unsigned MaxIter = 20;
4045 unsigned Iter = 0;
4046 for (const auto &MI : instructionsWithoutDebug(It: EarliestLoad->getIterator(),
4047 End: LatestLoad->getIterator())) {
4048 if (Loads.count(key: &MI))
4049 continue;
4050 if (MI.isLoadFoldBarrier())
4051 return std::nullopt;
4052 if (Iter++ == MaxIter)
4053 return std::nullopt;
4054 }
4055
4056 return std::make_tuple(args&: LowestIdxLoad, args&: LowestIdx, args&: LatestLoad);
4057}
4058
4059bool CombinerHelper::matchLoadOrCombine(
4060 MachineInstr &MI,
4061 std::function<void(MachineIRBuilder &)> &MatchInfo) const {
4062 assert(MI.getOpcode() == TargetOpcode::G_OR);
4063 MachineFunction &MF = *MI.getMF();
4064 // Assuming a little-endian target, transform:
4065 // s8 *a = ...
4066 // s32 val = a[0] | (a[1] << 8) | (a[2] << 16) | (a[3] << 24)
4067 // =>
4068 // s32 val = *((i32)a)
4069 //
4070 // s8 *a = ...
4071 // s32 val = (a[0] << 24) | (a[1] << 16) | (a[2] << 8) | a[3]
4072 // =>
4073 // s32 val = BSWAP(*((s32)a))
4074 Register Dst = MI.getOperand(i: 0).getReg();
4075 LLT Ty = MRI.getType(Reg: Dst);
4076 if (Ty.isVector())
4077 return false;
4078
4079 // We need to combine at least two loads into this type. Since the smallest
4080 // possible load is into a byte, we need at least a 16-bit wide type.
4081 const unsigned WideMemSizeInBits = Ty.getSizeInBits();
4082 if (WideMemSizeInBits < 16 || WideMemSizeInBits % 8 != 0)
4083 return false;
4084
4085 // Match a collection of non-OR instructions in the pattern.
4086 auto RegsToVisit = findCandidatesForLoadOrCombine(Root: &MI);
4087 if (!RegsToVisit)
4088 return false;
4089
  // We have a collection of non-OR instructions. Figure out how wide each of
  // the small loads should be based on the number of potential loads we
  // found.
4093 const unsigned NarrowMemSizeInBits = WideMemSizeInBits / RegsToVisit->size();
4094 if (NarrowMemSizeInBits % 8 != 0)
4095 return false;
4096
4097 // Check if each register feeding into each OR is a load from the same
4098 // base pointer + some arithmetic.
4099 //
4100 // e.g. a[0], a[1] << 8, a[2] << 16, etc.
4101 //
4102 // Also verify that each of these ends up putting a[i] into the same memory
4103 // offset as a load into a wide type would.
4104 SmallDenseMap<int64_t, int64_t, 8> MemOffset2Idx;
4105 GZExtLoad *LowestIdxLoad, *LatestLoad;
4106 int64_t LowestIdx;
4107 auto MaybeLoadInfo = findLoadOffsetsForLoadOrCombine(
4108 MemOffset2Idx, RegsToVisit: *RegsToVisit, MemSizeInBits: NarrowMemSizeInBits);
4109 if (!MaybeLoadInfo)
4110 return false;
4111 std::tie(args&: LowestIdxLoad, args&: LowestIdx, args&: LatestLoad) = *MaybeLoadInfo;
4112
4113 // We have a bunch of loads being OR'd together. Using the addresses + offsets
4114 // we found before, check if this corresponds to a big or little endian byte
4115 // pattern. If it does, then we can represent it using a load + possibly a
4116 // BSWAP.
4117 bool IsBigEndianTarget = MF.getDataLayout().isBigEndian();
4118 std::optional<bool> IsBigEndian = isBigEndian(MemOffset2Idx, LowestIdx);
4119 if (!IsBigEndian)
4120 return false;
4121 bool NeedsBSwap = IsBigEndianTarget != *IsBigEndian;
4122 if (NeedsBSwap && !isLegalOrBeforeLegalizer(Query: {TargetOpcode::G_BSWAP, {Ty}}))
4123 return false;
4124
4125 // Make sure that the load from the lowest index produces offset 0 in the
4126 // final value.
4127 //
4128 // This ensures that we won't combine something like this:
4129 //
4130 // load x[i] -> byte 2
4131 // load x[i+1] -> byte 0 ---> wide_load x[i]
4132 // load x[i+2] -> byte 1
4133 const unsigned NumLoadsInTy = WideMemSizeInBits / NarrowMemSizeInBits;
4134 const unsigned ZeroByteOffset =
4135 *IsBigEndian
4136 ? bigEndianByteAt(ByteWidth: NumLoadsInTy, I: 0)
4137 : littleEndianByteAt(ByteWidth: NumLoadsInTy, I: 0);
4138 auto ZeroOffsetIdx = MemOffset2Idx.find(Val: ZeroByteOffset);
4139 if (ZeroOffsetIdx == MemOffset2Idx.end() ||
4140 ZeroOffsetIdx->second != LowestIdx)
4141 return false;
4142
  // We will reuse the pointer from the load which ends up at byte offset 0. It
  // may not use index 0.
4145 Register Ptr = LowestIdxLoad->getPointerReg();
4146 const MachineMemOperand &MMO = LowestIdxLoad->getMMO();
4147 LegalityQuery::MemDesc MMDesc(MMO);
4148 MMDesc.MemoryTy = Ty;
4149 if (!isLegalOrBeforeLegalizer(
4150 Query: {TargetOpcode::G_LOAD, {Ty, MRI.getType(Reg: Ptr)}, {MMDesc}}))
4151 return false;
4152 auto PtrInfo = MMO.getPointerInfo();
4153 auto *NewMMO = MF.getMachineMemOperand(MMO: &MMO, PtrInfo, Size: WideMemSizeInBits / 8);
4154
4155 // Load must be allowed and fast on the target.
4156 LLVMContext &C = MF.getFunction().getContext();
4157 auto &DL = MF.getDataLayout();
4158 unsigned Fast = 0;
4159 if (!getTargetLowering().allowsMemoryAccess(Context&: C, DL, Ty, MMO: *NewMMO, Fast: &Fast) ||
4160 !Fast)
4161 return false;
4162
4163 MatchInfo = [=](MachineIRBuilder &MIB) {
4164 MIB.setInstrAndDebugLoc(*LatestLoad);
4165 Register LoadDst = NeedsBSwap ? MRI.cloneVirtualRegister(VReg: Dst) : Dst;
4166 MIB.buildLoad(Res: LoadDst, Addr: Ptr, MMO&: *NewMMO);
4167 if (NeedsBSwap)
4168 MIB.buildBSwap(Dst, Src0: LoadDst);
4169 };
4170 return true;
4171}
4172
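// Push an extend through a G_PHI when it is likely to fold into the incoming
// values, e.g.
//   %phi:_(s8) = G_PHI %a(s8), %bb1, %b(s8), %bb2
//   %ext:_(s32) = G_SEXT %phi
// ==>
//   (in %bb1) %exta:_(s32) = G_SEXT %a
//   (in %bb2) %extb:_(s32) = G_SEXT %b
//   %ext:_(s32) = G_PHI %exta, %bb1, %extb, %bb2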
4173bool CombinerHelper::matchExtendThroughPhis(MachineInstr &MI,
4174 MachineInstr *&ExtMI) const {
4175 auto &PHI = cast<GPhi>(Val&: MI);
4176 Register DstReg = PHI.getReg(Idx: 0);
4177
  // TODO: Extending a vector may be expensive; don't do this until heuristics
  // are better.
4180 if (MRI.getType(Reg: DstReg).isVector())
4181 return false;
4182
4183 // Try to match a phi, whose only use is an extend.
4184 if (!MRI.hasOneNonDBGUse(RegNo: DstReg))
4185 return false;
4186 ExtMI = &*MRI.use_instr_nodbg_begin(RegNo: DstReg);
4187 switch (ExtMI->getOpcode()) {
4188 case TargetOpcode::G_ANYEXT:
4189 return true; // G_ANYEXT is usually free.
4190 case TargetOpcode::G_ZEXT:
4191 case TargetOpcode::G_SEXT:
4192 break;
4193 default:
4194 return false;
4195 }
4196
4197 // If the target is likely to fold this extend away, don't propagate.
4198 if (Builder.getTII().isExtendLikelyToBeFolded(ExtMI&: *ExtMI, MRI))
4199 return false;
4200
4201 // We don't want to propagate the extends unless there's a good chance that
4202 // they'll be optimized in some way.
4203 // Collect the unique incoming values.
4204 SmallPtrSet<MachineInstr *, 4> InSrcs;
4205 for (unsigned I = 0; I < PHI.getNumIncomingValues(); ++I) {
4206 auto *DefMI = getDefIgnoringCopies(Reg: PHI.getIncomingValue(I), MRI);
4207 switch (DefMI->getOpcode()) {
4208 case TargetOpcode::G_LOAD:
4209 case TargetOpcode::G_TRUNC:
4210 case TargetOpcode::G_SEXT:
4211 case TargetOpcode::G_ZEXT:
4212 case TargetOpcode::G_ANYEXT:
4213 case TargetOpcode::G_CONSTANT:
4214 InSrcs.insert(Ptr: DefMI);
4215 // Don't try to propagate if there are too many places to create new
4216 // extends, chances are it'll increase code size.
4217 if (InSrcs.size() > 2)
4218 return false;
4219 break;
4220 default:
4221 return false;
4222 }
4223 }
4224 return true;
4225}
4226
4227void CombinerHelper::applyExtendThroughPhis(MachineInstr &MI,
4228 MachineInstr *&ExtMI) const {
4229 auto &PHI = cast<GPhi>(Val&: MI);
4230 Register DstReg = ExtMI->getOperand(i: 0).getReg();
4231 LLT ExtTy = MRI.getType(Reg: DstReg);
4232
  // Propagate the extension into each incoming register's defining block.
4234 // Use a SetVector here because PHIs can have duplicate edges, and we want
4235 // deterministic iteration order.
4236 SmallSetVector<MachineInstr *, 8> SrcMIs;
4237 SmallDenseMap<MachineInstr *, MachineInstr *, 8> OldToNewSrcMap;
4238 for (unsigned I = 0; I < PHI.getNumIncomingValues(); ++I) {
4239 auto SrcReg = PHI.getIncomingValue(I);
4240 auto *SrcMI = MRI.getVRegDef(Reg: SrcReg);
4241 if (!SrcMIs.insert(X: SrcMI))
4242 continue;
4243
4244 // Build an extend after each src inst.
4245 auto *MBB = SrcMI->getParent();
4246 MachineBasicBlock::iterator InsertPt = ++SrcMI->getIterator();
4247 if (InsertPt != MBB->end() && InsertPt->isPHI())
4248 InsertPt = MBB->getFirstNonPHI();
4249
4250 Builder.setInsertPt(MBB&: *SrcMI->getParent(), II: InsertPt);
4251 Builder.setDebugLoc(MI.getDebugLoc());
4252 auto NewExt = Builder.buildExtOrTrunc(ExtOpc: ExtMI->getOpcode(), Res: ExtTy, Op: SrcReg);
4253 OldToNewSrcMap[SrcMI] = NewExt;
4254 }
4255
4256 // Create a new phi with the extended inputs.
4257 Builder.setInstrAndDebugLoc(MI);
4258 auto NewPhi = Builder.buildInstrNoInsert(Opcode: TargetOpcode::G_PHI);
4259 NewPhi.addDef(RegNo: DstReg);
4260 for (const MachineOperand &MO : llvm::drop_begin(RangeOrContainer: MI.operands())) {
4261 if (!MO.isReg()) {
4262 NewPhi.addMBB(MBB: MO.getMBB());
4263 continue;
4264 }
4265 auto *NewSrc = OldToNewSrcMap[MRI.getVRegDef(Reg: MO.getReg())];
4266 NewPhi.addUse(RegNo: NewSrc->getOperand(i: 0).getReg());
4267 }
4268 Builder.insertInstr(MIB: NewPhi);
4269 ExtMI->eraseFromParent();
4270}
4271
4272bool CombinerHelper::matchExtractVecEltBuildVec(MachineInstr &MI,
4273 Register &Reg) const {
4274 assert(MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT);
4275 // If we have a constant index, look for a G_BUILD_VECTOR source
4276 // and find the source register that the index maps to.
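  // e.g.
  //   %vec:_(<2 x s32>) = G_BUILD_VECTOR %a(s32), %b(s32)
  //   %elt:_(s32) = G_EXTRACT_VECTOR_ELT %vec, 1
  // ==>
  //   replace %elt with %b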
4277 Register SrcVec = MI.getOperand(i: 1).getReg();
4278 LLT SrcTy = MRI.getType(Reg: SrcVec);
4279 if (SrcTy.isScalableVector())
4280 return false;
4281
4282 auto Cst = getIConstantVRegValWithLookThrough(VReg: MI.getOperand(i: 2).getReg(), MRI);
4283 if (!Cst || Cst->Value.getZExtValue() >= SrcTy.getNumElements())
4284 return false;
4285
4286 unsigned VecIdx = Cst->Value.getZExtValue();
4287
4288 // Check if we have a build_vector or build_vector_trunc with an optional
4289 // trunc in front.
4290 MachineInstr *SrcVecMI = MRI.getVRegDef(Reg: SrcVec);
4291 if (SrcVecMI->getOpcode() == TargetOpcode::G_TRUNC) {
4292 SrcVecMI = MRI.getVRegDef(Reg: SrcVecMI->getOperand(i: 1).getReg());
4293 }
4294
4295 if (SrcVecMI->getOpcode() != TargetOpcode::G_BUILD_VECTOR &&
4296 SrcVecMI->getOpcode() != TargetOpcode::G_BUILD_VECTOR_TRUNC)
4297 return false;
4298
4299 EVT Ty(getMVTForLLT(Ty: SrcTy));
4300 if (!MRI.hasOneNonDBGUse(RegNo: SrcVec) &&
4301 !getTargetLowering().aggressivelyPreferBuildVectorSources(VecVT: Ty))
4302 return false;
4303
4304 Reg = SrcVecMI->getOperand(i: VecIdx + 1).getReg();
4305 return true;
4306}
4307
4308void CombinerHelper::applyExtractVecEltBuildVec(MachineInstr &MI,
4309 Register &Reg) const {
4310 // Check the type of the register, since it may have come from a
4311 // G_BUILD_VECTOR_TRUNC.
4312 LLT ScalarTy = MRI.getType(Reg);
4313 Register DstReg = MI.getOperand(i: 0).getReg();
4314 LLT DstTy = MRI.getType(Reg: DstReg);
4315
4316 if (ScalarTy != DstTy) {
4317 assert(ScalarTy.getSizeInBits() > DstTy.getSizeInBits());
4318 Builder.buildTrunc(Res: DstReg, Op: Reg);
4319 MI.eraseFromParent();
4320 return;
4321 }
4322 replaceSingleDefInstWithReg(MI, Replacement: Reg);
4323}
4324
4325bool CombinerHelper::matchExtractAllEltsFromBuildVector(
4326 MachineInstr &MI,
4327 SmallVectorImpl<std::pair<Register, MachineInstr *>> &SrcDstPairs) const {
4328 assert(MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
4329 // This combine tries to find build_vector's which have every source element
4330 // extracted using G_EXTRACT_VECTOR_ELT. This can happen when transforms like
4331 // the masked load scalarization is run late in the pipeline. There's already
4332 // a combine for a similar pattern starting from the extract, but that
4333 // doesn't attempt to do it if there are multiple uses of the build_vector,
4334 // which in this case is true. Starting the combine from the build_vector
4335 // feels more natural than trying to find sibling nodes of extracts.
4336 // E.g.
4337 // %vec(<4 x s32>) = G_BUILD_VECTOR %s1(s32), %s2, %s3, %s4
4338 // %ext1 = G_EXTRACT_VECTOR_ELT %vec, 0
4339 // %ext2 = G_EXTRACT_VECTOR_ELT %vec, 1
4340 // %ext3 = G_EXTRACT_VECTOR_ELT %vec, 2
4341 // %ext4 = G_EXTRACT_VECTOR_ELT %vec, 3
4342 // ==>
4343 // replace ext{1,2,3,4} with %s{1,2,3,4}
4344
4345 Register DstReg = MI.getOperand(i: 0).getReg();
4346 LLT DstTy = MRI.getType(Reg: DstReg);
4347 unsigned NumElts = DstTy.getNumElements();
4348
4349 SmallBitVector ExtractedElts(NumElts);
4350 for (MachineInstr &II : MRI.use_nodbg_instructions(Reg: DstReg)) {
4351 if (II.getOpcode() != TargetOpcode::G_EXTRACT_VECTOR_ELT)
4352 return false;
4353 auto Cst = getIConstantVRegVal(VReg: II.getOperand(i: 2).getReg(), MRI);
4354 if (!Cst)
4355 return false;
4356 unsigned Idx = Cst->getZExtValue();
4357 if (Idx >= NumElts)
4358 return false; // Out of range.
4359 ExtractedElts.set(Idx);
4360 SrcDstPairs.emplace_back(
4361 Args: std::make_pair(x: MI.getOperand(i: Idx + 1).getReg(), y: &II));
4362 }
4363 // Match if every element was extracted.
4364 return ExtractedElts.all();
4365}
4366
4367void CombinerHelper::applyExtractAllEltsFromBuildVector(
4368 MachineInstr &MI,
4369 SmallVectorImpl<std::pair<Register, MachineInstr *>> &SrcDstPairs) const {
4370 assert(MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
4371 for (auto &Pair : SrcDstPairs) {
4372 auto *ExtMI = Pair.second;
4373 replaceRegWith(MRI, FromReg: ExtMI->getOperand(i: 0).getReg(), ToReg: Pair.first);
4374 ExtMI->eraseFromParent();
4375 }
4376 MI.eraseFromParent();
4377}
4378
4379void CombinerHelper::applyBuildFn(
4380 MachineInstr &MI,
4381 std::function<void(MachineIRBuilder &)> &MatchInfo) const {
4382 applyBuildFnNoErase(MI, MatchInfo);
4383 MI.eraseFromParent();
4384}
4385
4386void CombinerHelper::applyBuildFnNoErase(
4387 MachineInstr &MI,
4388 std::function<void(MachineIRBuilder &)> &MatchInfo) const {
4389 MatchInfo(Builder);
4390}
4391
4392bool CombinerHelper::matchOrShiftToFunnelShift(MachineInstr &MI,
4393 BuildFnTy &MatchInfo) const {
4394 assert(MI.getOpcode() == TargetOpcode::G_OR);
4395
4396 Register Dst = MI.getOperand(i: 0).getReg();
4397 LLT Ty = MRI.getType(Reg: Dst);
4398 unsigned BitWidth = Ty.getScalarSizeInBits();
4399
4400 Register ShlSrc, ShlAmt, LShrSrc, LShrAmt, Amt;
4401 unsigned FshOpc = 0;
4402
4403 // Match (or (shl ...), (lshr ...)).
4404 if (!mi_match(R: Dst, MRI,
4405 // m_GOr() handles the commuted version as well.
4406 P: m_GOr(L: m_GShl(L: m_Reg(R&: ShlSrc), R: m_Reg(R&: ShlAmt)),
4407 R: m_GLShr(L: m_Reg(R&: LShrSrc), R: m_Reg(R&: LShrAmt)))))
4408 return false;
4409
  // Given constants C0 and C1 such that C0 + C1 equals the bit width:
  // (or (shl x, C0), (lshr y, C1)) -> (fshl x, y, C0) or (fshr x, y, C1)
  int64_t CstShlAmt, CstLShrAmt;
  if (mi_match(ShlAmt, MRI, m_ICstOrSplat(CstShlAmt)) &&
      mi_match(LShrAmt, MRI, m_ICstOrSplat(CstLShrAmt)) &&
      CstShlAmt + CstLShrAmt == BitWidth) {
    FshOpc = TargetOpcode::G_FSHR;
    Amt = LShrAmt;

  } else if (mi_match(LShrAmt, MRI,
                      m_GSub(m_SpecificICstOrSplat(BitWidth), m_Reg(Amt))) &&
             ShlAmt == Amt) {
    // (or (shl x, amt), (lshr y, (sub bw, amt))) -> (fshl x, y, amt)
    FshOpc = TargetOpcode::G_FSHL;

  } else if (mi_match(ShlAmt, MRI,
                      m_GSub(m_SpecificICstOrSplat(BitWidth), m_Reg(Amt))) &&
             LShrAmt == Amt) {
    // (or (shl x, (sub bw, amt)), (lshr y, amt)) -> (fshr x, y, amt)
    FshOpc = TargetOpcode::G_FSHR;

  } else {
    return false;
  }

  LLT AmtTy = MRI.getType(Amt);
  if (!isLegalOrBeforeLegalizer({FshOpc, {Ty, AmtTy}}))
    return false;

  MatchInfo = [=](MachineIRBuilder &B) {
    B.buildInstr(FshOpc, {Dst}, {ShlSrc, LShrSrc, Amt});
  };
  return true;
}

/// Match an FSHL or FSHR that can be combined to a ROTR or ROTL rotate.
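/// e.g. (fshl x, x, amt) -> (rotl x, amt) when both inputs are the same reg.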
bool CombinerHelper::matchFunnelShiftToRotate(MachineInstr &MI) const {
  unsigned Opc = MI.getOpcode();
  assert(Opc == TargetOpcode::G_FSHL || Opc == TargetOpcode::G_FSHR);
  Register X = MI.getOperand(1).getReg();
  Register Y = MI.getOperand(2).getReg();
  if (X != Y)
    return false;
  unsigned RotateOpc =
      Opc == TargetOpcode::G_FSHL ? TargetOpcode::G_ROTL : TargetOpcode::G_ROTR;
  return isLegalOrBeforeLegalizer(
      {RotateOpc, {MRI.getType(X), MRI.getType(Y)}});
}

void CombinerHelper::applyFunnelShiftToRotate(MachineInstr &MI) const {
  unsigned Opc = MI.getOpcode();
  assert(Opc == TargetOpcode::G_FSHL || Opc == TargetOpcode::G_FSHR);
  bool IsFSHL = Opc == TargetOpcode::G_FSHL;
  Observer.changingInstr(MI);
  MI.setDesc(Builder.getTII().get(IsFSHL ? TargetOpcode::G_ROTL
                                         : TargetOpcode::G_ROTR));
  MI.removeOperand(2);
  Observer.changedInstr(MI);
}

// Fold (rot x, c) -> (rot x, c % BitSize)
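// e.g. for 32 bits: (rotl x, 37) -> (rotl x, 5), since rotates are modular in
// the bit width.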
bool CombinerHelper::matchRotateOutOfRange(MachineInstr &MI) const {
  assert(MI.getOpcode() == TargetOpcode::G_ROTL ||
         MI.getOpcode() == TargetOpcode::G_ROTR);
  unsigned Bitsize =
      MRI.getType(MI.getOperand(0).getReg()).getScalarSizeInBits();
  Register AmtReg = MI.getOperand(2).getReg();
  bool OutOfRange = false;
  auto MatchOutOfRange = [Bitsize, &OutOfRange](const Constant *C) {
    if (auto *CI = dyn_cast<ConstantInt>(C))
      OutOfRange |= CI->getValue().uge(Bitsize);
    return true;
  };
  return matchUnaryPredicate(MRI, AmtReg, MatchOutOfRange) && OutOfRange;
}

void CombinerHelper::applyRotateOutOfRange(MachineInstr &MI) const {
  assert(MI.getOpcode() == TargetOpcode::G_ROTL ||
         MI.getOpcode() == TargetOpcode::G_ROTR);
  unsigned Bitsize =
      MRI.getType(MI.getOperand(0).getReg()).getScalarSizeInBits();
  Register Amt = MI.getOperand(2).getReg();
  LLT AmtTy = MRI.getType(Amt);
  auto Bits = Builder.buildConstant(AmtTy, Bitsize);
  Amt = Builder.buildURem(AmtTy, MI.getOperand(2).getReg(), Bits).getReg(0);
  Observer.changingInstr(MI);
  MI.getOperand(2).setReg(Amt);
  Observer.changedInstr(MI);
}

bool CombinerHelper::matchICmpToTrueFalseKnownBits(MachineInstr &MI,
                                                   int64_t &MatchInfo) const {
  assert(MI.getOpcode() == TargetOpcode::G_ICMP);
  auto Pred = static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());

  // We want to avoid calling KnownBits on the LHS if possible, as this combine
  // has no filter and runs on every G_ICMP instruction. We can avoid calling
  // KnownBits on the LHS in two cases:
  //
  // - The RHS is unknown: Constants are always on RHS. If the RHS is unknown
  //   we cannot do any transforms so we can safely bail out early.
  // - The RHS is zero: we don't need to know the LHS to do unsigned <0 and
  //   >=0.
  auto KnownRHS = VT->getKnownBits(MI.getOperand(3).getReg());
  if (KnownRHS.isUnknown())
    return false;

  std::optional<bool> KnownVal;
  if (KnownRHS.isZero()) {
    // ? uge 0 -> always true
    // ? ult 0 -> always false
    if (Pred == CmpInst::ICMP_UGE)
      KnownVal = true;
    else if (Pred == CmpInst::ICMP_ULT)
      KnownVal = false;
  }

  if (!KnownVal) {
    auto KnownLHS = VT->getKnownBits(MI.getOperand(2).getReg());
    KnownVal = ICmpInst::compare(KnownLHS, KnownRHS, Pred);
  }

  if (!KnownVal)
    return false;
  MatchInfo =
      *KnownVal
          ? getICmpTrueVal(getTargetLowering(),
                           /*IsVector=*/
                           MRI.getType(MI.getOperand(0).getReg()).isVector(),
                           /*IsFP=*/false)
          : 0;
  return true;
}

bool CombinerHelper::matchICmpToLHSKnownBits(
    MachineInstr &MI,
    std::function<void(MachineIRBuilder &)> &MatchInfo) const {
  assert(MI.getOpcode() == TargetOpcode::G_ICMP);
  // Given:
  //
  // %x = G_WHATEVER (... x is known to be 0 or 1 ...)
  // %cmp = G_ICMP ne %x, 0
  //
  // Or:
  //
  // %x = G_WHATEVER (... x is known to be 0 or 1 ...)
  // %cmp = G_ICMP eq %x, 1
  //
  // We can replace %cmp with %x assuming true is 1 on the target.
  auto Pred = static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
  if (!CmpInst::isEquality(Pred))
    return false;
  Register Dst = MI.getOperand(0).getReg();
  LLT DstTy = MRI.getType(Dst);
  if (getICmpTrueVal(getTargetLowering(), DstTy.isVector(),
                     /*IsFP=*/false) != 1)
    return false;
  int64_t OneOrZero = Pred == CmpInst::ICMP_EQ;
  if (!mi_match(MI.getOperand(3).getReg(), MRI, m_SpecificICst(OneOrZero)))
    return false;
  Register LHS = MI.getOperand(2).getReg();
  auto KnownLHS = VT->getKnownBits(LHS);
  if (KnownLHS.getMinValue() != 0 || KnownLHS.getMaxValue() != 1)
    return false;
  // Make sure replacing Dst with the LHS is a legal operation.
  LLT LHSTy = MRI.getType(LHS);
  unsigned LHSSize = LHSTy.getSizeInBits();
  unsigned DstSize = DstTy.getSizeInBits();
  unsigned Op = TargetOpcode::COPY;
  if (DstSize != LHSSize)
    Op = DstSize < LHSSize ? TargetOpcode::G_TRUNC : TargetOpcode::G_ZEXT;
  if (!isLegalOrBeforeLegalizer({Op, {DstTy, LHSTy}}))
    return false;
  MatchInfo = [=](MachineIRBuilder &B) { B.buildInstr(Op, {Dst}, {LHS}); };
  return true;
}

// Replace (and (or x, c1), c2) with (and x, c2) iff c1 & c2 == 0
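// e.g. (and (or x, 0xF0), 0x0F) -> (and x, 0x0F): the OR cannot turn on any
// bit that survives the mask.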
bool CombinerHelper::matchAndOrDisjointMask(
    MachineInstr &MI,
    std::function<void(MachineIRBuilder &)> &MatchInfo) const {
  assert(MI.getOpcode() == TargetOpcode::G_AND);

  // Ignore vector types to simplify matching the two constants.
  // TODO: do this for vectors and scalars via a demanded bits analysis.
  LLT Ty = MRI.getType(MI.getOperand(0).getReg());
  if (Ty.isVector())
    return false;

  Register Src;
  Register AndMaskReg;
  int64_t AndMaskBits;
  int64_t OrMaskBits;
  if (!mi_match(MI, MRI,
                m_GAnd(m_GOr(m_Reg(Src), m_ICst(OrMaskBits)),
                       m_all_of(m_ICst(AndMaskBits), m_Reg(AndMaskReg)))))
    return false;

  // Check if OrMask could turn on any bits in Src.
  if (AndMaskBits & OrMaskBits)
    return false;

  MatchInfo = [=, &MI](MachineIRBuilder &B) {
    Observer.changingInstr(MI);
    // Canonicalize the result to have the constant on the RHS.
    if (MI.getOperand(1).getReg() == AndMaskReg)
      MI.getOperand(2).setReg(AndMaskReg);
    MI.getOperand(1).setReg(Src);
    Observer.changedInstr(MI);
  };
  return true;
}

/// Form a G_SBFX from a G_SEXT_INREG fed by a right shift.
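/// e.g. %sh = G_ASHR %x, 4; %d = G_SEXT_INREG %sh, 8 --> %d = G_SBFX %x, 4, 8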
bool CombinerHelper::matchBitfieldExtractFromSExtInReg(
    MachineInstr &MI,
    std::function<void(MachineIRBuilder &)> &MatchInfo) const {
  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
  Register Dst = MI.getOperand(0).getReg();
  Register Src = MI.getOperand(1).getReg();
  LLT Ty = MRI.getType(Src);
  LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
  if (!LI || !LI->isLegalOrCustom({TargetOpcode::G_SBFX, {Ty, ExtractTy}}))
    return false;
  int64_t Width = MI.getOperand(2).getImm();
  Register ShiftSrc;
  int64_t ShiftImm;
  if (!mi_match(
          Src, MRI,
          m_OneNonDBGUse(m_any_of(m_GAShr(m_Reg(ShiftSrc), m_ICst(ShiftImm)),
                                  m_GLShr(m_Reg(ShiftSrc), m_ICst(ShiftImm))))))
    return false;
  if (ShiftImm < 0 || ShiftImm + Width > Ty.getScalarSizeInBits())
    return false;

  MatchInfo = [=](MachineIRBuilder &B) {
    auto Cst1 = B.buildConstant(ExtractTy, ShiftImm);
    auto Cst2 = B.buildConstant(ExtractTy, Width);
    B.buildSbfx(Dst, ShiftSrc, Cst1, Cst2);
  };
  return true;
}

/// Form a G_UBFX from "(a srl b) & mask", where b and mask are constants.
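/// e.g. %sh = G_LSHR %a, 4; %d = G_AND %sh, 255 --> %d = G_UBFX %a, 4, 8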
bool CombinerHelper::matchBitfieldExtractFromAnd(MachineInstr &MI,
                                                 BuildFnTy &MatchInfo) const {
  GAnd *And = cast<GAnd>(&MI);
  Register Dst = And->getReg(0);
  LLT Ty = MRI.getType(Dst);
  LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
  // Note that isLegalOrBeforeLegalizer is stricter and does not take custom
  // into account.
  if (LI && !LI->isLegalOrCustom({TargetOpcode::G_UBFX, {Ty, ExtractTy}}))
    return false;

  int64_t AndImm, LSBImm;
  Register ShiftSrc;
  const unsigned Size = Ty.getScalarSizeInBits();
  if (!mi_match(And->getReg(0), MRI,
                m_GAnd(m_OneNonDBGUse(m_GLShr(m_Reg(ShiftSrc), m_ICst(LSBImm))),
                       m_ICst(AndImm))))
    return false;

  // The mask is a mask of the low bits iff imm & (imm+1) == 0.
  auto MaybeMask = static_cast<uint64_t>(AndImm);
  if (MaybeMask & (MaybeMask + 1))
    return false;

  // LSB must fit within the register.
  if (static_cast<uint64_t>(LSBImm) >= Size)
    return false;

  uint64_t Width = APInt(Size, AndImm).countr_one();
  MatchInfo = [=](MachineIRBuilder &B) {
    auto WidthCst = B.buildConstant(ExtractTy, Width);
    auto LSBCst = B.buildConstant(ExtractTy, LSBImm);
    B.buildInstr(TargetOpcode::G_UBFX, {Dst}, {ShiftSrc, LSBCst, WidthCst});
  };
  return true;
}

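/// Form a G_SBFX/G_UBFX from a right shift of a left shift,
/// e.g. with 32-bit scalars: (lshr (shl x, 4), 8) -> (ubfx x, 4, 24).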
bool CombinerHelper::matchBitfieldExtractFromShr(
    MachineInstr &MI,
    std::function<void(MachineIRBuilder &)> &MatchInfo) const {
  const unsigned Opcode = MI.getOpcode();
  assert(Opcode == TargetOpcode::G_ASHR || Opcode == TargetOpcode::G_LSHR);

  const Register Dst = MI.getOperand(0).getReg();

  const unsigned ExtrOpcode = Opcode == TargetOpcode::G_ASHR
                                  ? TargetOpcode::G_SBFX
                                  : TargetOpcode::G_UBFX;

  // Check if the type we would use for the extract is legal
  LLT Ty = MRI.getType(Dst);
  LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
  if (!LI || !LI->isLegalOrCustom({ExtrOpcode, {Ty, ExtractTy}}))
    return false;

  Register ShlSrc;
  int64_t ShrAmt;
  int64_t ShlAmt;
  const unsigned Size = Ty.getScalarSizeInBits();

  // Try to match shr (shl x, c1), c2
  if (!mi_match(Dst, MRI,
                m_BinOp(Opcode,
                        m_OneNonDBGUse(m_GShl(m_Reg(ShlSrc), m_ICst(ShlAmt))),
                        m_ICst(ShrAmt))))
    return false;

  // Make sure that the shift sizes can fit a bitfield extract
  if (ShlAmt < 0 || ShlAmt > ShrAmt || ShrAmt >= Size)
    return false;

  // Skip this combine if the G_SEXT_INREG combine could handle it
  if (Opcode == TargetOpcode::G_ASHR && ShlAmt == ShrAmt)
    return false;

  // Calculate start position and width of the extract
  const int64_t Pos = ShrAmt - ShlAmt;
  const int64_t Width = Size - ShrAmt;

  MatchInfo = [=](MachineIRBuilder &B) {
    auto WidthCst = B.buildConstant(ExtractTy, Width);
    auto PosCst = B.buildConstant(ExtractTy, Pos);
    B.buildInstr(ExtrOpcode, {Dst}, {ShlSrc, PosCst, WidthCst});
  };
  return true;
}

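/// Form a G_UBFX from a right shift of a masked value,
/// e.g. (lshr (and x, 0xff0), 4) -> (ubfx x, 4, 8).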
bool CombinerHelper::matchBitfieldExtractFromShrAnd(
    MachineInstr &MI,
    std::function<void(MachineIRBuilder &)> &MatchInfo) const {
  const unsigned Opcode = MI.getOpcode();
  assert(Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_ASHR);

  const Register Dst = MI.getOperand(0).getReg();
  LLT Ty = MRI.getType(Dst);
  LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
  if (LI && !LI->isLegalOrCustom({TargetOpcode::G_UBFX, {Ty, ExtractTy}}))
    return false;

  // Try to match shr (and x, c1), c2
  Register AndSrc;
  int64_t ShrAmt;
  int64_t SMask;
  if (!mi_match(Dst, MRI,
                m_BinOp(Opcode,
                        m_OneNonDBGUse(m_GAnd(m_Reg(AndSrc), m_ICst(SMask))),
                        m_ICst(ShrAmt))))
    return false;

  const unsigned Size = Ty.getScalarSizeInBits();
  if (ShrAmt < 0 || ShrAmt >= Size)
    return false;

  // If the shift subsumes the mask, emit the 0 directly.
  if (0 == (SMask >> ShrAmt)) {
    MatchInfo = [=](MachineIRBuilder &B) {
      B.buildConstant(Dst, 0);
    };
    return true;
  }

  // Check that ubfx can do the extraction, with no holes in the mask.
  uint64_t UMask = SMask;
  UMask |= maskTrailingOnes<uint64_t>(ShrAmt);
  UMask &= maskTrailingOnes<uint64_t>(Size);
  if (!isMask_64(UMask))
    return false;

  // Calculate start position and width of the extract.
  const int64_t Pos = ShrAmt;
  const int64_t Width = llvm::countr_one(UMask) - ShrAmt;

  // It's preferable to keep the shift, rather than form G_SBFX.
  // TODO: remove the G_AND via demanded bits analysis.
  if (Opcode == TargetOpcode::G_ASHR && Width + ShrAmt == Size)
    return false;

  MatchInfo = [=](MachineIRBuilder &B) {
    auto WidthCst = B.buildConstant(ExtractTy, Width);
    auto PosCst = B.buildConstant(ExtractTy, Pos);
    B.buildInstr(TargetOpcode::G_UBFX, {Dst}, {AndSrc, PosCst, WidthCst});
  };
  return true;
}

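/// Return true when folding the two constant offsets of a
/// (G_PTR_ADD (G_PTR_ADD Base, C1), C2) chain would be a pessimization: some
/// load/store user could have folded C2 into its addressing mode, but could
/// not fold the combined offset C1+C2.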
bool CombinerHelper::reassociationCanBreakAddressingModePattern(
    MachineInstr &MI) const {
  auto &PtrAdd = cast<GPtrAdd>(MI);

  Register Src1Reg = PtrAdd.getBaseReg();
  auto *Src1Def = getOpcodeDef<GPtrAdd>(Src1Reg, MRI);
  if (!Src1Def)
    return false;

  Register Src2Reg = PtrAdd.getOffsetReg();

  if (MRI.hasOneNonDBGUse(Src1Reg))
    return false;

  auto C1 = getIConstantVRegVal(Src1Def->getOffsetReg(), MRI);
  if (!C1)
    return false;
  auto C2 = getIConstantVRegVal(Src2Reg, MRI);
  if (!C2)
    return false;

  const APInt &C1APIntVal = *C1;
  const APInt &C2APIntVal = *C2;
  const int64_t CombinedValue = (C1APIntVal + C2APIntVal).getSExtValue();

  for (auto &UseMI : MRI.use_nodbg_instructions(PtrAdd.getReg(0))) {
    // This combine may end up running before ptrtoint/inttoptr combines
    // manage to eliminate redundant conversions, so try to look through them.
    MachineInstr *ConvUseMI = &UseMI;
    unsigned ConvUseOpc = ConvUseMI->getOpcode();
    while (ConvUseOpc == TargetOpcode::G_INTTOPTR ||
           ConvUseOpc == TargetOpcode::G_PTRTOINT) {
      Register DefReg = ConvUseMI->getOperand(0).getReg();
      if (!MRI.hasOneNonDBGUse(DefReg))
        break;
      ConvUseMI = &*MRI.use_instr_nodbg_begin(DefReg);
      ConvUseOpc = ConvUseMI->getOpcode();
    }
    auto *LdStMI = dyn_cast<GLoadStore>(ConvUseMI);
    if (!LdStMI)
      continue;
    // Is x[offset2] already not a legal addressing mode? If so then
    // reassociating the constants breaks nothing (we test offset2 because
    // that's the one we hope to fold into the load or store).
    TargetLoweringBase::AddrMode AM;
    AM.HasBaseReg = true;
    AM.BaseOffs = C2APIntVal.getSExtValue();
    unsigned AS = MRI.getType(LdStMI->getPointerReg()).getAddressSpace();
    Type *AccessTy = getTypeForLLT(LdStMI->getMMO().getMemoryType(),
                                   PtrAdd.getMF()->getFunction().getContext());
    const auto &TLI = *PtrAdd.getMF()->getSubtarget().getTargetLowering();
    if (!TLI.isLegalAddressingMode(PtrAdd.getMF()->getDataLayout(), AM,
                                   AccessTy, AS))
      continue;

    // Would x[offset1+offset2] still be a legal addressing mode?
    AM.BaseOffs = CombinedValue;
    if (!TLI.isLegalAddressingMode(PtrAdd.getMF()->getDataLayout(), AM,
                                   AccessTy, AS))
      return true;
  }

  return false;
}

bool CombinerHelper::matchReassocConstantInnerRHS(GPtrAdd &MI,
                                                  MachineInstr *RHS,
                                                  BuildFnTy &MatchInfo) const {
  // G_PTR_ADD(BASE, G_ADD(X, C)) -> G_PTR_ADD(G_PTR_ADD(BASE, X), C)
  Register Src1Reg = MI.getOperand(1).getReg();
  if (RHS->getOpcode() != TargetOpcode::G_ADD)
    return false;
  auto C2 = getIConstantVRegVal(RHS->getOperand(2).getReg(), MRI);
  if (!C2)
    return false;

  MatchInfo = [=, &MI](MachineIRBuilder &B) {
    LLT PtrTy = MRI.getType(MI.getOperand(0).getReg());

    auto NewBase =
        Builder.buildPtrAdd(PtrTy, Src1Reg, RHS->getOperand(1).getReg());
    Observer.changingInstr(MI);
    MI.getOperand(1).setReg(NewBase.getReg(0));
    MI.getOperand(2).setReg(RHS->getOperand(2).getReg());
    Observer.changedInstr(MI);
  };
  return !reassociationCanBreakAddressingModePattern(MI);
}

bool CombinerHelper::matchReassocConstantInnerLHS(GPtrAdd &MI,
                                                  MachineInstr *LHS,
                                                  MachineInstr *RHS,
                                                  BuildFnTy &MatchInfo) const {
  // G_PTR_ADD(G_PTR_ADD(X, C), Y) -> G_PTR_ADD(G_PTR_ADD(X, Y), C)
  // if and only if (G_PTR_ADD X, C) has one use.
  Register LHSBase;
  std::optional<ValueAndVReg> LHSCstOff;
  if (!mi_match(MI.getBaseReg(), MRI,
                m_OneNonDBGUse(m_GPtrAdd(m_Reg(LHSBase), m_GCst(LHSCstOff)))))
    return false;

  auto *LHSPtrAdd = cast<GPtrAdd>(LHS);
  MatchInfo = [=, &MI](MachineIRBuilder &B) {
    // When we change LHSPtrAdd's offset register we might cause it to use a
    // reg before its def. Sink the instruction down to the outer PTR_ADD to
    // ensure this doesn't happen.
    LHSPtrAdd->moveBefore(&MI);
    Register RHSReg = MI.getOffsetReg();
    // Setting the register directly could introduce a type mismatch if the
    // offset came through an extend or truncate, so rebuild the constant at
    // the offset's type instead.
    auto NewCst = B.buildConstant(MRI.getType(RHSReg), LHSCstOff->Value);
    Observer.changingInstr(MI);
    MI.getOperand(2).setReg(NewCst.getReg(0));
    Observer.changedInstr(MI);
    Observer.changingInstr(*LHSPtrAdd);
    LHSPtrAdd->getOperand(2).setReg(RHSReg);
    Observer.changedInstr(*LHSPtrAdd);
  };
  return !reassociationCanBreakAddressingModePattern(MI);
}

bool CombinerHelper::matchReassocFoldConstantsInSubTree(
    GPtrAdd &MI, MachineInstr *LHS, MachineInstr *RHS,
    BuildFnTy &MatchInfo) const {
  // G_PTR_ADD(G_PTR_ADD(BASE, C1), C2) -> G_PTR_ADD(BASE, C1+C2)
  auto *LHSPtrAdd = dyn_cast<GPtrAdd>(LHS);
  if (!LHSPtrAdd)
    return false;

  Register Src2Reg = MI.getOperand(2).getReg();
  Register LHSSrc1 = LHSPtrAdd->getBaseReg();
  Register LHSSrc2 = LHSPtrAdd->getOffsetReg();
  auto C1 = getIConstantVRegVal(LHSSrc2, MRI);
  if (!C1)
    return false;
  auto C2 = getIConstantVRegVal(Src2Reg, MRI);
  if (!C2)
    return false;

  MatchInfo = [=, &MI](MachineIRBuilder &B) {
    auto NewCst = B.buildConstant(MRI.getType(Src2Reg), *C1 + *C2);
    Observer.changingInstr(MI);
    MI.getOperand(1).setReg(LHSSrc1);
    MI.getOperand(2).setReg(NewCst.getReg(0));
    Observer.changedInstr(MI);
  };
  return !reassociationCanBreakAddressingModePattern(MI);
}

bool CombinerHelper::matchReassocPtrAdd(MachineInstr &MI,
                                        BuildFnTy &MatchInfo) const {
  auto &PtrAdd = cast<GPtrAdd>(MI);
  // We're trying to match a few pointer computation patterns here for
  // re-association opportunities.
  // 1) Isolating a constant operand to be on the RHS, e.g.:
  //    G_PTR_ADD(BASE, G_ADD(X, C)) -> G_PTR_ADD(G_PTR_ADD(BASE, X), C)
  //
  // 2) Folding two constants in each sub-tree as long as such folding
  //    doesn't break a legal addressing mode.
  //    G_PTR_ADD(G_PTR_ADD(BASE, C1), C2) -> G_PTR_ADD(BASE, C1+C2)
  //
  // 3) Move a constant from the LHS of an inner op to the RHS of the outer.
  //    G_PTR_ADD(G_PTR_ADD(X, C), Y) -> G_PTR_ADD(G_PTR_ADD(X, Y), C)
  //    iff (G_PTR_ADD X, C) has one use.
  MachineInstr *LHS = MRI.getVRegDef(PtrAdd.getBaseReg());
  MachineInstr *RHS = MRI.getVRegDef(PtrAdd.getOffsetReg());

  // Try to match example 2.
  if (matchReassocFoldConstantsInSubTree(PtrAdd, LHS, RHS, MatchInfo))
    return true;

  // Try to match example 3.
  if (matchReassocConstantInnerLHS(PtrAdd, LHS, RHS, MatchInfo))
    return true;

  // Try to match example 1.
  if (matchReassocConstantInnerRHS(PtrAdd, RHS, MatchInfo))
    return true;

  return false;
}
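
// Try to reassociate (Opc (Opc X, C1), Y): either fold C1 with a constant Y,
// or move C1 outward when the target reports reassociation as profitable.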
bool CombinerHelper::tryReassocBinOp(unsigned Opc, Register DstReg,
                                     Register OpLHS, Register OpRHS,
                                     BuildFnTy &MatchInfo) const {
  LLT OpRHSTy = MRI.getType(OpRHS);
  MachineInstr *OpLHSDef = MRI.getVRegDef(OpLHS);

  if (OpLHSDef->getOpcode() != Opc)
    return false;

  MachineInstr *OpRHSDef = MRI.getVRegDef(OpRHS);
  Register OpLHSLHS = OpLHSDef->getOperand(1).getReg();
  Register OpLHSRHS = OpLHSDef->getOperand(2).getReg();

  // If the inner op is (X op C), pull the constant out so it can be folded
  // with other constants in the expression tree. Folding is not guaranteed so
  // we might have (C1 op C2). In that case do not pull a constant out because
  // it won't help and can lead to infinite loops.
  if (isConstantOrConstantSplatVector(*MRI.getVRegDef(OpLHSRHS), MRI) &&
      !isConstantOrConstantSplatVector(*MRI.getVRegDef(OpLHSLHS), MRI)) {
    if (isConstantOrConstantSplatVector(*OpRHSDef, MRI)) {
      // (Opc (Opc X, C1), C2) -> (Opc X, (Opc C1, C2))
      MatchInfo = [=](MachineIRBuilder &B) {
        auto NewCst = B.buildInstr(Opc, {OpRHSTy}, {OpLHSRHS, OpRHS});
        B.buildInstr(Opc, {DstReg}, {OpLHSLHS, NewCst});
      };
      return true;
    }
    if (getTargetLowering().isReassocProfitable(MRI, OpLHS, OpRHS)) {
      // Reassociate: (op (op x, c1), y) -> (op (op x, y), c1)
      // iff (op x, c1) has one use
      MatchInfo = [=](MachineIRBuilder &B) {
        auto NewLHSLHS = B.buildInstr(Opc, {OpRHSTy}, {OpLHSLHS, OpRHS});
        B.buildInstr(Opc, {DstReg}, {NewLHSLHS, OpLHSRHS});
      };
      return true;
    }
  }

  return false;
}

bool CombinerHelper::matchReassocCommBinOp(MachineInstr &MI,
                                           BuildFnTy &MatchInfo) const {
  // We don't check if the reassociation will break a legal addressing mode
  // here since pointer arithmetic is handled by G_PTR_ADD.
  unsigned Opc = MI.getOpcode();
  Register DstReg = MI.getOperand(0).getReg();
  Register LHSReg = MI.getOperand(1).getReg();
  Register RHSReg = MI.getOperand(2).getReg();

  if (tryReassocBinOp(Opc, DstReg, LHSReg, RHSReg, MatchInfo))
    return true;
  if (tryReassocBinOp(Opc, DstReg, RHSReg, LHSReg, MatchInfo))
    return true;
  return false;
}

bool CombinerHelper::matchConstantFoldCastOp(MachineInstr &MI,
                                             APInt &MatchInfo) const {
  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
  Register SrcOp = MI.getOperand(1).getReg();

  if (auto MaybeCst = ConstantFoldCastOp(MI.getOpcode(), DstTy, SrcOp, MRI)) {
    MatchInfo = *MaybeCst;
    return true;
  }

  return false;
}

bool CombinerHelper::matchConstantFoldBinOp(MachineInstr &MI,
                                            APInt &MatchInfo) const {
  Register Op1 = MI.getOperand(1).getReg();
  Register Op2 = MI.getOperand(2).getReg();
  auto MaybeCst = ConstantFoldBinOp(MI.getOpcode(), Op1, Op2, MRI);
  if (!MaybeCst)
    return false;
  MatchInfo = *MaybeCst;
  return true;
}

bool CombinerHelper::matchConstantFoldFPBinOp(MachineInstr &MI,
                                              ConstantFP *&MatchInfo) const {
  Register Op1 = MI.getOperand(1).getReg();
  Register Op2 = MI.getOperand(2).getReg();
  auto MaybeCst = ConstantFoldFPBinOp(MI.getOpcode(), Op1, Op2, MRI);
  if (!MaybeCst)
    return false;
  MatchInfo =
      ConstantFP::get(MI.getMF()->getFunction().getContext(), *MaybeCst);
  return true;
}

bool CombinerHelper::matchConstantFoldFMA(MachineInstr &MI,
                                          ConstantFP *&MatchInfo) const {
  assert(MI.getOpcode() == TargetOpcode::G_FMA ||
         MI.getOpcode() == TargetOpcode::G_FMAD);
  auto [_, Op1, Op2, Op3] = MI.getFirst4Regs();

  const ConstantFP *Op3Cst = getConstantFPVRegVal(Op3, MRI);
  if (!Op3Cst)
    return false;

  const ConstantFP *Op2Cst = getConstantFPVRegVal(Op2, MRI);
  if (!Op2Cst)
    return false;

  const ConstantFP *Op1Cst = getConstantFPVRegVal(Op1, MRI);
  if (!Op1Cst)
    return false;

  APFloat Op1F = Op1Cst->getValueAPF();
  Op1F.fusedMultiplyAdd(Op2Cst->getValueAPF(), Op3Cst->getValueAPF(),
                        APFloat::rmNearestTiesToEven);
  MatchInfo = ConstantFP::get(MI.getMF()->getFunction().getContext(), Op1F);
  return true;
}

bool CombinerHelper::matchNarrowBinopFeedingAnd(
    MachineInstr &MI,
    std::function<void(MachineIRBuilder &)> &MatchInfo) const {
  // Look for a binop feeding into an AND with a mask:
  //
  // %add = G_ADD %lhs, %rhs
  // %and = G_AND %add, 000...11111111
  //
  // Check if it's possible to perform the binop at a narrower width and zext
  // back to the original width like so:
  //
  // %narrow_lhs = G_TRUNC %lhs
  // %narrow_rhs = G_TRUNC %rhs
  // %narrow_add = G_ADD %narrow_lhs, %narrow_rhs
  // %new_add = G_ZEXT %narrow_add
  // %and = G_AND %new_add, 000...11111111
  //
  // This can allow later combines to eliminate the G_AND if it turns out
  // that the mask is irrelevant.
  assert(MI.getOpcode() == TargetOpcode::G_AND);
  Register Dst = MI.getOperand(0).getReg();
  Register AndLHS = MI.getOperand(1).getReg();
  Register AndRHS = MI.getOperand(2).getReg();
  LLT WideTy = MRI.getType(Dst);

  // If the potential binop has more than one use, then it's possible that one
  // of those uses will need its full width.
  if (!WideTy.isScalar() || !MRI.hasOneNonDBGUse(AndLHS))
    return false;

  // Check if the LHS feeding the AND is impacted by the high bits that we're
  // masking out.
  //
  // e.g. for 64-bit x, y:
  //
  // add_64(x, y) & 65535 == zext(add_16(trunc(x), trunc(y))) & 65535
  MachineInstr *LHSInst = getDefIgnoringCopies(AndLHS, MRI);
  if (!LHSInst)
    return false;
  unsigned LHSOpc = LHSInst->getOpcode();
  switch (LHSOpc) {
  default:
    return false;
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_AND:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_XOR:
    break;
  }

  // Find the mask on the RHS.
  auto Cst = getIConstantVRegValWithLookThrough(AndRHS, MRI);
  if (!Cst)
    return false;
  auto Mask = Cst->Value;
  if (!Mask.isMask())
    return false;

  // No point in combining if there's nothing to truncate.
  unsigned NarrowWidth = Mask.countr_one();
  if (NarrowWidth == WideTy.getSizeInBits())
    return false;
  LLT NarrowTy = LLT::scalar(NarrowWidth);

  // Check if adding the zext + truncates could be harmful.
  auto &MF = *MI.getMF();
  const auto &TLI = getTargetLowering();
  LLVMContext &Ctx = MF.getFunction().getContext();
  if (!TLI.isTruncateFree(WideTy, NarrowTy, Ctx) ||
      !TLI.isZExtFree(NarrowTy, WideTy, Ctx))
    return false;
  if (!isLegalOrBeforeLegalizer({TargetOpcode::G_TRUNC, {NarrowTy, WideTy}}) ||
      !isLegalOrBeforeLegalizer({TargetOpcode::G_ZEXT, {WideTy, NarrowTy}}))
    return false;
  Register BinOpLHS = LHSInst->getOperand(1).getReg();
  Register BinOpRHS = LHSInst->getOperand(2).getReg();
  MatchInfo = [=, &MI](MachineIRBuilder &B) {
    auto NarrowLHS = Builder.buildTrunc(NarrowTy, BinOpLHS);
    auto NarrowRHS = Builder.buildTrunc(NarrowTy, BinOpRHS);
    auto NarrowBinOp =
        Builder.buildInstr(LHSOpc, {NarrowTy}, {NarrowLHS, NarrowRHS});
    auto Ext = Builder.buildZExt(WideTy, NarrowBinOp);
    Observer.changingInstr(MI);
    MI.getOperand(1).setReg(Ext.getReg(0));
    Observer.changedInstr(MI);
  };
  return true;
}

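// Fold (G_*MULO x, 2) -> (G_*ADDO x, x): doubling overflows exactly when the
// self-addition does.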
bool CombinerHelper::matchMulOBy2(MachineInstr &MI,
                                  BuildFnTy &MatchInfo) const {
  unsigned Opc = MI.getOpcode();
  assert(Opc == TargetOpcode::G_UMULO || Opc == TargetOpcode::G_SMULO);

  if (!mi_match(MI.getOperand(3).getReg(), MRI, m_SpecificICstOrSplat(2)))
    return false;

  MatchInfo = [=, &MI](MachineIRBuilder &B) {
    Observer.changingInstr(MI);
    unsigned NewOpc = Opc == TargetOpcode::G_UMULO ? TargetOpcode::G_UADDO
                                                   : TargetOpcode::G_SADDO;
    MI.setDesc(Builder.getTII().get(NewOpc));
    MI.getOperand(3).setReg(MI.getOperand(2).getReg());
    Observer.changedInstr(MI);
  };
  return true;
}

bool CombinerHelper::matchMulOBy0(MachineInstr &MI,
                                  BuildFnTy &MatchInfo) const {
  // (G_*MULO x, 0) -> 0 + no carry out
  assert(MI.getOpcode() == TargetOpcode::G_UMULO ||
         MI.getOpcode() == TargetOpcode::G_SMULO);
  if (!mi_match(MI.getOperand(3).getReg(), MRI, m_SpecificICstOrSplat(0)))
    return false;
  Register Dst = MI.getOperand(0).getReg();
  Register Carry = MI.getOperand(1).getReg();
  if (!isConstantLegalOrBeforeLegalizer(MRI.getType(Dst)) ||
      !isConstantLegalOrBeforeLegalizer(MRI.getType(Carry)))
    return false;
  MatchInfo = [=](MachineIRBuilder &B) {
    B.buildConstant(Dst, 0);
    B.buildConstant(Carry, 0);
  };
  return true;
}

bool CombinerHelper::matchAddEToAddO(MachineInstr &MI,
                                     BuildFnTy &MatchInfo) const {
  // (G_*ADDE x, y, 0) -> (G_*ADDO x, y)
  // (G_*SUBE x, y, 0) -> (G_*SUBO x, y)
  assert(MI.getOpcode() == TargetOpcode::G_UADDE ||
         MI.getOpcode() == TargetOpcode::G_SADDE ||
         MI.getOpcode() == TargetOpcode::G_USUBE ||
         MI.getOpcode() == TargetOpcode::G_SSUBE);
  if (!mi_match(MI.getOperand(4).getReg(), MRI, m_SpecificICstOrSplat(0)))
    return false;
  MatchInfo = [&](MachineIRBuilder &B) {
    unsigned NewOpcode;
    switch (MI.getOpcode()) {
    case TargetOpcode::G_UADDE:
      NewOpcode = TargetOpcode::G_UADDO;
      break;
    case TargetOpcode::G_SADDE:
      NewOpcode = TargetOpcode::G_SADDO;
      break;
    case TargetOpcode::G_USUBE:
      NewOpcode = TargetOpcode::G_USUBO;
      break;
    case TargetOpcode::G_SSUBE:
      NewOpcode = TargetOpcode::G_SSUBO;
      break;
    }
    Observer.changingInstr(MI);
    MI.setDesc(B.getTII().get(NewOpcode));
    MI.removeOperand(4);
    Observer.changedInstr(MI);
  };
  return true;
}

bool CombinerHelper::matchSubAddSameReg(MachineInstr &MI,
                                        BuildFnTy &MatchInfo) const {
  assert(MI.getOpcode() == TargetOpcode::G_SUB);
  Register Dst = MI.getOperand(0).getReg();
  // (x + y) - z -> x (if y == z)
  // (x + y) - z -> y (if x == z)
  Register X, Y, Z;
  if (mi_match(Dst, MRI, m_GSub(m_GAdd(m_Reg(X), m_Reg(Y)), m_Reg(Z)))) {
    Register ReplaceReg;
    int64_t CstX, CstY;
    if (Y == Z || (mi_match(Y, MRI, m_ICstOrSplat(CstY)) &&
                   mi_match(Z, MRI, m_SpecificICstOrSplat(CstY))))
      ReplaceReg = X;
    else if (X == Z || (mi_match(X, MRI, m_ICstOrSplat(CstX)) &&
                        mi_match(Z, MRI, m_SpecificICstOrSplat(CstX))))
      ReplaceReg = Y;
    if (ReplaceReg) {
      MatchInfo = [=](MachineIRBuilder &B) { B.buildCopy(Dst, ReplaceReg); };
      return true;
    }
  }

  // x - (y + z) -> 0 - y (if x == z)
  // x - (y + z) -> 0 - z (if x == y)
  if (mi_match(Dst, MRI, m_GSub(m_Reg(X), m_GAdd(m_Reg(Y), m_Reg(Z))))) {
    Register ReplaceReg;
    int64_t CstX;
    if (X == Z || (mi_match(X, MRI, m_ICstOrSplat(CstX)) &&
                   mi_match(Z, MRI, m_SpecificICstOrSplat(CstX))))
      ReplaceReg = Y;
    else if (X == Y || (mi_match(X, MRI, m_ICstOrSplat(CstX)) &&
                        mi_match(Y, MRI, m_SpecificICstOrSplat(CstX))))
      ReplaceReg = Z;
    if (ReplaceReg) {
      MatchInfo = [=](MachineIRBuilder &B) {
        auto Zero = B.buildConstant(MRI.getType(Dst), 0);
        B.buildSub(Dst, Zero, ReplaceReg);
      };
      return true;
    }
  }
  return false;
}

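/// Build a G_UDIV/G_UREM by constant as a multiply by a precomputed "magic"
/// reciprocal plus fixup shifts (the magic-number technique, cf. Hacker's
/// Delight), so no actual division instruction is needed.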
MachineInstr *CombinerHelper::buildUDivorURemUsingMul(MachineInstr &MI) const {
  unsigned Opcode = MI.getOpcode();
  assert(Opcode == TargetOpcode::G_UDIV || Opcode == TargetOpcode::G_UREM);
  auto &UDivorRem = cast<GenericMachineInstr>(MI);
  Register Dst = UDivorRem.getReg(0);
  Register LHS = UDivorRem.getReg(1);
  Register RHS = UDivorRem.getReg(2);
  LLT Ty = MRI.getType(Dst);
  LLT ScalarTy = Ty.getScalarType();
  const unsigned EltBits = ScalarTy.getScalarSizeInBits();
  LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
  LLT ScalarShiftAmtTy = ShiftAmtTy.getScalarType();

  auto &MIB = Builder;

  bool UseSRL = false;
  SmallVector<Register, 16> Shifts, Factors;
  auto *RHSDefInstr =
      cast<GenericMachineInstr>(getDefIgnoringCopies(RHS, MRI));
  bool IsSplat = getIConstantSplatVal(*RHSDefInstr, MRI).has_value();

  auto BuildExactUDIVPattern = [&](const Constant *C) {
    // Don't recompute inverses for each splat element.
    if (IsSplat && !Factors.empty()) {
      Shifts.push_back(Shifts[0]);
      Factors.push_back(Factors[0]);
      return true;
    }

    auto *CI = cast<ConstantInt>(C);
    APInt Divisor = CI->getValue();
    unsigned Shift = Divisor.countr_zero();
    if (Shift) {
      Divisor.lshrInPlace(Shift);
      UseSRL = true;
    }

    // Calculate the multiplicative inverse modulo BW.
    APInt Factor = Divisor.multiplicativeInverse();
    Shifts.push_back(MIB.buildConstant(ScalarShiftAmtTy, Shift).getReg(0));
    Factors.push_back(MIB.buildConstant(ScalarTy, Factor).getReg(0));
    return true;
  };

  if (MI.getFlag(MachineInstr::MIFlag::IsExact)) {
    // Collect all magic values from the build vector.
    if (!matchUnaryPredicate(MRI, RHS, BuildExactUDIVPattern))
      llvm_unreachable("Expected unary predicate match to succeed");

    Register Shift, Factor;
    if (Ty.isVector()) {
      Shift = MIB.buildBuildVector(ShiftAmtTy, Shifts).getReg(0);
      Factor = MIB.buildBuildVector(Ty, Factors).getReg(0);
    } else {
      Shift = Shifts[0];
      Factor = Factors[0];
    }

    Register Res = LHS;

    if (UseSRL)
      Res = MIB.buildLShr(Ty, Res, Shift, MachineInstr::IsExact).getReg(0);

    return MIB.buildMul(Ty, Res, Factor);
  }

  unsigned KnownLeadingZeros =
      VT ? VT->getKnownBits(LHS).countMinLeadingZeros() : 0;

  bool UseNPQ = false;
  SmallVector<Register, 16> PreShifts, PostShifts, MagicFactors, NPQFactors;
  auto BuildUDIVPattern = [&](const Constant *C) {
    auto *CI = cast<ConstantInt>(C);
    const APInt &Divisor = CI->getValue();

    bool SelNPQ = false;
    APInt Magic(Divisor.getBitWidth(), 0);
    unsigned PreShift = 0, PostShift = 0;

    // Magic algorithm doesn't work for division by 1. We need to emit a select
    // at the end.
    // TODO: Use undef values for divisor of 1.
    if (!Divisor.isOne()) {

      // UnsignedDivisionByConstantInfo doesn't work correctly if the leading
      // zeros claimed for the dividend exceed those of the divisor.
      UnsignedDivisionByConstantInfo magics =
          UnsignedDivisionByConstantInfo::get(
              Divisor, std::min(KnownLeadingZeros, Divisor.countl_zero()));

      Magic = std::move(magics.Magic);

      assert(magics.PreShift < Divisor.getBitWidth() &&
             "We shouldn't generate an undefined shift!");
      assert(magics.PostShift < Divisor.getBitWidth() &&
             "We shouldn't generate an undefined shift!");
      assert((!magics.IsAdd || magics.PreShift == 0) && "Unexpected pre-shift");
      PreShift = magics.PreShift;
      PostShift = magics.PostShift;
      SelNPQ = magics.IsAdd;
    }

    PreShifts.push_back(
        MIB.buildConstant(ScalarShiftAmtTy, PreShift).getReg(0));
    MagicFactors.push_back(MIB.buildConstant(ScalarTy, Magic).getReg(0));
    NPQFactors.push_back(
        MIB.buildConstant(ScalarTy,
                          SelNPQ ? APInt::getOneBitSet(EltBits, EltBits - 1)
                                 : APInt::getZero(EltBits))
            .getReg(0));
    PostShifts.push_back(
        MIB.buildConstant(ScalarShiftAmtTy, PostShift).getReg(0));
    UseNPQ |= SelNPQ;
    return true;
  };

  // Collect the shifts/magic values from each element.
  bool Matched = matchUnaryPredicate(MRI, RHS, BuildUDIVPattern);
  (void)Matched;
  assert(Matched && "Expected unary predicate match to succeed");

  Register PreShift, PostShift, MagicFactor, NPQFactor;
  auto *RHSDef = getOpcodeDef<GBuildVector>(RHS, MRI);
  if (RHSDef) {
    PreShift = MIB.buildBuildVector(ShiftAmtTy, PreShifts).getReg(0);
    MagicFactor = MIB.buildBuildVector(Ty, MagicFactors).getReg(0);
    NPQFactor = MIB.buildBuildVector(Ty, NPQFactors).getReg(0);
    PostShift = MIB.buildBuildVector(ShiftAmtTy, PostShifts).getReg(0);
  } else {
    assert(MRI.getType(RHS).isScalar() &&
           "Non-build_vector operation should have been a scalar");
    PreShift = PreShifts[0];
    MagicFactor = MagicFactors[0];
    PostShift = PostShifts[0];
  }

  Register Q = LHS;
  Q = MIB.buildLShr(Ty, Q, PreShift).getReg(0);

  // Multiply the numerator (operand 0) by the magic value.
  Q = MIB.buildUMulH(Ty, Q, MagicFactor).getReg(0);

  if (UseNPQ) {
    Register NPQ = MIB.buildSub(Ty, LHS, Q).getReg(0);

    // For vectors we might have a mix of non-NPQ/NPQ paths, so use
    // G_UMULH to act as a SRL-by-1 for NPQ, else multiply by zero.
    if (Ty.isVector())
      NPQ = MIB.buildUMulH(Ty, NPQ, NPQFactor).getReg(0);
    else
      NPQ = MIB.buildLShr(Ty, NPQ, MIB.buildConstant(ShiftAmtTy, 1)).getReg(0);

    Q = MIB.buildAdd(Ty, NPQ, Q).getReg(0);
  }

  Q = MIB.buildLShr(Ty, Q, PostShift).getReg(0);
  auto One = MIB.buildConstant(Ty, 1);
  auto IsOne = MIB.buildICmp(
      CmpInst::Predicate::ICMP_EQ,
      Ty.isScalar() ? LLT::scalar(1) : Ty.changeElementSize(1), RHS, One);
  auto ret = MIB.buildSelect(Ty, IsOne, LHS, Q);

  if (Opcode == TargetOpcode::G_UREM) {
    auto Prod = MIB.buildMul(Ty, ret, RHS);
    return MIB.buildSub(Ty, LHS, Prod);
  }
  return ret;
}

bool CombinerHelper::matchUDivorURemByConst(MachineInstr &MI) const {
  unsigned Opcode = MI.getOpcode();
  assert(Opcode == TargetOpcode::G_UDIV || Opcode == TargetOpcode::G_UREM);
  Register Dst = MI.getOperand(0).getReg();
  Register RHS = MI.getOperand(2).getReg();
  LLT DstTy = MRI.getType(Dst);

  auto &MF = *MI.getMF();
  AttributeList Attr = MF.getFunction().getAttributes();
  const auto &TLI = getTargetLowering();
  LLVMContext &Ctx = MF.getFunction().getContext();
  if (TLI.isIntDivCheap(getApproximateEVTForLLT(DstTy, Ctx), Attr))
    return false;

  // Don't do this for minsize because the instruction sequence is usually
  // larger.
  if (MF.getFunction().hasMinSize())
    return false;

  if (Opcode == TargetOpcode::G_UDIV &&
      MI.getFlag(MachineInstr::MIFlag::IsExact)) {
    return matchUnaryPredicate(
        MRI, RHS, [](const Constant *C) { return C && !C->isNullValue(); });
  }

  auto *RHSDef = MRI.getVRegDef(RHS);
  if (!isConstantOrConstantVector(*RHSDef, MRI))
    return false;

  // Don't do this if the types are not going to be legal.
  if (LI) {
    if (!isLegalOrBeforeLegalizer({TargetOpcode::G_MUL, {DstTy, DstTy}}))
      return false;
    if (!isLegalOrBeforeLegalizer({TargetOpcode::G_UMULH, {DstTy}}))
      return false;
    if (!isLegalOrBeforeLegalizer(
            {TargetOpcode::G_ICMP,
             {DstTy.isVector() ? DstTy.changeElementSize(1) : LLT::scalar(1),
              DstTy}}))
      return false;
    if (Opcode == TargetOpcode::G_UREM &&
        !isLegalOrBeforeLegalizer({TargetOpcode::G_SUB, {DstTy, DstTy}}))
      return false;
  }

  return matchUnaryPredicate(
      MRI, RHS, [](const Constant *C) { return C && !C->isNullValue(); });
}

void CombinerHelper::applyUDivorURemByConst(MachineInstr &MI) const {
  auto *NewMI = buildUDivorURemUsingMul(MI);
  replaceSingleDefInstWithReg(MI, NewMI->getOperand(0).getReg());
}

bool CombinerHelper::matchSDivByConst(MachineInstr &MI) const {
  assert(MI.getOpcode() == TargetOpcode::G_SDIV && "Expected SDIV");
  Register Dst = MI.getOperand(0).getReg();
  Register RHS = MI.getOperand(2).getReg();
  LLT DstTy = MRI.getType(Dst);
  auto SizeInBits = DstTy.getScalarSizeInBits();
  LLT WideTy = DstTy.changeElementSize(SizeInBits * 2);

  auto &MF = *MI.getMF();
  AttributeList Attr = MF.getFunction().getAttributes();
  const auto &TLI = getTargetLowering();
  LLVMContext &Ctx = MF.getFunction().getContext();
  if (TLI.isIntDivCheap(getApproximateEVTForLLT(DstTy, Ctx), Attr))
    return false;

  // Don't do this for minsize because the instruction sequence is usually
  // larger.
  if (MF.getFunction().hasMinSize())
    return false;

  // If the sdiv has an 'exact' flag we can use a simpler lowering.
  if (MI.getFlag(MachineInstr::MIFlag::IsExact)) {
    return matchUnaryPredicate(
        MRI, RHS, [](const Constant *C) { return C && !C->isNullValue(); });
  }

  auto *RHSDef = MRI.getVRegDef(RHS);
  if (!isConstantOrConstantVector(*RHSDef, MRI))
    return false;

  // Don't do this if the types are not going to be legal.
  if (LI) {
    if (!isLegalOrBeforeLegalizer({TargetOpcode::G_MUL, {DstTy, DstTy}}))
      return false;
    if (!isLegal({TargetOpcode::G_SMULH, {DstTy}}) &&
        !isLegalOrHasWidenScalar({TargetOpcode::G_MUL, {WideTy, WideTy}}))
      return false;
  }

  return matchUnaryPredicate(
      MRI, RHS, [](const Constant *C) { return C && !C->isNullValue(); });
}

void CombinerHelper::applySDivByConst(MachineInstr &MI) const {
  auto *NewMI = buildSDivUsingMul(MI);
  replaceSingleDefInstWithReg(MI, NewMI->getOperand(0).getReg());
}

MachineInstr *CombinerHelper::buildSDivUsingMul(MachineInstr &MI) const {
  assert(MI.getOpcode() == TargetOpcode::G_SDIV && "Expected SDIV");
  auto &SDiv = cast<GenericMachineInstr>(MI);
  Register Dst = SDiv.getReg(0);
  Register LHS = SDiv.getReg(1);
  Register RHS = SDiv.getReg(2);
  LLT Ty = MRI.getType(Dst);
  LLT ScalarTy = Ty.getScalarType();
  const unsigned EltBits = ScalarTy.getScalarSizeInBits();
  LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
  LLT ScalarShiftAmtTy = ShiftAmtTy.getScalarType();
  auto &MIB = Builder;

  bool UseSRA = false;
  SmallVector<Register, 16> ExactShifts, ExactFactors;

  auto *RHSDefInstr =
      cast<GenericMachineInstr>(getDefIgnoringCopies(RHS, MRI));
  bool IsSplat = getIConstantSplatVal(*RHSDefInstr, MRI).has_value();

  auto BuildExactSDIVPattern = [&](const Constant *C) {
    // Don't recompute inverses for each splat element.
    if (IsSplat && !ExactFactors.empty()) {
      ExactShifts.push_back(ExactShifts[0]);
      ExactFactors.push_back(ExactFactors[0]);
      return true;
    }

    auto *CI = cast<ConstantInt>(C);
    APInt Divisor = CI->getValue();
    unsigned Shift = Divisor.countr_zero();
    if (Shift) {
      Divisor.ashrInPlace(Shift);
      UseSRA = true;
    }

    // Calculate the multiplicative inverse modulo BW.
    // 2^W requires W + 1 bits, so we have to extend and then truncate.
    APInt Factor = Divisor.multiplicativeInverse();
    ExactShifts.push_back(MIB.buildConstant(ScalarShiftAmtTy, Shift).getReg(0));
    ExactFactors.push_back(MIB.buildConstant(ScalarTy, Factor).getReg(0));
    return true;
  };

  if (MI.getFlag(MachineInstr::MIFlag::IsExact)) {
    // Collect all magic values from the build vector.
    bool Matched = matchUnaryPredicate(MRI, RHS, BuildExactSDIVPattern);
    (void)Matched;
    assert(Matched && "Expected unary predicate match to succeed");

    Register Shift, Factor;
    if (Ty.isVector()) {
      Shift = MIB.buildBuildVector(ShiftAmtTy, ExactShifts).getReg(0);
      Factor = MIB.buildBuildVector(Ty, ExactFactors).getReg(0);
    } else {
      Shift = ExactShifts[0];
      Factor = ExactFactors[0];
    }

    Register Res = LHS;

    if (UseSRA)
      Res = MIB.buildAShr(Ty, Res, Shift, MachineInstr::IsExact).getReg(0);

    return MIB.buildMul(Ty, Res, Factor);
  }

  SmallVector<Register, 16> MagicFactors, Factors, Shifts, ShiftMasks;

  auto BuildSDIVPattern = [&](const Constant *C) {
    auto *CI = cast<ConstantInt>(C);
    const APInt &Divisor = CI->getValue();

    SignedDivisionByConstantInfo Magics =
        SignedDivisionByConstantInfo::get(Divisor);
    int NumeratorFactor = 0;
    int ShiftMask = -1;

    if (Divisor.isOne() || Divisor.isAllOnes()) {
      // If d is +1/-1, we just multiply the numerator by +1/-1.
      NumeratorFactor = Divisor.getSExtValue();
      Magics.Magic = 0;
      Magics.ShiftAmount = 0;
      ShiftMask = 0;
    } else if (Divisor.isStrictlyPositive() && Magics.Magic.isNegative()) {
      // If d > 0 and m < 0, add the numerator.
      NumeratorFactor = 1;
    } else if (Divisor.isNegative() && Magics.Magic.isStrictlyPositive()) {
      // If d < 0 and m > 0, subtract the numerator.
      NumeratorFactor = -1;
    }

    MagicFactors.push_back(MIB.buildConstant(ScalarTy, Magics.Magic).getReg(0));
    Factors.push_back(MIB.buildConstant(ScalarTy, NumeratorFactor).getReg(0));
    Shifts.push_back(
        MIB.buildConstant(ScalarShiftAmtTy, Magics.ShiftAmount).getReg(0));
    ShiftMasks.push_back(MIB.buildConstant(ScalarTy, ShiftMask).getReg(0));

    return true;
  };

  // Collect the shifts/magic values from each element.
  bool Matched = matchUnaryPredicate(MRI, RHS, BuildSDIVPattern);
  (void)Matched;
  assert(Matched && "Expected unary predicate match to succeed");

  Register MagicFactor, Factor, Shift, ShiftMask;
  auto *RHSDef = getOpcodeDef<GBuildVector>(RHS, MRI);
  if (RHSDef) {
    MagicFactor = MIB.buildBuildVector(Ty, MagicFactors).getReg(0);
    Factor = MIB.buildBuildVector(Ty, Factors).getReg(0);
    Shift = MIB.buildBuildVector(ShiftAmtTy, Shifts).getReg(0);
    ShiftMask = MIB.buildBuildVector(Ty, ShiftMasks).getReg(0);
  } else {
    assert(MRI.getType(RHS).isScalar() &&
           "Non-build_vector operation should have been a scalar");
    MagicFactor = MagicFactors[0];
    Factor = Factors[0];
    Shift = Shifts[0];
    ShiftMask = ShiftMasks[0];
  }

  Register Q = LHS;
  Q = MIB.buildSMulH(Ty, LHS, MagicFactor).getReg(0);

  // (Optionally) Add/subtract the numerator using Factor.
  Factor = MIB.buildMul(Ty, LHS, Factor).getReg(0);
  Q = MIB.buildAdd(Ty, Q, Factor).getReg(0);

  // Shift right algebraic by shift value.
  Q = MIB.buildAShr(Ty, Q, Shift).getReg(0);

  // Extract the sign bit, mask it and add it to the quotient.
  auto SignShift = MIB.buildConstant(ShiftAmtTy, EltBits - 1);
  auto T = MIB.buildLShr(Ty, Q, SignShift);
  T = MIB.buildAnd(Ty, T, ShiftMask);
  return MIB.buildAdd(Ty, Q, T);
}

bool CombinerHelper::matchDivByPow2(MachineInstr &MI, bool IsSigned) const {
  assert((MI.getOpcode() == TargetOpcode::G_SDIV ||
          MI.getOpcode() == TargetOpcode::G_UDIV) &&
         "Expected SDIV or UDIV");
  auto &Div = cast<GenericMachineInstr>(MI);
  Register RHS = Div.getReg(2);
  auto MatchPow2 = [&](const Constant *C) {
    auto *CI = dyn_cast<ConstantInt>(C);
    return CI && (CI->getValue().isPowerOf2() ||
                  (IsSigned && CI->getValue().isNegatedPowerOf2()));
  };
  return matchUnaryPredicate(MRI, RHS, MatchPow2, /*AllowUndefs=*/false);
}

void CombinerHelper::applySDivByPow2(MachineInstr &MI) const {
  assert(MI.getOpcode() == TargetOpcode::G_SDIV && "Expected SDIV");
  auto &SDiv = cast<GenericMachineInstr>(MI);
  Register Dst = SDiv.getReg(0);
  Register LHS = SDiv.getReg(1);
  Register RHS = SDiv.getReg(2);
  LLT Ty = MRI.getType(Dst);
  LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
  LLT CCVT =
      Ty.isVector() ? LLT::vector(Ty.getElementCount(), 1) : LLT::scalar(1);

  // Effectively we want to lower G_SDIV %lhs, %rhs, where %rhs is a power of
  // 2, to the following version:
  //
  // %c1 = G_CTTZ %rhs
  // %inexact = G_SUB $bitwidth, %c1
  // %sign = G_ASHR %lhs, $(bitwidth - 1)
  // %lshr = G_LSHR %sign, %inexact
  // %add = G_ADD %lhs, %lshr
  // %ashr = G_ASHR %add, %c1
  // %ashr = G_SELECT %isoneorminusone, %lhs, %ashr
  // %zero = G_CONSTANT $0
  // %neg = G_NEG %ashr
  // %isneg = G_ICMP SLT %rhs, %zero
  // %res = G_SELECT %isneg, %neg, %ashr

  unsigned BitWidth = Ty.getScalarSizeInBits();
  auto Zero = Builder.buildConstant(Ty, 0);

  auto Bits = Builder.buildConstant(ShiftAmtTy, BitWidth);
  auto C1 = Builder.buildCTTZ(ShiftAmtTy, RHS);
  auto Inexact = Builder.buildSub(ShiftAmtTy, Bits, C1);
  // Splat the sign bit into the register
  auto Sign = Builder.buildAShr(
      Ty, LHS, Builder.buildConstant(ShiftAmtTy, BitWidth - 1));

  // Add (LHS < 0) ? abs2 - 1 : 0;
  auto LSrl = Builder.buildLShr(Ty, Sign, Inexact);
  auto Add = Builder.buildAdd(Ty, LHS, LSrl);
  auto AShr = Builder.buildAShr(Ty, Add, C1);

  // Special case: (sdiv X, 1) -> X
  // Special Case: (sdiv X, -1) -> 0-X
  auto One = Builder.buildConstant(Ty, 1);
  auto MinusOne = Builder.buildConstant(Ty, -1);
  auto IsOne = Builder.buildICmp(CmpInst::Predicate::ICMP_EQ, CCVT, RHS, One);
  auto IsMinusOne =
      Builder.buildICmp(CmpInst::Predicate::ICMP_EQ, CCVT, RHS, MinusOne);
  auto IsOneOrMinusOne = Builder.buildOr(CCVT, IsOne, IsMinusOne);
  AShr = Builder.buildSelect(Ty, IsOneOrMinusOne, LHS, AShr);

  // If divided by a positive value, we're done. Otherwise, the result must be
  // negated.
  auto Neg = Builder.buildNeg(Ty, AShr);
  auto IsNeg = Builder.buildICmp(CmpInst::Predicate::ICMP_SLT, CCVT, RHS, Zero);
  Builder.buildSelect(MI.getOperand(0).getReg(), IsNeg, Neg, AShr);
  MI.eraseFromParent();
}

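// (udiv x, 2^k) -> (lshr x, k), where k is recovered with G_CTTZ on the
// power-of-2 divisor.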
void CombinerHelper::applyUDivByPow2(MachineInstr &MI) const {
  assert(MI.getOpcode() == TargetOpcode::G_UDIV && "Expected UDIV");
  auto &UDiv = cast<GenericMachineInstr>(MI);
  Register Dst = UDiv.getReg(0);
  Register LHS = UDiv.getReg(1);
  Register RHS = UDiv.getReg(2);
  LLT Ty = MRI.getType(Dst);
  LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(Ty);

  auto C1 = Builder.buildCTTZ(ShiftAmtTy, RHS);
  Builder.buildLShr(MI.getOperand(0).getReg(), LHS, C1);
  MI.eraseFromParent();
}

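// Match a G_UMULH by a power of two (other than 1) that can be replaced by a
// right shift, e.g. for 32-bit: (umulh x, 8) -> (lshr x, 29).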
bool CombinerHelper::matchUMulHToLShr(MachineInstr &MI) const {
  assert(MI.getOpcode() == TargetOpcode::G_UMULH);
  Register RHS = MI.getOperand(2).getReg();
  Register Dst = MI.getOperand(0).getReg();
  LLT Ty = MRI.getType(Dst);
  LLT RHSTy = MRI.getType(RHS);
  LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
  auto MatchPow2ExceptOne = [&](const Constant *C) {
    if (auto *CI = dyn_cast<ConstantInt>(C))
      return CI->getValue().isPowerOf2() && !CI->getValue().isOne();
    return false;
  };
  if (!matchUnaryPredicate(MRI, RHS, MatchPow2ExceptOne, /*AllowUndefs=*/false))
    return false;
  // We need to check both G_LSHR and G_CTLZ because the combine uses G_CTLZ to
  // get the log base 2, and G_CTLZ is not always legal on a target.
  return isLegalOrBeforeLegalizer({TargetOpcode::G_LSHR, {Ty, ShiftAmtTy}}) &&
         isLegalOrBeforeLegalizer({TargetOpcode::G_CTLZ, {RHSTy, RHSTy}});
}

void CombinerHelper::applyUMulHToLShr(MachineInstr &MI) const {
  Register LHS = MI.getOperand(1).getReg();
  Register RHS = MI.getOperand(2).getReg();
  Register Dst = MI.getOperand(0).getReg();
  LLT Ty = MRI.getType(Dst);
  LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
  unsigned NumEltBits = Ty.getScalarSizeInBits();

  auto LogBase2 = buildLogBase2(RHS, Builder);
  auto ShiftAmt =
      Builder.buildSub(Ty, Builder.buildConstant(Ty, NumEltBits), LogBase2);
  auto Trunc = Builder.buildZExtOrTrunc(ShiftAmtTy, ShiftAmt);
  Builder.buildLShr(Dst, LHS, Trunc);
  MI.eraseFromParent();
}

bool CombinerHelper::matchRedundantNegOperands(MachineInstr &MI,
                                               BuildFnTy &MatchInfo) const {
  unsigned Opc = MI.getOpcode();
  assert(Opc == TargetOpcode::G_FADD || Opc == TargetOpcode::G_FSUB ||
         Opc == TargetOpcode::G_FMUL || Opc == TargetOpcode::G_FDIV ||
         Opc == TargetOpcode::G_FMAD || Opc == TargetOpcode::G_FMA);

  Register Dst = MI.getOperand(0).getReg();
  Register X = MI.getOperand(1).getReg();
  Register Y = MI.getOperand(2).getReg();
  LLT Type = MRI.getType(Dst);

  // fold (fadd x, fneg(y)) -> (fsub x, y)
  // fold (fadd fneg(y), x) -> (fsub x, y)
5848 // G_FADD is commutative, so both cases are checked by m_GFAdd.
5849 if (mi_match(R: Dst, MRI, P: m_GFAdd(L: m_Reg(R&: X), R: m_GFNeg(Src: m_Reg(R&: Y)))) &&
5850 isLegalOrBeforeLegalizer(Query: {TargetOpcode::G_FSUB, {Type}})) {
5851 Opc = TargetOpcode::G_FSUB;
5852 }
5853 // fold (fsub x, fneg(y)) -> (fadd x, y)
5854 else if (mi_match(R: Dst, MRI, P: m_GFSub(L: m_Reg(R&: X), R: m_GFNeg(Src: m_Reg(R&: Y)))) &&
5855 isLegalOrBeforeLegalizer(Query: {TargetOpcode::G_FADD, {Type}})) {
5856 Opc = TargetOpcode::G_FADD;
5857 }
5858 // fold (fmul fneg(x), fneg(y)) -> (fmul x, y)
5859 // fold (fdiv fneg(x), fneg(y)) -> (fdiv x, y)
5860 // fold (fmad fneg(x), fneg(y), z) -> (fmad x, y, z)
5861 // fold (fma fneg(x), fneg(y), z) -> (fma x, y, z)
5862 else if ((Opc == TargetOpcode::G_FMUL || Opc == TargetOpcode::G_FDIV ||
5863 Opc == TargetOpcode::G_FMAD || Opc == TargetOpcode::G_FMA) &&
5864 mi_match(R: X, MRI, P: m_GFNeg(Src: m_Reg(R&: X))) &&
5865 mi_match(R: Y, MRI, P: m_GFNeg(Src: m_Reg(R&: Y)))) {
5866 // no opcode change
5867 } else
5868 return false;
5869
5870 MatchInfo = [=, &MI](MachineIRBuilder &B) {
5871 Observer.changingInstr(MI);
5872 MI.setDesc(B.getTII().get(Opcode: Opc));
5873 MI.getOperand(i: 1).setReg(X);
5874 MI.getOperand(i: 2).setReg(Y);
5875 Observer.changedInstr(MI);
5876 };
5877 return true;
5878}
5879
5880bool CombinerHelper::matchFsubToFneg(MachineInstr &MI,
5881 Register &MatchInfo) const {
5882 assert(MI.getOpcode() == TargetOpcode::G_FSUB);
5883
5884 Register LHS = MI.getOperand(i: 1).getReg();
5885 MatchInfo = MI.getOperand(i: 2).getReg();
5886 LLT Ty = MRI.getType(Reg: MI.getOperand(i: 0).getReg());
5887
5888 const auto LHSCst = Ty.isVector()
5889 ? getFConstantSplat(VReg: LHS, MRI, /* allowUndef */ AllowUndef: true)
5890 : getFConstantVRegValWithLookThrough(VReg: LHS, MRI);
5891 if (!LHSCst)
5892 return false;
5893
5894 // -0.0 is always allowed
5895 if (LHSCst->Value.isNegZero())
5896 return true;
5897
5898 // +0.0 is only allowed if nsz is set.
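// (fsub +0.0, +0.0) is +0.0 while (fneg +0.0) is -0.0, so without nsz the
// fold could flip the sign of a zero result.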
5899 if (LHSCst->Value.isPosZero())
5900 return MI.getFlag(Flag: MachineInstr::FmNsz);
5901
5902 return false;
5903}
5904
5905void CombinerHelper::applyFsubToFneg(MachineInstr &MI,
5906 Register &MatchInfo) const {
5907 Register Dst = MI.getOperand(i: 0).getReg();
5908 Builder.buildFNeg(
5909 Dst, Src0: Builder.buildFCanonicalize(Dst: MRI.getType(Reg: Dst), Src0: MatchInfo).getReg(Idx: 0));
5910 eraseInst(MI);
5911}
5912
5913/// Checks if \p MI is TargetOpcode::G_FMUL and contractable either
5914/// due to global flags or MachineInstr flags.
5915static bool isContractableFMul(MachineInstr &MI, bool AllowFusionGlobally) {
5916 if (MI.getOpcode() != TargetOpcode::G_FMUL)
5917 return false;
5918 return AllowFusionGlobally || MI.getFlag(Flag: MachineInstr::MIFlag::FmContract);
5919}
5920
5921static bool hasMoreUses(const MachineInstr &MI0, const MachineInstr &MI1,
5922 const MachineRegisterInfo &MRI) {
5923 return std::distance(first: MRI.use_instr_nodbg_begin(RegNo: MI0.getOperand(i: 0).getReg()),
5924 last: MRI.use_instr_nodbg_end()) >
5925 std::distance(first: MRI.use_instr_nodbg_begin(RegNo: MI1.getOperand(i: 0).getReg()),
5926 last: MRI.use_instr_nodbg_end());
5927}
5928
5929bool CombinerHelper::canCombineFMadOrFMA(MachineInstr &MI,
5930 bool &AllowFusionGlobally,
5931 bool &HasFMAD, bool &Aggressive,
5932 bool CanReassociate) const {
5933
5934 auto *MF = MI.getMF();
5935 const auto &TLI = *MF->getSubtarget().getTargetLowering();
5936 const TargetOptions &Options = MF->getTarget().Options;
5937 LLT DstType = MRI.getType(Reg: MI.getOperand(i: 0).getReg());
5938
5939 if (CanReassociate &&
5940 !(Options.UnsafeFPMath || MI.getFlag(Flag: MachineInstr::MIFlag::FmReassoc)))
5941 return false;
5942
5943 // Floating-point multiply-add with intermediate rounding.
5944 HasFMAD = (!isPreLegalize() && TLI.isFMADLegal(MI, Ty: DstType));
5945 // Floating-point multiply-add without intermediate rounding.
5946 bool HasFMA = TLI.isFMAFasterThanFMulAndFAdd(MF: *MF, DstType) &&
5947 isLegalOrBeforeLegalizer(Query: {TargetOpcode::G_FMA, {DstType}});
5948 // No valid opcode, do not combine.
5949 if (!HasFMAD && !HasFMA)
5950 return false;
5951
5952 AllowFusionGlobally = Options.AllowFPOpFusion == FPOpFusion::Fast ||
5953 Options.UnsafeFPMath || HasFMAD;
5954 // If the addition is not contractable, do not combine.
5955 if (!AllowFusionGlobally && !MI.getFlag(Flag: MachineInstr::MIFlag::FmContract))
5956 return false;
5957
5958 Aggressive = TLI.enableAggressiveFMAFusion(Ty: DstType);
5959 return true;
5960}
5961
5962bool CombinerHelper::matchCombineFAddFMulToFMadOrFMA(
5963 MachineInstr &MI,
5964 std::function<void(MachineIRBuilder &)> &MatchInfo) const {
5965 assert(MI.getOpcode() == TargetOpcode::G_FADD);
5966
5967 bool AllowFusionGlobally, HasFMAD, Aggressive;
5968 if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
5969 return false;
5970
5971 Register Op1 = MI.getOperand(i: 1).getReg();
5972 Register Op2 = MI.getOperand(i: 2).getReg();
5973 DefinitionAndSourceRegister LHS = {.MI: MRI.getVRegDef(Reg: Op1), .Reg: Op1};
5974 DefinitionAndSourceRegister RHS = {.MI: MRI.getVRegDef(Reg: Op2), .Reg: Op2};
5975 unsigned PreferredFusedOpcode =
5976 HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
5977
5978 // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)),
5979 // prefer to fold the multiply with fewer uses.
5980 if (Aggressive && isContractableFMul(MI&: *LHS.MI, AllowFusionGlobally) &&
5981 isContractableFMul(MI&: *RHS.MI, AllowFusionGlobally)) {
5982 if (hasMoreUses(MI0: *LHS.MI, MI1: *RHS.MI, MRI))
5983 std::swap(a&: LHS, b&: RHS);
5984 }
5985
5986 // fold (fadd (fmul x, y), z) -> (fma x, y, z)
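// e.g. %m:_(s32) = G_FMUL %x, %y ; %s:_(s32) = G_FADD %m, %z
//   --> %s:_(s32) = G_FMA %x, %y, %z   (G_FMAD when HasFMAD)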
5987 if (isContractableFMul(MI&: *LHS.MI, AllowFusionGlobally) &&
5988 (Aggressive || MRI.hasOneNonDBGUse(RegNo: LHS.Reg))) {
5989 MatchInfo = [=, &MI](MachineIRBuilder &B) {
5990 B.buildInstr(Opc: PreferredFusedOpcode, DstOps: {MI.getOperand(i: 0).getReg()},
5991 SrcOps: {LHS.MI->getOperand(i: 1).getReg(),
5992 LHS.MI->getOperand(i: 2).getReg(), RHS.Reg});
5993 };
5994 return true;
5995 }
5996
5997 // fold (fadd x, (fmul y, z)) -> (fma y, z, x)
5998 if (isContractableFMul(MI&: *RHS.MI, AllowFusionGlobally) &&
5999 (Aggressive || MRI.hasOneNonDBGUse(RegNo: RHS.Reg))) {
6000 MatchInfo = [=, &MI](MachineIRBuilder &B) {
6001 B.buildInstr(Opc: PreferredFusedOpcode, DstOps: {MI.getOperand(i: 0).getReg()},
6002 SrcOps: {RHS.MI->getOperand(i: 1).getReg(),
6003 RHS.MI->getOperand(i: 2).getReg(), LHS.Reg});
6004 };
6005 return true;
6006 }
6007
6008 return false;
6009}
6010
6011bool CombinerHelper::matchCombineFAddFpExtFMulToFMadOrFMA(
6012 MachineInstr &MI,
6013 std::function<void(MachineIRBuilder &)> &MatchInfo) const {
6014 assert(MI.getOpcode() == TargetOpcode::G_FADD);
6015
6016 bool AllowFusionGlobally, HasFMAD, Aggressive;
6017 if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
6018 return false;
6019
6020 const auto &TLI = *MI.getMF()->getSubtarget().getTargetLowering();
6021 Register Op1 = MI.getOperand(i: 1).getReg();
6022 Register Op2 = MI.getOperand(i: 2).getReg();
6023 DefinitionAndSourceRegister LHS = {.MI: MRI.getVRegDef(Reg: Op1), .Reg: Op1};
6024 DefinitionAndSourceRegister RHS = {.MI: MRI.getVRegDef(Reg: Op2), .Reg: Op2};
6025 LLT DstType = MRI.getType(Reg: MI.getOperand(i: 0).getReg());
6026
6027 unsigned PreferredFusedOpcode =
6028 HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
6029
6030 // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)),
6031 // prefer to fold the multiply with fewer uses.
6032 if (Aggressive && isContractableFMul(MI&: *LHS.MI, AllowFusionGlobally) &&
6033 isContractableFMul(MI&: *RHS.MI, AllowFusionGlobally)) {
6034 if (hasMoreUses(MI0: *LHS.MI, MI1: *RHS.MI, MRI))
6035 std::swap(a&: LHS, b&: RHS);
6036 }
6037
6038 // fold (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z)
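// e.g. with s16 multiplied values extended to s32 (illustrative):
//   %m:_(s16) = G_FMUL %x, %y ; %e:_(s32) = G_FPEXT %m ; G_FADD %e, %z
//   --> G_FMA (G_FPEXT %x), (G_FPEXT %y), %z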
6039 MachineInstr *FpExtSrc;
6040 if (mi_match(R: LHS.Reg, MRI, P: m_GFPExt(Src: m_MInstr(MI&: FpExtSrc))) &&
6041 isContractableFMul(MI&: *FpExtSrc, AllowFusionGlobally) &&
6042 TLI.isFPExtFoldable(MI, Opcode: PreferredFusedOpcode, DestTy: DstType,
6043 SrcTy: MRI.getType(Reg: FpExtSrc->getOperand(i: 1).getReg()))) {
6044 MatchInfo = [=, &MI](MachineIRBuilder &B) {
6045 auto FpExtX = B.buildFPExt(Res: DstType, Op: FpExtSrc->getOperand(i: 1).getReg());
6046 auto FpExtY = B.buildFPExt(Res: DstType, Op: FpExtSrc->getOperand(i: 2).getReg());
6047 B.buildInstr(Opc: PreferredFusedOpcode, DstOps: {MI.getOperand(i: 0).getReg()},
6048 SrcOps: {FpExtX.getReg(Idx: 0), FpExtY.getReg(Idx: 0), RHS.Reg});
6049 };
6050 return true;
6051 }
6052
6053 // fold (fadd z, (fpext (fmul x, y))) -> (fma (fpext x), (fpext y), z)
6054 // Note: Commutes FADD operands.
6055 if (mi_match(R: RHS.Reg, MRI, P: m_GFPExt(Src: m_MInstr(MI&: FpExtSrc))) &&
6056 isContractableFMul(MI&: *FpExtSrc, AllowFusionGlobally) &&
6057 TLI.isFPExtFoldable(MI, Opcode: PreferredFusedOpcode, DestTy: DstType,
6058 SrcTy: MRI.getType(Reg: FpExtSrc->getOperand(i: 1).getReg()))) {
6059 MatchInfo = [=, &MI](MachineIRBuilder &B) {
6060 auto FpExtX = B.buildFPExt(Res: DstType, Op: FpExtSrc->getOperand(i: 1).getReg());
6061 auto FpExtY = B.buildFPExt(Res: DstType, Op: FpExtSrc->getOperand(i: 2).getReg());
6062 B.buildInstr(Opc: PreferredFusedOpcode, DstOps: {MI.getOperand(i: 0).getReg()},
6063 SrcOps: {FpExtX.getReg(Idx: 0), FpExtY.getReg(Idx: 0), LHS.Reg});
6064 };
6065 return true;
6066 }
6067
6068 return false;
6069}
6070
6071bool CombinerHelper::matchCombineFAddFMAFMulToFMadOrFMA(
6072 MachineInstr &MI,
6073 std::function<void(MachineIRBuilder &)> &MatchInfo) const {
6074 assert(MI.getOpcode() == TargetOpcode::G_FADD);
6075
6076 bool AllowFusionGlobally, HasFMAD, Aggressive;
6077 if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive, CanReassociate: true))
6078 return false;
6079
6080 Register Op1 = MI.getOperand(i: 1).getReg();
6081 Register Op2 = MI.getOperand(i: 2).getReg();
6082 DefinitionAndSourceRegister LHS = {.MI: MRI.getVRegDef(Reg: Op1), .Reg: Op1};
6083 DefinitionAndSourceRegister RHS = {.MI: MRI.getVRegDef(Reg: Op2), .Reg: Op2};
6084 LLT DstTy = MRI.getType(Reg: MI.getOperand(i: 0).getReg());
6085
6086 unsigned PreferredFusedOpcode =
6087 HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
6088
6089 // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)),
6090 // prefer to fold the multiply with fewer uses.
6091 if (Aggressive && isContractableFMul(MI&: *LHS.MI, AllowFusionGlobally) &&
6092 isContractableFMul(MI&: *RHS.MI, AllowFusionGlobally)) {
6093 if (hasMoreUses(MI0: *LHS.MI, MI1: *RHS.MI, MRI))
6094 std::swap(a&: LHS, b&: RHS);
6095 }
6096
6097 MachineInstr *FMA = nullptr;
6098 Register Z;
6099 // fold (fadd (fma x, y, (fmul u, v)), z) -> (fma x, y, (fma u, v, z))
6100 if (LHS.MI->getOpcode() == PreferredFusedOpcode &&
6101 (MRI.getVRegDef(Reg: LHS.MI->getOperand(i: 3).getReg())->getOpcode() ==
6102 TargetOpcode::G_FMUL) &&
6103 MRI.hasOneNonDBGUse(RegNo: LHS.MI->getOperand(i: 0).getReg()) &&
6104 MRI.hasOneNonDBGUse(RegNo: LHS.MI->getOperand(i: 3).getReg())) {
6105 FMA = LHS.MI;
6106 Z = RHS.Reg;
6107 }
6108 // fold (fadd z, (fma x, y, (fmul u, v))) -> (fma x, y, (fma u, v, z))
6109 else if (RHS.MI->getOpcode() == PreferredFusedOpcode &&
6110 (MRI.getVRegDef(Reg: RHS.MI->getOperand(i: 3).getReg())->getOpcode() ==
6111 TargetOpcode::G_FMUL) &&
6112 MRI.hasOneNonDBGUse(RegNo: RHS.MI->getOperand(i: 0).getReg()) &&
6113 MRI.hasOneNonDBGUse(RegNo: RHS.MI->getOperand(i: 3).getReg())) {
6114 Z = LHS.Reg;
6115 FMA = RHS.MI;
6116 }
6117
6118 if (FMA) {
6119 MachineInstr *FMulMI = MRI.getVRegDef(Reg: FMA->getOperand(i: 3).getReg());
6120 Register X = FMA->getOperand(i: 1).getReg();
6121 Register Y = FMA->getOperand(i: 2).getReg();
6122 Register U = FMulMI->getOperand(i: 1).getReg();
6123 Register V = FMulMI->getOperand(i: 2).getReg();
6124
6125 MatchInfo = [=, &MI](MachineIRBuilder &B) {
6126 Register InnerFMA = MRI.createGenericVirtualRegister(Ty: DstTy);
6127 B.buildInstr(Opc: PreferredFusedOpcode, DstOps: {InnerFMA}, SrcOps: {U, V, Z});
6128 B.buildInstr(Opc: PreferredFusedOpcode, DstOps: {MI.getOperand(i: 0).getReg()},
6129 SrcOps: {X, Y, InnerFMA});
6130 };
6131 return true;
6132 }
6133
6134 return false;
6135}
6136
6137bool CombinerHelper::matchCombineFAddFpExtFMulToFMadOrFMAAggressive(
6138 MachineInstr &MI,
6139 std::function<void(MachineIRBuilder &)> &MatchInfo) const {
6140 assert(MI.getOpcode() == TargetOpcode::G_FADD);
6141
6142 bool AllowFusionGlobally, HasFMAD, Aggressive;
6143 if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
6144 return false;
6145
6146 if (!Aggressive)
6147 return false;
6148
6149 const auto &TLI = *MI.getMF()->getSubtarget().getTargetLowering();
6150 LLT DstType = MRI.getType(Reg: MI.getOperand(i: 0).getReg());
6151 Register Op1 = MI.getOperand(i: 1).getReg();
6152 Register Op2 = MI.getOperand(i: 2).getReg();
6153 DefinitionAndSourceRegister LHS = {.MI: MRI.getVRegDef(Reg: Op1), .Reg: Op1};
6154 DefinitionAndSourceRegister RHS = {.MI: MRI.getVRegDef(Reg: Op2), .Reg: Op2};
6155
6156 unsigned PreferredFusedOpcode =
6157 HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
6158
6159 // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)),
6160 // prefer to fold the multiply with fewer uses.
6161 if (Aggressive && isContractableFMul(MI&: *LHS.MI, AllowFusionGlobally) &&
6162 isContractableFMul(MI&: *RHS.MI, AllowFusionGlobally)) {
6163 if (hasMoreUses(MI0: *LHS.MI, MI1: *RHS.MI, MRI))
6164 std::swap(a&: LHS, b&: RHS);
6165 }
6166
6167 // Builds: (fma x, y, (fma (fpext u), (fpext v), z))
6168 auto buildMatchInfo = [=, &MI](Register U, Register V, Register Z, Register X,
6169 Register Y, MachineIRBuilder &B) {
6170 Register FpExtU = B.buildFPExt(Res: DstType, Op: U).getReg(Idx: 0);
6171 Register FpExtV = B.buildFPExt(Res: DstType, Op: V).getReg(Idx: 0);
6172 Register InnerFMA =
6173 B.buildInstr(Opc: PreferredFusedOpcode, DstOps: {DstType}, SrcOps: {FpExtU, FpExtV, Z})
6174 .getReg(Idx: 0);
6175 B.buildInstr(Opc: PreferredFusedOpcode, DstOps: {MI.getOperand(i: 0).getReg()},
6176 SrcOps: {X, Y, InnerFMA});
6177 };
6178
6179 MachineInstr *FMulMI, *FMAMI;
6180 // fold (fadd (fma x, y, (fpext (fmul u, v))), z)
6181 // -> (fma x, y, (fma (fpext u), (fpext v), z))
6182 if (LHS.MI->getOpcode() == PreferredFusedOpcode &&
6183 mi_match(R: LHS.MI->getOperand(i: 3).getReg(), MRI,
6184 P: m_GFPExt(Src: m_MInstr(MI&: FMulMI))) &&
6185 isContractableFMul(MI&: *FMulMI, AllowFusionGlobally) &&
6186 TLI.isFPExtFoldable(MI, Opcode: PreferredFusedOpcode, DestTy: DstType,
6187 SrcTy: MRI.getType(Reg: FMulMI->getOperand(i: 0).getReg()))) {
6188 MatchInfo = [=](MachineIRBuilder &B) {
6189 buildMatchInfo(FMulMI->getOperand(i: 1).getReg(),
6190 FMulMI->getOperand(i: 2).getReg(), RHS.Reg,
6191 LHS.MI->getOperand(i: 1).getReg(),
6192 LHS.MI->getOperand(i: 2).getReg(), B);
6193 };
6194 return true;
6195 }
6196
6197 // fold (fadd (fpext (fma x, y, (fmul u, v))), z)
6198 // -> (fma (fpext x), (fpext y), (fma (fpext u), (fpext v), z))
6199 // FIXME: This turns two single-precision and one double-precision
6200 // operation into two double-precision operations, which might not be
6201 // interesting for all targets, especially GPUs.
6202 if (mi_match(R: LHS.Reg, MRI, P: m_GFPExt(Src: m_MInstr(MI&: FMAMI))) &&
6203 FMAMI->getOpcode() == PreferredFusedOpcode) {
6204 MachineInstr *FMulMI = MRI.getVRegDef(Reg: FMAMI->getOperand(i: 3).getReg());
6205 if (isContractableFMul(MI&: *FMulMI, AllowFusionGlobally) &&
6206 TLI.isFPExtFoldable(MI, Opcode: PreferredFusedOpcode, DestTy: DstType,
6207 SrcTy: MRI.getType(Reg: FMAMI->getOperand(i: 0).getReg()))) {
6208 MatchInfo = [=](MachineIRBuilder &B) {
6209 Register X = FMAMI->getOperand(i: 1).getReg();
6210 Register Y = FMAMI->getOperand(i: 2).getReg();
6211 X = B.buildFPExt(Res: DstType, Op: X).getReg(Idx: 0);
6212 Y = B.buildFPExt(Res: DstType, Op: Y).getReg(Idx: 0);
6213 buildMatchInfo(FMulMI->getOperand(i: 1).getReg(),
6214 FMulMI->getOperand(i: 2).getReg(), RHS.Reg, X, Y, B);
6215 };
6216
6217 return true;
6218 }
6219 }
6220
6221 // fold (fadd z, (fma x, y, (fpext (fmul u, v))))
6222 // -> (fma x, y, (fma (fpext u), (fpext v), z))
6223 if (RHS.MI->getOpcode() == PreferredFusedOpcode &&
6224 mi_match(R: RHS.MI->getOperand(i: 3).getReg(), MRI,
6225 P: m_GFPExt(Src: m_MInstr(MI&: FMulMI))) &&
6226 isContractableFMul(MI&: *FMulMI, AllowFusionGlobally) &&
6227 TLI.isFPExtFoldable(MI, Opcode: PreferredFusedOpcode, DestTy: DstType,
6228 SrcTy: MRI.getType(Reg: FMulMI->getOperand(i: 0).getReg()))) {
6229 MatchInfo = [=](MachineIRBuilder &B) {
6230 buildMatchInfo(FMulMI->getOperand(i: 1).getReg(),
6231 FMulMI->getOperand(i: 2).getReg(), LHS.Reg,
6232 RHS.MI->getOperand(i: 1).getReg(),
6233 RHS.MI->getOperand(i: 2).getReg(), B);
6234 };
6235 return true;
6236 }
6237
6238 // fold (fadd z, (fpext (fma x, y, (fmul u, v))))
6239 // -> (fma (fpext x), (fpext y), (fma (fpext u), (fpext v), z))
6240 // FIXME: This turns two single-precision and one double-precision
6241 // operation into two double-precision operations, which might not be
6242 // interesting for all targets, especially GPUs.
6243 if (mi_match(R: RHS.Reg, MRI, P: m_GFPExt(Src: m_MInstr(MI&: FMAMI))) &&
6244 FMAMI->getOpcode() == PreferredFusedOpcode) {
6245 MachineInstr *FMulMI = MRI.getVRegDef(Reg: FMAMI->getOperand(i: 3).getReg());
6246 if (isContractableFMul(MI&: *FMulMI, AllowFusionGlobally) &&
6247 TLI.isFPExtFoldable(MI, Opcode: PreferredFusedOpcode, DestTy: DstType,
6248 SrcTy: MRI.getType(Reg: FMAMI->getOperand(i: 0).getReg()))) {
6249 MatchInfo = [=](MachineIRBuilder &B) {
6250 Register X = FMAMI->getOperand(i: 1).getReg();
6251 Register Y = FMAMI->getOperand(i: 2).getReg();
6252 X = B.buildFPExt(Res: DstType, Op: X).getReg(Idx: 0);
6253 Y = B.buildFPExt(Res: DstType, Op: Y).getReg(Idx: 0);
6254 buildMatchInfo(FMulMI->getOperand(i: 1).getReg(),
6255 FMulMI->getOperand(i: 2).getReg(), LHS.Reg, X, Y, B);
6256 };
6257 return true;
6258 }
6259 }
6260
6261 return false;
6262}
6263
6264bool CombinerHelper::matchCombineFSubFMulToFMadOrFMA(
6265 MachineInstr &MI,
6266 std::function<void(MachineIRBuilder &)> &MatchInfo) const {
6267 assert(MI.getOpcode() == TargetOpcode::G_FSUB);
6268
6269 bool AllowFusionGlobally, HasFMAD, Aggressive;
6270 if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
6271 return false;
6272
6273 Register Op1 = MI.getOperand(i: 1).getReg();
6274 Register Op2 = MI.getOperand(i: 2).getReg();
6275 DefinitionAndSourceRegister LHS = {.MI: MRI.getVRegDef(Reg: Op1), .Reg: Op1};
6276 DefinitionAndSourceRegister RHS = {.MI: MRI.getVRegDef(Reg: Op2), .Reg: Op2};
6277 LLT DstTy = MRI.getType(Reg: MI.getOperand(i: 0).getReg());
6278
6279 // If we have two choices trying to fold (fsub (fmul u, v), (fmul x, y)),
6280 // prefer to fold the multiply with fewer uses.
6281 bool FirstMulHasFewerUses = true;
6282 if (isContractableFMul(MI&: *LHS.MI, AllowFusionGlobally) &&
6283 isContractableFMul(MI&: *RHS.MI, AllowFusionGlobally) &&
6284 hasMoreUses(MI0: *LHS.MI, MI1: *RHS.MI, MRI))
6285 FirstMulHasFewerUses = false;
6286
6287 unsigned PreferredFusedOpcode =
6288 HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
6289
6290 // fold (fsub (fmul x, y), z) -> (fma x, y, -z)
6291 if (FirstMulHasFewerUses &&
6292 (isContractableFMul(MI&: *LHS.MI, AllowFusionGlobally) &&
6293 (Aggressive || MRI.hasOneNonDBGUse(RegNo: LHS.Reg)))) {
6294 MatchInfo = [=, &MI](MachineIRBuilder &B) {
6295 Register NegZ = B.buildFNeg(Dst: DstTy, Src0: RHS.Reg).getReg(Idx: 0);
6296 B.buildInstr(Opc: PreferredFusedOpcode, DstOps: {MI.getOperand(i: 0).getReg()},
6297 SrcOps: {LHS.MI->getOperand(i: 1).getReg(),
6298 LHS.MI->getOperand(i: 2).getReg(), NegZ});
6299 };
6300 return true;
6301 }
6302 // fold (fsub x, (fmul y, z)) -> (fma -y, z, x)
6303 else if ((isContractableFMul(MI&: *RHS.MI, AllowFusionGlobally) &&
6304 (Aggressive || MRI.hasOneNonDBGUse(RegNo: RHS.Reg)))) {
6305 MatchInfo = [=, &MI](MachineIRBuilder &B) {
6306 Register NegY =
6307 B.buildFNeg(Dst: DstTy, Src0: RHS.MI->getOperand(i: 1).getReg()).getReg(Idx: 0);
6308 B.buildInstr(Opc: PreferredFusedOpcode, DstOps: {MI.getOperand(i: 0).getReg()},
6309 SrcOps: {NegY, RHS.MI->getOperand(i: 2).getReg(), LHS.Reg});
6310 };
6311 return true;
6312 }
6313
6314 return false;
6315}
6316
6317bool CombinerHelper::matchCombineFSubFNegFMulToFMadOrFMA(
6318 MachineInstr &MI,
6319 std::function<void(MachineIRBuilder &)> &MatchInfo) const {
6320 assert(MI.getOpcode() == TargetOpcode::G_FSUB);
6321
6322 bool AllowFusionGlobally, HasFMAD, Aggressive;
6323 if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
6324 return false;
6325
6326 Register LHSReg = MI.getOperand(i: 1).getReg();
6327 Register RHSReg = MI.getOperand(i: 2).getReg();
6328 LLT DstTy = MRI.getType(Reg: MI.getOperand(i: 0).getReg());
6329
6330 unsigned PreferredFusedOpcode =
6331 HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
6332
6333 MachineInstr *FMulMI;
6334 // fold (fsub (fneg (fmul x, y)), z) -> (fma (fneg x), y, (fneg z))
6335 if (mi_match(R: LHSReg, MRI, P: m_GFNeg(Src: m_MInstr(MI&: FMulMI))) &&
6336 (Aggressive || (MRI.hasOneNonDBGUse(RegNo: LHSReg) &&
6337 MRI.hasOneNonDBGUse(RegNo: FMulMI->getOperand(i: 0).getReg()))) &&
6338 isContractableFMul(MI&: *FMulMI, AllowFusionGlobally)) {
6339 MatchInfo = [=, &MI](MachineIRBuilder &B) {
6340 Register NegX =
6341 B.buildFNeg(Dst: DstTy, Src0: FMulMI->getOperand(i: 1).getReg()).getReg(Idx: 0);
6342 Register NegZ = B.buildFNeg(Dst: DstTy, Src0: RHSReg).getReg(Idx: 0);
6343 B.buildInstr(Opc: PreferredFusedOpcode, DstOps: {MI.getOperand(i: 0).getReg()},
6344 SrcOps: {NegX, FMulMI->getOperand(i: 2).getReg(), NegZ});
6345 };
6346 return true;
6347 }
6348
6349 // fold (fsub x, (fneg (fmul y, z))) -> (fma y, z, x)
6350 if (mi_match(R: RHSReg, MRI, P: m_GFNeg(Src: m_MInstr(MI&: FMulMI))) &&
6351 (Aggressive || (MRI.hasOneNonDBGUse(RegNo: RHSReg) &&
6352 MRI.hasOneNonDBGUse(RegNo: FMulMI->getOperand(i: 0).getReg()))) &&
6353 isContractableFMul(MI&: *FMulMI, AllowFusionGlobally)) {
6354 MatchInfo = [=, &MI](MachineIRBuilder &B) {
6355 B.buildInstr(Opc: PreferredFusedOpcode, DstOps: {MI.getOperand(i: 0).getReg()},
6356 SrcOps: {FMulMI->getOperand(i: 1).getReg(),
6357 FMulMI->getOperand(i: 2).getReg(), LHSReg});
6358 };
6359 return true;
6360 }
6361
6362 return false;
6363}
6364
6365bool CombinerHelper::matchCombineFSubFpExtFMulToFMadOrFMA(
6366 MachineInstr &MI,
6367 std::function<void(MachineIRBuilder &)> &MatchInfo) const {
6368 assert(MI.getOpcode() == TargetOpcode::G_FSUB);
6369
6370 bool AllowFusionGlobally, HasFMAD, Aggressive;
6371 if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
6372 return false;
6373
6374 Register LHSReg = MI.getOperand(i: 1).getReg();
6375 Register RHSReg = MI.getOperand(i: 2).getReg();
6376 LLT DstTy = MRI.getType(Reg: MI.getOperand(i: 0).getReg());
6377
6378 unsigned PreferredFusedOpcode =
6379 HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
6380
6381 MachineInstr *FMulMI;
6382 // fold (fsub (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), (fneg z))
6383 if (mi_match(R: LHSReg, MRI, P: m_GFPExt(Src: m_MInstr(MI&: FMulMI))) &&
6384 isContractableFMul(MI&: *FMulMI, AllowFusionGlobally) &&
6385 (Aggressive || MRI.hasOneNonDBGUse(RegNo: LHSReg))) {
6386 MatchInfo = [=, &MI](MachineIRBuilder &B) {
6387 Register FpExtX =
6388 B.buildFPExt(Res: DstTy, Op: FMulMI->getOperand(i: 1).getReg()).getReg(Idx: 0);
6389 Register FpExtY =
6390 B.buildFPExt(Res: DstTy, Op: FMulMI->getOperand(i: 2).getReg()).getReg(Idx: 0);
6391 Register NegZ = B.buildFNeg(Dst: DstTy, Src0: RHSReg).getReg(Idx: 0);
6392 B.buildInstr(Opc: PreferredFusedOpcode, DstOps: {MI.getOperand(i: 0).getReg()},
6393 SrcOps: {FpExtX, FpExtY, NegZ});
6394 };
6395 return true;
6396 }
6397
6398 // fold (fsub x, (fpext (fmul y, z))) -> (fma (fneg (fpext y)), (fpext z), x)
6399 if (mi_match(R: RHSReg, MRI, P: m_GFPExt(Src: m_MInstr(MI&: FMulMI))) &&
6400 isContractableFMul(MI&: *FMulMI, AllowFusionGlobally) &&
6401 (Aggressive || MRI.hasOneNonDBGUse(RegNo: RHSReg))) {
6402 MatchInfo = [=, &MI](MachineIRBuilder &B) {
6403 Register FpExtY =
6404 B.buildFPExt(Res: DstTy, Op: FMulMI->getOperand(i: 1).getReg()).getReg(Idx: 0);
6405 Register NegY = B.buildFNeg(Dst: DstTy, Src0: FpExtY).getReg(Idx: 0);
6406 Register FpExtZ =
6407 B.buildFPExt(Res: DstTy, Op: FMulMI->getOperand(i: 2).getReg()).getReg(Idx: 0);
6408 B.buildInstr(Opc: PreferredFusedOpcode, DstOps: {MI.getOperand(i: 0).getReg()},
6409 SrcOps: {NegY, FpExtZ, LHSReg});
6410 };
6411 return true;
6412 }
6413
6414 return false;
6415}
6416
6417bool CombinerHelper::matchCombineFSubFpExtFNegFMulToFMadOrFMA(
6418 MachineInstr &MI,
6419 std::function<void(MachineIRBuilder &)> &MatchInfo) const {
6420 assert(MI.getOpcode() == TargetOpcode::G_FSUB);
6421
6422 bool AllowFusionGlobally, HasFMAD, Aggressive;
6423 if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
6424 return false;
6425
6426 const auto &TLI = *MI.getMF()->getSubtarget().getTargetLowering();
6427 LLT DstTy = MRI.getType(Reg: MI.getOperand(i: 0).getReg());
6428 Register LHSReg = MI.getOperand(i: 1).getReg();
6429 Register RHSReg = MI.getOperand(i: 2).getReg();
6430
6431 unsigned PreferredFusedOpcode =
6432 HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
6433
6434 auto buildMatchInfo = [=](Register Dst, Register X, Register Y, Register Z,
6435 MachineIRBuilder &B) {
6436 Register FpExtX = B.buildFPExt(Res: DstTy, Op: X).getReg(Idx: 0);
6437 Register FpExtY = B.buildFPExt(Res: DstTy, Op: Y).getReg(Idx: 0);
6438 B.buildInstr(Opc: PreferredFusedOpcode, DstOps: {Dst}, SrcOps: {FpExtX, FpExtY, Z});
6439 };
6440
6441 MachineInstr *FMulMI;
6442 // fold (fsub (fpext (fneg (fmul x, y))), z) ->
6443 // (fneg (fma (fpext x), (fpext y), z))
6444 // fold (fsub (fneg (fpext (fmul x, y))), z) ->
6445 // (fneg (fma (fpext x), (fpext y), z))
6446 if ((mi_match(R: LHSReg, MRI, P: m_GFPExt(Src: m_GFNeg(Src: m_MInstr(MI&: FMulMI)))) ||
6447 mi_match(R: LHSReg, MRI, P: m_GFNeg(Src: m_GFPExt(Src: m_MInstr(MI&: FMulMI))))) &&
6448 isContractableFMul(MI&: *FMulMI, AllowFusionGlobally) &&
6449 TLI.isFPExtFoldable(MI, Opcode: PreferredFusedOpcode, DestTy: DstTy,
6450 SrcTy: MRI.getType(Reg: FMulMI->getOperand(i: 0).getReg()))) {
6451 MatchInfo = [=, &MI](MachineIRBuilder &B) {
6452 Register FMAReg = MRI.createGenericVirtualRegister(Ty: DstTy);
6453 buildMatchInfo(FMAReg, FMulMI->getOperand(i: 1).getReg(),
6454 FMulMI->getOperand(i: 2).getReg(), RHSReg, B);
6455 B.buildFNeg(Dst: MI.getOperand(i: 0).getReg(), Src0: FMAReg);
6456 };
6457 return true;
6458 }
6459
6460 // fold (fsub x, (fpext (fneg (fmul y, z)))) -> (fma (fpext y), (fpext z), x)
6461 // fold (fsub x, (fneg (fpext (fmul y, z)))) -> (fma (fpext y), (fpext z), x)
6462 if ((mi_match(R: RHSReg, MRI, P: m_GFPExt(Src: m_GFNeg(Src: m_MInstr(MI&: FMulMI)))) ||
6463 mi_match(R: RHSReg, MRI, P: m_GFNeg(Src: m_GFPExt(Src: m_MInstr(MI&: FMulMI))))) &&
6464 isContractableFMul(MI&: *FMulMI, AllowFusionGlobally) &&
6465 TLI.isFPExtFoldable(MI, Opcode: PreferredFusedOpcode, DestTy: DstTy,
6466 SrcTy: MRI.getType(Reg: FMulMI->getOperand(i: 0).getReg()))) {
6467 MatchInfo = [=, &MI](MachineIRBuilder &B) {
6468 buildMatchInfo(MI.getOperand(i: 0).getReg(), FMulMI->getOperand(i: 1).getReg(),
6469 FMulMI->getOperand(i: 2).getReg(), LHSReg, B);
6470 };
6471 return true;
6472 }
6473
6474 return false;
6475}
6476
6477bool CombinerHelper::matchCombineFMinMaxNaN(MachineInstr &MI,
6478 unsigned &IdxToPropagate) const {
6479 bool PropagateNaN;
6480 switch (MI.getOpcode()) {
6481 default:
6482 return false;
6483 case TargetOpcode::G_FMINNUM:
6484 case TargetOpcode::G_FMAXNUM:
6485 PropagateNaN = false;
6486 break;
6487 case TargetOpcode::G_FMINIMUM:
6488 case TargetOpcode::G_FMAXIMUM:
6489 PropagateNaN = true;
6490 break;
6491 }
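// E.g. (G_FMINNUM %x, NaN) folds to %x, while (G_FMINIMUM %x, NaN) folds
// to the NaN operand, matching IEEE minNum vs. minimum semantics.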
6492
6493 auto MatchNaN = [&](unsigned Idx) {
6494 Register MaybeNaNReg = MI.getOperand(i: Idx).getReg();
6495 const ConstantFP *MaybeCst = getConstantFPVRegVal(VReg: MaybeNaNReg, MRI);
6496 if (!MaybeCst || !MaybeCst->getValueAPF().isNaN())
6497 return false;
6498 IdxToPropagate = PropagateNaN ? Idx : (Idx == 1 ? 2 : 1);
6499 return true;
6500 };
6501
6502 return MatchNaN(1) || MatchNaN(2);
6503}
6504
6505bool CombinerHelper::matchAddSubSameReg(MachineInstr &MI, Register &Src) const {
6506 assert(MI.getOpcode() == TargetOpcode::G_ADD && "Expected a G_ADD");
6507 Register LHS = MI.getOperand(i: 1).getReg();
6508 Register RHS = MI.getOperand(i: 2).getReg();
6509
6510 // Helper lambda to check for opportunities for
6511 // A + (B - A) -> B
6512 // (B - A) + A -> B
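// e.g. %d:_(s32) = G_SUB %b, %a ; %r:_(s32) = G_ADD %a, %d --> %r = %b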
6513 auto CheckFold = [&](Register MaybeSub, Register MaybeSameReg) {
6514 Register Reg;
6515 return mi_match(R: MaybeSub, MRI, P: m_GSub(L: m_Reg(R&: Src), R: m_Reg(R&: Reg))) &&
6516 Reg == MaybeSameReg;
6517 };
6518 return CheckFold(LHS, RHS) || CheckFold(RHS, LHS);
6519}
6520
6521bool CombinerHelper::matchBuildVectorIdentityFold(MachineInstr &MI,
6522 Register &MatchInfo) const {
6523 // This combine folds the following patterns:
6524 //
6525 // G_BUILD_VECTOR_TRUNC (G_BITCAST(x), G_LSHR(G_BITCAST(x), k))
6526 // G_BUILD_VECTOR(G_TRUNC(G_BITCAST(x)), G_TRUNC(G_LSHR(G_BITCAST(x), k)))
6527 // into
6528 // x
6529 // if
6530 // k == sizeof(VecEltTy)/2
6531 // type(x) == type(dst)
6532 //
6533 // G_BUILD_VECTOR(G_TRUNC(G_BITCAST(x)), undef)
6534 // into
6535 // x
6536 // if
6537 // type(x) == type(dst)
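//
// E.g. with %x:_(<2 x s16>) (illustrative):
//   %cast:_(s32) = G_BITCAST %x
//   %hi:_(s32) = G_LSHR %cast, 16
//   %v:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC %cast, %hi
// rebuilds exactly %x, so %v can be replaced by %x.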
6538
6539 LLT DstVecTy = MRI.getType(Reg: MI.getOperand(i: 0).getReg());
6540 LLT DstEltTy = DstVecTy.getElementType();
6541
6542 Register Lo, Hi;
6543
6544 if (mi_match(
6545 MI, MRI,
6546 P: m_GBuildVector(L: m_GTrunc(Src: m_GBitcast(Src: m_Reg(R&: Lo))), R: m_GImplicitDef()))) {
6547 MatchInfo = Lo;
6548 return MRI.getType(Reg: MatchInfo) == DstVecTy;
6549 }
6550
6551 std::optional<ValueAndVReg> ShiftAmount;
6552 const auto LoPattern = m_GBitcast(Src: m_Reg(R&: Lo));
6553 const auto HiPattern = m_GLShr(L: m_GBitcast(Src: m_Reg(R&: Hi)), R: m_GCst(ValReg&: ShiftAmount));
6554 if (mi_match(
6555 MI, MRI,
6556 P: m_any_of(preds: m_GBuildVectorTrunc(L: LoPattern, R: HiPattern),
6557 preds: m_GBuildVector(L: m_GTrunc(Src: LoPattern), R: m_GTrunc(Src: HiPattern))))) {
6558 if (Lo == Hi && ShiftAmount->Value == DstEltTy.getSizeInBits()) {
6559 MatchInfo = Lo;
6560 return MRI.getType(Reg: MatchInfo) == DstVecTy;
6561 }
6562 }
6563
6564 return false;
6565}
6566
6567bool CombinerHelper::matchTruncBuildVectorFold(MachineInstr &MI,
6568 Register &MatchInfo) const {
6569 // Replace (G_TRUNC (G_BITCAST (G_BUILD_VECTOR x, y))) with just x
6570 // if type(x) == type(G_TRUNC)
6571 if (!mi_match(R: MI.getOperand(i: 1).getReg(), MRI,
6572 P: m_GBitcast(Src: m_GBuildVector(L: m_Reg(R&: MatchInfo), R: m_Reg()))))
6573 return false;
6574
6575 return MRI.getType(Reg: MatchInfo) == MRI.getType(Reg: MI.getOperand(i: 0).getReg());
6576}
6577
6578bool CombinerHelper::matchTruncLshrBuildVectorFold(MachineInstr &MI,
6579 Register &MatchInfo) const {
6580 // Replace (G_TRUNC (G_LSHR (G_BITCAST (G_BUILD_VECTOR x, y)), K)) with
6581 // y if K == size of vector element type
6582 std::optional<ValueAndVReg> ShiftAmt;
6583 if (!mi_match(R: MI.getOperand(i: 1).getReg(), MRI,
6584 P: m_GLShr(L: m_GBitcast(Src: m_GBuildVector(L: m_Reg(), R: m_Reg(R&: MatchInfo))),
6585 R: m_GCst(ValReg&: ShiftAmt))))
6586 return false;
6587
6588 LLT MatchTy = MRI.getType(Reg: MatchInfo);
6589 return ShiftAmt->Value.getZExtValue() == MatchTy.getSizeInBits() &&
6590 MatchTy == MRI.getType(Reg: MI.getOperand(i: 0).getReg());
6591}
6592
6593unsigned CombinerHelper::getFPMinMaxOpcForSelect(
6594 CmpInst::Predicate Pred, LLT DstTy,
6595 SelectPatternNaNBehaviour VsNaNRetVal) const {
6596 assert(VsNaNRetVal != SelectPatternNaNBehaviour::NOT_APPLICABLE &&
6597 "Expected a NaN behaviour?");
6598 // Choose an opcode based on legality or the behaviour when one of the
6599 // LHS/RHS may be NaN.
6600 switch (Pred) {
6601 default:
6602 return 0;
6603 case CmpInst::FCMP_UGT:
6604 case CmpInst::FCMP_UGE:
6605 case CmpInst::FCMP_OGT:
6606 case CmpInst::FCMP_OGE:
6607 if (VsNaNRetVal == SelectPatternNaNBehaviour::RETURNS_OTHER)
6608 return TargetOpcode::G_FMAXNUM;
6609 if (VsNaNRetVal == SelectPatternNaNBehaviour::RETURNS_NAN)
6610 return TargetOpcode::G_FMAXIMUM;
6611 if (isLegal(Query: {TargetOpcode::G_FMAXNUM, {DstTy}}))
6612 return TargetOpcode::G_FMAXNUM;
6613 if (isLegal(Query: {TargetOpcode::G_FMAXIMUM, {DstTy}}))
6614 return TargetOpcode::G_FMAXIMUM;
6615 return 0;
6616 case CmpInst::FCMP_ULT:
6617 case CmpInst::FCMP_ULE:
6618 case CmpInst::FCMP_OLT:
6619 case CmpInst::FCMP_OLE:
6620 if (VsNaNRetVal == SelectPatternNaNBehaviour::RETURNS_OTHER)
6621 return TargetOpcode::G_FMINNUM;
6622 if (VsNaNRetVal == SelectPatternNaNBehaviour::RETURNS_NAN)
6623 return TargetOpcode::G_FMINIMUM;
6624 if (isLegal(Query: {TargetOpcode::G_FMINNUM, {DstTy}}))
6625 return TargetOpcode::G_FMINNUM;
6626 if (isLegal(Query: {TargetOpcode::G_FMINIMUM, {DstTy}}))
6627 return TargetOpcode::G_FMINIMUM;
6628 return 0;
6629 }
6630}
6631
6632CombinerHelper::SelectPatternNaNBehaviour
6633CombinerHelper::computeRetValAgainstNaN(Register LHS, Register RHS,
6634 bool IsOrderedComparison) const {
6635 bool LHSSafe = isKnownNeverNaN(Val: LHS, MRI);
6636 bool RHSSafe = isKnownNeverNaN(Val: RHS, MRI);
6637 // Completely unsafe.
6638 if (!LHSSafe && !RHSSafe)
6639 return SelectPatternNaNBehaviour::NOT_APPLICABLE;
6640 if (LHSSafe && RHSSafe)
6641 return SelectPatternNaNBehaviour::RETURNS_ANY;
6642 // An ordered comparison will return false when given a NaN, so it
6643 // returns the RHS.
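// E.g. in (select (fcmp olt %x, %y), %x, %y) with only %x known never-NaN,
// a NaN %y makes the ordered compare false, so the select picks %y and the
// pattern RETURNS_NAN.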
6644 if (IsOrderedComparison)
6645 return LHSSafe ? SelectPatternNaNBehaviour::RETURNS_NAN
6646 : SelectPatternNaNBehaviour::RETURNS_OTHER;
6647 // An unordered comparison will return true when given a NaN, so it
6648 // returns the LHS.
6649 return LHSSafe ? SelectPatternNaNBehaviour::RETURNS_OTHER
6650 : SelectPatternNaNBehaviour::RETURNS_NAN;
6651}
6652
6653bool CombinerHelper::matchFPSelectToMinMax(Register Dst, Register Cond,
6654 Register TrueVal, Register FalseVal,
6655 BuildFnTy &MatchInfo) const {
6656 // Match: select (fcmp cond x, y) x, y
6657 // select (fcmp cond x, y) y, x
6658 // And turn it into fminnum/fmaxnum or fminimum/fmaximum based on the condition.
6659 LLT DstTy = MRI.getType(Reg: Dst);
6660 // Bail out early on pointers, since we'll never want to fold to a min/max.
6661 if (DstTy.isPointer())
6662 return false;
6663 // Match a floating point compare with a less-than/greater-than predicate.
6664 // TODO: Allow multiple users of the compare if they are all selects.
6665 CmpInst::Predicate Pred;
6666 Register CmpLHS, CmpRHS;
6667 if (!mi_match(R: Cond, MRI,
6668 P: m_OneNonDBGUse(
6669 SP: m_GFCmp(P: m_Pred(P&: Pred), L: m_Reg(R&: CmpLHS), R: m_Reg(R&: CmpRHS)))) ||
6670 CmpInst::isEquality(pred: Pred))
6671 return false;
6672 SelectPatternNaNBehaviour ResWithKnownNaNInfo =
6673 computeRetValAgainstNaN(LHS: CmpLHS, RHS: CmpRHS, IsOrderedComparison: CmpInst::isOrdered(predicate: Pred));
6674 if (ResWithKnownNaNInfo == SelectPatternNaNBehaviour::NOT_APPLICABLE)
6675 return false;
6676 if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
6677 std::swap(a&: CmpLHS, b&: CmpRHS);
6678 Pred = CmpInst::getSwappedPredicate(pred: Pred);
6679 if (ResWithKnownNaNInfo == SelectPatternNaNBehaviour::RETURNS_NAN)
6680 ResWithKnownNaNInfo = SelectPatternNaNBehaviour::RETURNS_OTHER;
6681 else if (ResWithKnownNaNInfo == SelectPatternNaNBehaviour::RETURNS_OTHER)
6682 ResWithKnownNaNInfo = SelectPatternNaNBehaviour::RETURNS_NAN;
6683 }
6684 if (TrueVal != CmpLHS || FalseVal != CmpRHS)
6685 return false;
6686 // Decide what type of max/min this should be based on the predicate.
6687 unsigned Opc = getFPMinMaxOpcForSelect(Pred, DstTy, VsNaNRetVal: ResWithKnownNaNInfo);
6688 if (!Opc || !isLegal(Query: {Opc, {DstTy}}))
6689 return false;
6690 // Comparisons between signed zero and zero may have different results...
6691 // unless we have fmaximum/fminimum. In that case, we know -0 < 0.
6692 if (Opc != TargetOpcode::G_FMAXIMUM && Opc != TargetOpcode::G_FMINIMUM) {
6693 // We don't know if a comparison between two 0s will give us a consistent
6694 // result. Be conservative and only proceed if at least one side is
6695 // non-zero.
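// E.g. (select (fcmp olt +0.0, -0.0), +0.0, -0.0) yields -0.0 because the
// compare is false, yet G_FMINNUM may return either zero for (+0.0, -0.0).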
6696 auto KnownNonZeroSide = getFConstantVRegValWithLookThrough(VReg: CmpLHS, MRI);
6697 if (!KnownNonZeroSide || !KnownNonZeroSide->Value.isNonZero()) {
6698 KnownNonZeroSide = getFConstantVRegValWithLookThrough(VReg: CmpRHS, MRI);
6699 if (!KnownNonZeroSide || !KnownNonZeroSide->Value.isNonZero())
6700 return false;
6701 }
6702 }
6703 MatchInfo = [=](MachineIRBuilder &B) {
6704 B.buildInstr(Opc, DstOps: {Dst}, SrcOps: {CmpLHS, CmpRHS});
6705 };
6706 return true;
6707}
6708
6709bool CombinerHelper::matchSimplifySelectToMinMax(MachineInstr &MI,
6710 BuildFnTy &MatchInfo) const {
6711 // TODO: Handle integer cases.
6712 assert(MI.getOpcode() == TargetOpcode::G_SELECT);
6713 // Condition may be fed by a truncated compare.
6714 Register Cond = MI.getOperand(i: 1).getReg();
6715 Register MaybeTrunc;
6716 if (mi_match(R: Cond, MRI, P: m_OneNonDBGUse(SP: m_GTrunc(Src: m_Reg(R&: MaybeTrunc)))))
6717 Cond = MaybeTrunc;
6718 Register Dst = MI.getOperand(i: 0).getReg();
6719 Register TrueVal = MI.getOperand(i: 2).getReg();
6720 Register FalseVal = MI.getOperand(i: 3).getReg();
6721 return matchFPSelectToMinMax(Dst, Cond, TrueVal, FalseVal, MatchInfo);
6722}
6723
6724bool CombinerHelper::matchRedundantBinOpInEquality(MachineInstr &MI,
6725 BuildFnTy &MatchInfo) const {
6726 assert(MI.getOpcode() == TargetOpcode::G_ICMP);
6727 // (X + Y) == X --> Y == 0
6728 // (X + Y) != X --> Y != 0
6729 // (X - Y) == X --> Y == 0
6730 // (X - Y) != X --> Y != 0
6731 // (X ^ Y) == X --> Y == 0
6732 // (X ^ Y) != X --> Y != 0
6733 Register Dst = MI.getOperand(i: 0).getReg();
6734 CmpInst::Predicate Pred;
6735 Register X, Y, OpLHS, OpRHS;
6736 bool MatchedSub = mi_match(
6737 R: Dst, MRI,
6738 P: m_c_GICmp(P: m_Pred(P&: Pred), L: m_Reg(R&: X), R: m_GSub(L: m_Reg(R&: OpLHS), R: m_Reg(R&: Y))));
6739 if (MatchedSub && X != OpLHS)
6740 return false;
6741 if (!MatchedSub) {
6742 if (!mi_match(R: Dst, MRI,
6743 P: m_c_GICmp(P: m_Pred(P&: Pred), L: m_Reg(R&: X),
6744 R: m_any_of(preds: m_GAdd(L: m_Reg(R&: OpLHS), R: m_Reg(R&: OpRHS)),
6745 preds: m_GXor(L: m_Reg(R&: OpLHS), R: m_Reg(R&: OpRHS))))))
6746 return false;
6747 Y = X == OpLHS ? OpRHS : X == OpRHS ? OpLHS : Register();
6748 }
6749 MatchInfo = [=](MachineIRBuilder &B) {
6750 auto Zero = B.buildConstant(Res: MRI.getType(Reg: Y), Val: 0);
6751 B.buildICmp(Pred, Res: Dst, Op0: Y, Op1: Zero);
6752 };
6753 return CmpInst::isEquality(pred: Pred) && Y.isValid();
6754}
6755
6756 /// Return the minimum useless shift amount that results in complete loss of
6757 /// the source value. Return std::nullopt when no such amount can be determined.
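/// E.g. an s32 value with at least 24 known leading zero bits has only 8
/// significant bits, so any G_LSHR by 8 or more folds to 0.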
6758static std::optional<unsigned>
6759getMinUselessShift(KnownBits ValueKB, unsigned Opcode,
6760 std::optional<int64_t> &Result) {
6761 assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_LSHR ||
6762 Opcode == TargetOpcode::G_ASHR) &&
6763 "Expect G_SHL, G_LSHR or G_ASHR.");
6764 unsigned SignificantBits = 0;
6765 switch (Opcode) {
6766 case TargetOpcode::G_SHL:
6767 SignificantBits = ValueKB.countMinTrailingZeros();
6768 Result = 0;
6769 break;
6770 case TargetOpcode::G_LSHR:
6771 Result = 0;
6772 SignificantBits = ValueKB.countMinLeadingZeros();
6773 break;
6774 case TargetOpcode::G_ASHR:
6775 if (ValueKB.isNonNegative()) {
6776 SignificantBits = ValueKB.countMinLeadingZeros();
6777 Result = 0;
6778 } else if (ValueKB.isNegative()) {
6779 SignificantBits = ValueKB.countMinLeadingOnes();
6780 Result = -1;
6781 } else {
6782 // Cannot determine the shift result; bail out of the fold.
6783 return std::nullopt;
6784 }
6785 break;
6786 default:
6787 break;
6788 }
6789 return ValueKB.getBitWidth() - SignificantBits;
6790}
6791
6792bool CombinerHelper::matchShiftsTooBig(
6793 MachineInstr &MI, std::optional<int64_t> &MatchInfo) const {
6794 Register ShiftVal = MI.getOperand(i: 1).getReg();
6795 Register ShiftReg = MI.getOperand(i: 2).getReg();
6796 LLT ResTy = MRI.getType(Reg: MI.getOperand(i: 0).getReg());
6797 auto IsShiftTooBig = [&](const Constant *C) {
6798 auto *CI = dyn_cast<ConstantInt>(Val: C);
6799 if (!CI)
6800 return false;
6801 if (CI->uge(Num: ResTy.getScalarSizeInBits())) {
6802 MatchInfo = std::nullopt;
6803 return true;
6804 }
6805 auto OptMaxUsefulShift = getMinUselessShift(ValueKB: VT->getKnownBits(R: ShiftVal),
6806 Opcode: MI.getOpcode(), Result&: MatchInfo);
6807 return OptMaxUsefulShift && CI->uge(Num: *OptMaxUsefulShift);
6808 };
6809 return matchUnaryPredicate(MRI, Reg: ShiftReg, Match: IsShiftTooBig);
6810}
6811
6812bool CombinerHelper::matchCommuteConstantToRHS(MachineInstr &MI) const {
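// Canonicalization example: G_ADD %c(constant), %x becomes G_ADD %x, %c so
// later combines only need to look for constants on the RHS.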
6813 unsigned LHSOpndIdx = 1;
6814 unsigned RHSOpndIdx = 2;
6815 switch (MI.getOpcode()) {
6816 case TargetOpcode::G_UADDO:
6817 case TargetOpcode::G_SADDO:
6818 case TargetOpcode::G_UMULO:
6819 case TargetOpcode::G_SMULO:
6820 LHSOpndIdx = 2;
6821 RHSOpndIdx = 3;
6822 break;
6823 default:
6824 break;
6825 }
6826 Register LHS = MI.getOperand(i: LHSOpndIdx).getReg();
6827 Register RHS = MI.getOperand(i: RHSOpndIdx).getReg();
6828 if (!getIConstantVRegVal(VReg: LHS, MRI)) {
6829 // Skip commuting if LHS is not a constant. But LHS may be a
6830 // G_CONSTANT_FOLD_BARRIER. If so, commute as long as we don't already
6831 // have a constant on the RHS.
6832 if (MRI.getVRegDef(Reg: LHS)->getOpcode() !=
6833 TargetOpcode::G_CONSTANT_FOLD_BARRIER)
6834 return false;
6835 }
6836 // Commute as long as RHS is not a constant or G_CONSTANT_FOLD_BARRIER.
6837 return MRI.getVRegDef(Reg: RHS)->getOpcode() !=
6838 TargetOpcode::G_CONSTANT_FOLD_BARRIER &&
6839 !getIConstantVRegVal(VReg: RHS, MRI);
6840}
6841
6842bool CombinerHelper::matchCommuteFPConstantToRHS(MachineInstr &MI) const {
6843 Register LHS = MI.getOperand(i: 1).getReg();
6844 Register RHS = MI.getOperand(i: 2).getReg();
6845 std::optional<FPValueAndVReg> ValAndVReg;
6846 if (!mi_match(R: LHS, MRI, P: m_GFCstOrSplat(FPValReg&: ValAndVReg)))
6847 return false;
6848 return !mi_match(R: RHS, MRI, P: m_GFCstOrSplat(FPValReg&: ValAndVReg));
6849}
6850
6851void CombinerHelper::applyCommuteBinOpOperands(MachineInstr &MI) const {
6852 Observer.changingInstr(MI);
6853 unsigned LHSOpndIdx = 1;
6854 unsigned RHSOpndIdx = 2;
6855 switch (MI.getOpcode()) {
6856 case TargetOpcode::G_UADDO:
6857 case TargetOpcode::G_SADDO:
6858 case TargetOpcode::G_UMULO:
6859 case TargetOpcode::G_SMULO:
6860 LHSOpndIdx = 2;
6861 RHSOpndIdx = 3;
6862 break;
6863 default:
6864 break;
6865 }
6866 Register LHSReg = MI.getOperand(i: LHSOpndIdx).getReg();
6867 Register RHSReg = MI.getOperand(i: RHSOpndIdx).getReg();
6868 MI.getOperand(i: LHSOpndIdx).setReg(RHSReg);
6869 MI.getOperand(i: RHSOpndIdx).setReg(LHSReg);
6870 Observer.changedInstr(MI);
6871}
6872
6873bool CombinerHelper::isOneOrOneSplat(Register Src, bool AllowUndefs) const {
6874 LLT SrcTy = MRI.getType(Reg: Src);
6875 if (SrcTy.isFixedVector())
6876 return isConstantSplatVector(Src, SplatValue: 1, AllowUndefs);
6877 if (SrcTy.isScalar()) {
6878 if (AllowUndefs && getOpcodeDef<GImplicitDef>(Reg: Src, MRI) != nullptr)
6879 return true;
6880 auto IConstant = getIConstantVRegValWithLookThrough(VReg: Src, MRI);
6881 return IConstant && IConstant->Value == 1;
6882 }
6883 return false; // scalable vector
6884}
6885
6886bool CombinerHelper::isZeroOrZeroSplat(Register Src, bool AllowUndefs) const {
6887 LLT SrcTy = MRI.getType(Reg: Src);
6888 if (SrcTy.isFixedVector())
6889 return isConstantSplatVector(Src, SplatValue: 0, AllowUndefs);
6890 if (SrcTy.isScalar()) {
6891 if (AllowUndefs && getOpcodeDef<GImplicitDef>(Reg: Src, MRI) != nullptr)
6892 return true;
6893 auto IConstant = getIConstantVRegValWithLookThrough(VReg: Src, MRI);
6894 return IConstant && IConstant->Value == 0;
6895 }
6896 return false; // scalable vector
6897}
6898
6899// Ignores COPYs during conformance checks.
6900// FIXME scalable vectors.
6901bool CombinerHelper::isConstantSplatVector(Register Src, int64_t SplatValue,
6902 bool AllowUndefs) const {
6903 GBuildVector *BuildVector = getOpcodeDef<GBuildVector>(Reg: Src, MRI);
6904 if (!BuildVector)
6905 return false;
6906 unsigned NumSources = BuildVector->getNumSources();
6907
6908 for (unsigned I = 0; I < NumSources; ++I) {
6909 GImplicitDef *ImplicitDef =
6910 getOpcodeDef<GImplicitDef>(Reg: BuildVector->getSourceReg(I), MRI);
6911 if (ImplicitDef && AllowUndefs)
6912 continue;
6913 if (ImplicitDef && !AllowUndefs)
6914 return false;
6915 std::optional<ValueAndVReg> IConstant =
6916 getIConstantVRegValWithLookThrough(VReg: BuildVector->getSourceReg(I), MRI);
6917 if (IConstant && IConstant->Value == SplatValue)
6918 continue;
6919 return false;
6920 }
6921 return true;
6922}
6923
6924// Ignores COPYs during lookups.
6925// FIXME scalable vectors
6926std::optional<APInt>
6927CombinerHelper::getConstantOrConstantSplatVector(Register Src) const {
6928 auto IConstant = getIConstantVRegValWithLookThrough(VReg: Src, MRI);
6929 if (IConstant)
6930 return IConstant->Value;
6931
6932 GBuildVector *BuildVector = getOpcodeDef<GBuildVector>(Reg: Src, MRI);
6933 if (!BuildVector)
6934 return std::nullopt;
6935 unsigned NumSources = BuildVector->getNumSources();
6936
6937 std::optional<APInt> Value = std::nullopt;
6938 for (unsigned I = 0; I < NumSources; ++I) {
6939 std::optional<ValueAndVReg> IConstant =
6940 getIConstantVRegValWithLookThrough(VReg: BuildVector->getSourceReg(I), MRI);
6941 if (!IConstant)
6942 return std::nullopt;
6943 if (!Value)
6944 Value = IConstant->Value;
6945 else if (*Value != IConstant->Value)
6946 return std::nullopt;
6947 }
6948 return Value;
6949}
6950
6951// FIXME G_SPLAT_VECTOR
6952bool CombinerHelper::isConstantOrConstantVectorI(Register Src) const {
6953 auto IConstant = getIConstantVRegValWithLookThrough(VReg: Src, MRI);
6954 if (IConstant)
6955 return true;
6956
6957 GBuildVector *BuildVector = getOpcodeDef<GBuildVector>(Reg: Src, MRI);
6958 if (!BuildVector)
6959 return false;
6960
6961 unsigned NumSources = BuildVector->getNumSources();
6962 for (unsigned I = 0; I < NumSources; ++I) {
6963 std::optional<ValueAndVReg> IConstant =
6964 getIConstantVRegValWithLookThrough(VReg: BuildVector->getSourceReg(I), MRI);
6965 if (!IConstant)
6966 return false;
6967 }
6968 return true;
6969}
6970
6971// TODO: use knownbits to determine zeros
6972bool CombinerHelper::tryFoldSelectOfConstants(GSelect *Select,
6973 BuildFnTy &MatchInfo) const {
6974 uint32_t Flags = Select->getFlags();
6975 Register Dest = Select->getReg(Idx: 0);
6976 Register Cond = Select->getCondReg();
6977 Register True = Select->getTrueReg();
6978 Register False = Select->getFalseReg();
6979 LLT CondTy = MRI.getType(Reg: Select->getCondReg());
6980 LLT TrueTy = MRI.getType(Reg: Select->getTrueReg());
6981
6982 // We only do this combine for scalar boolean conditions.
6983 if (CondTy != LLT::scalar(SizeInBits: 1))
6984 return false;
6985
6986 if (TrueTy.isPointer())
6987 return false;
6988
6989 // Both are scalars.
6990 std::optional<ValueAndVReg> TrueOpt =
6991 getIConstantVRegValWithLookThrough(VReg: True, MRI);
6992 std::optional<ValueAndVReg> FalseOpt =
6993 getIConstantVRegValWithLookThrough(VReg: False, MRI);
6994
6995 if (!TrueOpt || !FalseOpt)
6996 return false;
6997
6998 APInt TrueValue = TrueOpt->Value;
6999 APInt FalseValue = FalseOpt->Value;
7000
7001 // select Cond, 1, 0 --> zext (Cond)
7002 if (TrueValue.isOne() && FalseValue.isZero()) {
7003 MatchInfo = [=](MachineIRBuilder &B) {
7004 B.setInstrAndDebugLoc(*Select);
7005 B.buildZExtOrTrunc(Res: Dest, Op: Cond);
7006 };
7007 return true;
7008 }
7009
7010 // select Cond, -1, 0 --> sext (Cond)
7011 if (TrueValue.isAllOnes() && FalseValue.isZero()) {
7012 MatchInfo = [=](MachineIRBuilder &B) {
7013 B.setInstrAndDebugLoc(*Select);
7014 B.buildSExtOrTrunc(Res: Dest, Op: Cond);
7015 };
7016 return true;
7017 }
7018
7019 // select Cond, 0, 1 --> zext (!Cond)
7020 if (TrueValue.isZero() && FalseValue.isOne()) {
7021 MatchInfo = [=](MachineIRBuilder &B) {
7022 B.setInstrAndDebugLoc(*Select);
7023 Register Inner = MRI.createGenericVirtualRegister(Ty: CondTy);
7024 B.buildNot(Dst: Inner, Src0: Cond);
7025 B.buildZExtOrTrunc(Res: Dest, Op: Inner);
7026 };
7027 return true;
7028 }
7029
7030 // select Cond, 0, -1 --> sext (!Cond)
7031 if (TrueValue.isZero() && FalseValue.isAllOnes()) {
7032 MatchInfo = [=](MachineIRBuilder &B) {
7033 B.setInstrAndDebugLoc(*Select);
7034 Register Inner = MRI.createGenericVirtualRegister(Ty: CondTy);
7035 B.buildNot(Dst: Inner, Src0: Cond);
7036 B.buildSExtOrTrunc(Res: Dest, Op: Inner);
7037 };
7038 return true;
7039 }
7040
7041 // select Cond, C1, C1-1 --> add (zext Cond), C1-1
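// (e.g. select %cond, 5, 4 --> add (zext %cond), 4)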
7042 if (TrueValue - 1 == FalseValue) {
7043 MatchInfo = [=](MachineIRBuilder &B) {
7044 B.setInstrAndDebugLoc(*Select);
7045 Register Inner = MRI.createGenericVirtualRegister(Ty: TrueTy);
7046 B.buildZExtOrTrunc(Res: Inner, Op: Cond);
7047 B.buildAdd(Dst: Dest, Src0: Inner, Src1: False);
7048 };
7049 return true;
7050 }
7051
7052 // select Cond, C1, C1+1 --> add (sext Cond), C1+1
7053 if (TrueValue + 1 == FalseValue) {
7054 MatchInfo = [=](MachineIRBuilder &B) {
7055 B.setInstrAndDebugLoc(*Select);
7056 Register Inner = MRI.createGenericVirtualRegister(Ty: TrueTy);
7057 B.buildSExtOrTrunc(Res: Inner, Op: Cond);
7058 B.buildAdd(Dst: Dest, Src0: Inner, Src1: False);
7059 };
7060 return true;
7061 }
7062
7063 // select Cond, Pow2, 0 --> (zext Cond) << log2(Pow2)
7064 if (TrueValue.isPowerOf2() && FalseValue.isZero()) {
7065 MatchInfo = [=](MachineIRBuilder &B) {
7066 B.setInstrAndDebugLoc(*Select);
7067 Register Inner = MRI.createGenericVirtualRegister(Ty: TrueTy);
7068 B.buildZExtOrTrunc(Res: Inner, Op: Cond);
7069 // The shift amount must be scalar.
7070 LLT ShiftTy = TrueTy.isVector() ? TrueTy.getElementType() : TrueTy;
7071 auto ShAmtC = B.buildConstant(Res: ShiftTy, Val: TrueValue.exactLogBase2());
7072 B.buildShl(Dst: Dest, Src0: Inner, Src1: ShAmtC, Flags);
7073 };
7074 return true;
7075 }
7076
7077 // select Cond, 0, Pow2 --> (zext (!Cond)) << log2(Pow2)
7078 if (FalseValue.isPowerOf2() && TrueValue.isZero()) {
7079 MatchInfo = [=](MachineIRBuilder &B) {
7080 B.setInstrAndDebugLoc(*Select);
7081 Register Not = MRI.createGenericVirtualRegister(Ty: CondTy);
7082 B.buildNot(Dst: Not, Src0: Cond);
7083 Register Inner = MRI.createGenericVirtualRegister(Ty: TrueTy);
7084 B.buildZExtOrTrunc(Res: Inner, Op: Not);
7085 // The shift amount must be scalar.
7086 LLT ShiftTy = TrueTy.isVector() ? TrueTy.getElementType() : TrueTy;
7087 auto ShAmtC = B.buildConstant(Res: ShiftTy, Val: FalseValue.exactLogBase2());
7088 B.buildShl(Dst: Dest, Src0: Inner, Src1: ShAmtC, Flags);
7089 };
7090 return true;
7091 }
7092
7093 // select Cond, -1, C --> or (sext Cond), C
7094 if (TrueValue.isAllOnes()) {
7095 MatchInfo = [=](MachineIRBuilder &B) {
7096 B.setInstrAndDebugLoc(*Select);
7097 Register Inner = MRI.createGenericVirtualRegister(Ty: TrueTy);
7098 B.buildSExtOrTrunc(Res: Inner, Op: Cond);
7099 B.buildOr(Dst: Dest, Src0: Inner, Src1: False, Flags);
7100 };
7101 return true;
7102 }
7103
7104 // select Cond, C, -1 --> or (sext (not Cond)), C
7105 if (FalseValue.isAllOnes()) {
7106 MatchInfo = [=](MachineIRBuilder &B) {
7107 B.setInstrAndDebugLoc(*Select);
7108 Register Not = MRI.createGenericVirtualRegister(Ty: CondTy);
7109 B.buildNot(Dst: Not, Src0: Cond);
7110 Register Inner = MRI.createGenericVirtualRegister(Ty: TrueTy);
7111 B.buildSExtOrTrunc(Res: Inner, Op: Not);
7112 B.buildOr(Dst: Dest, Src0: Inner, Src1: True, Flags);
7113 };
7114 return true;
7115 }
7116
7117 return false;
7118}
7119
7120// TODO: use knownbits to determine zeros
7121bool CombinerHelper::tryFoldBoolSelectToLogic(GSelect *Select,
7122 BuildFnTy &MatchInfo) const {
7123 uint32_t Flags = Select->getFlags();
7124 Register DstReg = Select->getReg(Idx: 0);
7125 Register Cond = Select->getCondReg();
7126 Register True = Select->getTrueReg();
7127 Register False = Select->getFalseReg();
7128 LLT CondTy = MRI.getType(Reg: Select->getCondReg());
7129 LLT TrueTy = MRI.getType(Reg: Select->getTrueReg());
7130
7131 // Boolean or fixed vector of booleans.
7132 if (CondTy.isScalableVector() ||
7133 (CondTy.isFixedVector() &&
7134 CondTy.getElementType().getScalarSizeInBits() != 1) ||
7135 CondTy.getScalarSizeInBits() != 1)
7136 return false;
7137
7138 if (CondTy != TrueTy)
7139 return false;
7140
7141 // select Cond, Cond, F --> or Cond, F
7142 // select Cond, 1, F --> or Cond, F
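// The untaken operand is frozen below: a select only propagates poison from
// the arm it picks, while the replacement or/and would propagate a poison
// operand unconditionally.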
7143 if ((Cond == True) || isOneOrOneSplat(Src: True, /* AllowUndefs */ true)) {
7144 MatchInfo = [=](MachineIRBuilder &B) {
7145 B.setInstrAndDebugLoc(*Select);
7146 Register Ext = MRI.createGenericVirtualRegister(Ty: TrueTy);
7147 B.buildZExtOrTrunc(Res: Ext, Op: Cond);
7148 auto FreezeFalse = B.buildFreeze(Dst: TrueTy, Src: False);
7149 B.buildOr(Dst: DstReg, Src0: Ext, Src1: FreezeFalse, Flags);
7150 };
7151 return true;
7152 }
7153
7154 // select Cond, T, Cond --> and Cond, T
7155 // select Cond, T, 0 --> and Cond, T
7156 if ((Cond == False) || isZeroOrZeroSplat(Src: False, /* AllowUndefs */ true)) {
7157 MatchInfo = [=](MachineIRBuilder &B) {
7158 B.setInstrAndDebugLoc(*Select);
7159 Register Ext = MRI.createGenericVirtualRegister(Ty: TrueTy);
7160 B.buildZExtOrTrunc(Res: Ext, Op: Cond);
7161 auto FreezeTrue = B.buildFreeze(Dst: TrueTy, Src: True);
7162 B.buildAnd(Dst: DstReg, Src0: Ext, Src1: FreezeTrue);
7163 };
7164 return true;
7165 }
7166
7167 // select Cond, T, 1 --> or (not Cond), T
7168 if (isOneOrOneSplat(Src: False, /* AllowUndefs */ true)) {
7169 MatchInfo = [=](MachineIRBuilder &B) {
7170 B.setInstrAndDebugLoc(*Select);
7171 // First the not.
7172 Register Inner = MRI.createGenericVirtualRegister(Ty: CondTy);
7173 B.buildNot(Dst: Inner, Src0: Cond);
7174 // Then an ext to match the destination register.
7175 Register Ext = MRI.createGenericVirtualRegister(Ty: TrueTy);
7176 B.buildZExtOrTrunc(Res: Ext, Op: Inner);
7177 auto FreezeTrue = B.buildFreeze(Dst: TrueTy, Src: True);
7178 B.buildOr(Dst: DstReg, Src0: Ext, Src1: FreezeTrue, Flags);
7179 };
7180 return true;
7181 }
7182
7183 // select Cond, 0, F --> and (not Cond), F
7184 if (isZeroOrZeroSplat(Src: True, /* AllowUndefs */ true)) {
7185 MatchInfo = [=](MachineIRBuilder &B) {
7186 B.setInstrAndDebugLoc(*Select);
7187 // First the not.
7188 Register Inner = MRI.createGenericVirtualRegister(Ty: CondTy);
7189 B.buildNot(Dst: Inner, Src0: Cond);
7190 // Then an ext to match the destination register.
7191 Register Ext = MRI.createGenericVirtualRegister(Ty: TrueTy);
7192 B.buildZExtOrTrunc(Res: Ext, Op: Inner);
7193 auto FreezeFalse = B.buildFreeze(Dst: TrueTy, Src: False);
7194 B.buildAnd(Dst: DstReg, Src0: Ext, Src1: FreezeFalse);
7195 };
7196 return true;
7197 }
7198
7199 return false;
7200}
7201
7202bool CombinerHelper::matchSelectIMinMax(const MachineOperand &MO,
7203 BuildFnTy &MatchInfo) const {
7204 GSelect *Select = cast<GSelect>(Val: MRI.getVRegDef(Reg: MO.getReg()));
7205 GICmp *Cmp = cast<GICmp>(Val: MRI.getVRegDef(Reg: Select->getCondReg()));
7206
7207 Register DstReg = Select->getReg(Idx: 0);
7208 Register True = Select->getTrueReg();
7209 Register False = Select->getFalseReg();
7210 LLT DstTy = MRI.getType(Reg: DstReg);
7211
7212 if (DstTy.isPointer())
7213 return false;
7214
7215 // We want to fold the icmp and replace the select.
7216 if (!MRI.hasOneNonDBGUse(RegNo: Cmp->getReg(Idx: 0)))
7217 return false;
7218
7219 CmpInst::Predicate Pred = Cmp->getCond();
7220 // We need a less-than or greater-than predicate for the
7221 // canonicalization; equality predicates cannot form a min/max.
7222 if (CmpInst::isEquality(pred: Pred))
7223 return false;
7224
7225 Register CmpLHS = Cmp->getLHSReg();
7226 Register CmpRHS = Cmp->getRHSReg();
7227
7228 // We can swap CmpLHS and CmpRHS for a higher hit rate.
7229 if (True == CmpRHS && False == CmpLHS) {
7230 std::swap(a&: CmpLHS, b&: CmpRHS);
7231 Pred = CmpInst::getSwappedPredicate(pred: Pred);
7232 }
7233
7234 // (icmp X, Y) ? X : Y -> integer minmax.
7235 // see matchSelectPattern in ValueTracking.
7236 // Legality between G_SELECT and integer minmax can differ.
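// e.g. (select (icmp ugt %x, %y), %x, %y) --> G_UMAX %x, %y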
7237 if (True != CmpLHS || False != CmpRHS)
7238 return false;
7239
7240 switch (Pred) {
7241 case ICmpInst::ICMP_UGT:
7242 case ICmpInst::ICMP_UGE: {
7243 if (!isLegalOrBeforeLegalizer(Query: {TargetOpcode::G_UMAX, DstTy}))
7244 return false;
7245 MatchInfo = [=](MachineIRBuilder &B) { B.buildUMax(Dst: DstReg, Src0: True, Src1: False); };
7246 return true;
7247 }
7248 case ICmpInst::ICMP_SGT:
7249 case ICmpInst::ICMP_SGE: {
7250 if (!isLegalOrBeforeLegalizer(Query: {TargetOpcode::G_SMAX, DstTy}))
7251 return false;
7252 MatchInfo = [=](MachineIRBuilder &B) { B.buildSMax(Dst: DstReg, Src0: True, Src1: False); };
7253 return true;
7254 }
7255 case ICmpInst::ICMP_ULT:
7256 case ICmpInst::ICMP_ULE: {
7257 if (!isLegalOrBeforeLegalizer(Query: {TargetOpcode::G_UMIN, DstTy}))
7258 return false;
7259 MatchInfo = [=](MachineIRBuilder &B) { B.buildUMin(Dst: DstReg, Src0: True, Src1: False); };
7260 return true;
7261 }
7262 case ICmpInst::ICMP_SLT:
7263 case ICmpInst::ICMP_SLE: {
7264 if (!isLegalOrBeforeLegalizer(Query: {TargetOpcode::G_SMIN, DstTy}))
7265 return false;
7266 MatchInfo = [=](MachineIRBuilder &B) { B.buildSMin(Dst: DstReg, Src0: True, Src1: False); };
7267 return true;
7268 }
7269 default:
7270 return false;
7271 }
7272}
7273
7274// (neg (min/max x, (neg x))) --> (max/min x, (neg x))
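// Illustrative MIR for the smin case (%zero is a G_CONSTANT 0):
//   %neg:_(s32) = G_SUB %zero, %x
//   %min:_(s32) = G_SMIN %x, %neg
//   %dst:_(s32) = G_SUB %zero, %min
// -->
//   %dst:_(s32) = G_SMAX %x, %neg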
bool CombinerHelper::matchSimplifyNegMinMax(MachineInstr &MI,
                                            BuildFnTy &MatchInfo) const {
  assert(MI.getOpcode() == TargetOpcode::G_SUB);
  Register DestReg = MI.getOperand(0).getReg();
  LLT DestTy = MRI.getType(DestReg);

  Register X;
  Register Sub0;
  auto NegPattern = m_all_of(m_Neg(m_DeferredReg(X)), m_Reg(Sub0));
  if (mi_match(DestReg, MRI,
               m_Neg(m_OneUse(m_any_of(m_GSMin(m_Reg(X), NegPattern),
                                       m_GSMax(m_Reg(X), NegPattern),
                                       m_GUMin(m_Reg(X), NegPattern),
                                       m_GUMax(m_Reg(X), NegPattern)))))) {
    MachineInstr *MinMaxMI = MRI.getVRegDef(MI.getOperand(2).getReg());
    unsigned NewOpc = getInverseGMinMaxOpcode(MinMaxMI->getOpcode());
    if (isLegal({NewOpc, {DestTy}})) {
      MatchInfo = [=](MachineIRBuilder &B) {
        B.buildInstr(NewOpc, {DestReg}, {X, Sub0});
      };
      return true;
    }
  }

  return false;
}

bool CombinerHelper::matchSelect(MachineInstr &MI, BuildFnTy &MatchInfo) const {
  GSelect *Select = cast<GSelect>(&MI);

  if (tryFoldSelectOfConstants(Select, MatchInfo))
    return true;

  if (tryFoldBoolSelectToLogic(Select, MatchInfo))
    return true;

  return false;
}

/// Fold (icmp Pred1 V1, C1) && (icmp Pred2 V2, C2)
/// or   (icmp Pred1 V1, C1) || (icmp Pred2 V2, C2)
/// into a single comparison using range-based reasoning.
/// See InstCombinerImpl::foldAndOrOfICmpsUsingRanges.
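///
/// Illustrative example:
///   (icmp ult X, 4) || (icmp ult (add X, -8), 4)
/// covers the ranges [0, 4) and [8, 12). The ranges have equal size and
/// their bounds differ in exactly one bit, so masking that bit away maps
/// both onto a single range:
///   icmp ult (and X, ~8), 4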
bool CombinerHelper::tryFoldAndOrOrICmpsUsingRanges(
    GLogicalBinOp *Logic, BuildFnTy &MatchInfo) const {
  assert(Logic->getOpcode() != TargetOpcode::G_XOR && "unexpected xor");
  bool IsAnd = Logic->getOpcode() == TargetOpcode::G_AND;
  Register DstReg = Logic->getReg(0);
  Register LHS = Logic->getLHSReg();
  Register RHS = Logic->getRHSReg();
  unsigned Flags = Logic->getFlags();

  // We need a G_ICMP on the LHS register.
  GICmp *Cmp1 = getOpcodeDef<GICmp>(LHS, MRI);
  if (!Cmp1)
    return false;

  // We need a G_ICMP on the RHS register.
  GICmp *Cmp2 = getOpcodeDef<GICmp>(RHS, MRI);
  if (!Cmp2)
    return false;

  // We want to fold the icmps.
  if (!MRI.hasOneNonDBGUse(Cmp1->getReg(0)) ||
      !MRI.hasOneNonDBGUse(Cmp2->getReg(0)))
    return false;

  APInt C1;
  APInt C2;
  std::optional<ValueAndVReg> MaybeC1 =
      getIConstantVRegValWithLookThrough(Cmp1->getRHSReg(), MRI);
  if (!MaybeC1)
    return false;
  C1 = MaybeC1->Value;

  std::optional<ValueAndVReg> MaybeC2 =
      getIConstantVRegValWithLookThrough(Cmp2->getRHSReg(), MRI);
  if (!MaybeC2)
    return false;
  C2 = MaybeC2->Value;

  Register R1 = Cmp1->getLHSReg();
  Register R2 = Cmp2->getLHSReg();
  CmpInst::Predicate Pred1 = Cmp1->getCond();
  CmpInst::Predicate Pred2 = Cmp2->getCond();
  LLT CmpTy = MRI.getType(Cmp1->getReg(0));
  LLT CmpOperandTy = MRI.getType(R1);

  if (CmpOperandTy.isPointer())
    return false;

  // We build ands, adds, and constants of type CmpOperandTy.
  // They must be legal to build.
  if (!isLegalOrBeforeLegalizer({TargetOpcode::G_AND, CmpOperandTy}) ||
      !isLegalOrBeforeLegalizer({TargetOpcode::G_ADD, CmpOperandTy}) ||
      !isConstantLegalOrBeforeLegalizer(CmpOperandTy))
    return false;

  // Look through an add of a constant offset on R1, R2, or both operands.
  // This lets us interpret the "R + C' < C''" idiom as a proper range on R.
  std::optional<APInt> Offset1;
  std::optional<APInt> Offset2;
  if (R1 != R2) {
    if (GAdd *Add = getOpcodeDef<GAdd>(R1, MRI)) {
      std::optional<ValueAndVReg> MaybeOffset1 =
          getIConstantVRegValWithLookThrough(Add->getRHSReg(), MRI);
      if (MaybeOffset1) {
        R1 = Add->getLHSReg();
        Offset1 = MaybeOffset1->Value;
      }
    }
    if (GAdd *Add = getOpcodeDef<GAdd>(R2, MRI)) {
      std::optional<ValueAndVReg> MaybeOffset2 =
          getIConstantVRegValWithLookThrough(Add->getRHSReg(), MRI);
      if (MaybeOffset2) {
        R2 = Add->getLHSReg();
        Offset2 = MaybeOffset2->Value;
      }
    }
  }

  if (R1 != R2)
    return false;

  // We calculate the icmp ranges, taking any offsets into account.
  ConstantRange CR1 = ConstantRange::makeExactICmpRegion(
      IsAnd ? ICmpInst::getInversePredicate(Pred1) : Pred1, C1);
  if (Offset1)
    CR1 = CR1.subtract(*Offset1);

  ConstantRange CR2 = ConstantRange::makeExactICmpRegion(
      IsAnd ? ICmpInst::getInversePredicate(Pred2) : Pred2, C2);
  if (Offset2)
    CR2 = CR2.subtract(*Offset2);

  bool CreateMask = false;
  APInt LowerDiff;
  std::optional<ConstantRange> CR = CR1.exactUnionWith(CR2);
  if (!CR) {
    // We need non-wrapping ranges.
    if (CR1.isWrappedSet() || CR2.isWrappedSet())
      return false;

    // Check whether we have equal-size ranges that only differ by one bit.
    // In that case we can apply a mask to map one range onto the other.
    LowerDiff = CR1.getLower() ^ CR2.getLower();
    APInt UpperDiff = (CR1.getUpper() - 1) ^ (CR2.getUpper() - 1);
    APInt CR1Size = CR1.getUpper() - CR1.getLower();
    if (!LowerDiff.isPowerOf2() || LowerDiff != UpperDiff ||
        CR1Size != CR2.getUpper() - CR2.getLower())
      return false;

    CR = CR1.getLower().ult(CR2.getLower()) ? CR1 : CR2;
    CreateMask = true;
  }

  if (IsAnd)
    CR = CR->inverse();

  CmpInst::Predicate NewPred;
  APInt NewC, Offset;
  CR->getEquivalentICmp(NewPred, NewC, Offset);

  // We take the result type of one of the original icmps, CmpTy, for the
  // icmp to be built. The operand type, CmpOperandTy, is used for the other
  // instructions and constants to be built. G_ADD and G_AND take parameters
  // of the same type as their result. CmpTy and the type of DstReg might
  // differ; that is why we zext or trunc the icmp into the destination
  // register.
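  // For instance, with CreateMask set and a non-zero Offset, the emitted
  // sequence is (illustrative):
  //   %and  = G_AND %R1, ~LowerDiff   ; the mask
  //   %add  = G_ADD %and, Offset
  //   %icmp = G_ICMP NewPred, %add, NewC
  //   %dst  = G_ZEXT/G_TRUNC %icmp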
  MatchInfo = [=](MachineIRBuilder &B) {
    if (CreateMask && Offset != 0) {
      auto TildeLowerDiff = B.buildConstant(CmpOperandTy, ~LowerDiff);
      auto And = B.buildAnd(CmpOperandTy, R1, TildeLowerDiff); // the mask.
      auto OffsetC = B.buildConstant(CmpOperandTy, Offset);
      auto Add = B.buildAdd(CmpOperandTy, And, OffsetC, Flags);
      auto NewCon = B.buildConstant(CmpOperandTy, NewC);
      auto ICmp = B.buildICmp(NewPred, CmpTy, Add, NewCon);
      B.buildZExtOrTrunc(DstReg, ICmp);
    } else if (CreateMask && Offset == 0) {
      auto TildeLowerDiff = B.buildConstant(CmpOperandTy, ~LowerDiff);
      auto And = B.buildAnd(CmpOperandTy, R1, TildeLowerDiff); // the mask.
      auto NewCon = B.buildConstant(CmpOperandTy, NewC);
      auto ICmp = B.buildICmp(NewPred, CmpTy, And, NewCon);
      B.buildZExtOrTrunc(DstReg, ICmp);
    } else if (!CreateMask && Offset != 0) {
      auto OffsetC = B.buildConstant(CmpOperandTy, Offset);
      auto Add = B.buildAdd(CmpOperandTy, R1, OffsetC, Flags);
      auto NewCon = B.buildConstant(CmpOperandTy, NewC);
      auto ICmp = B.buildICmp(NewPred, CmpTy, Add, NewCon);
      B.buildZExtOrTrunc(DstReg, ICmp);
    } else if (!CreateMask && Offset == 0) {
      auto NewCon = B.buildConstant(CmpOperandTy, NewC);
      auto ICmp = B.buildICmp(NewPred, CmpTy, R1, NewCon);
      B.buildZExtOrTrunc(DstReg, ICmp);
    } else {
      llvm_unreachable("unexpected configuration of CreateMask and Offset");
    }
  };
  return true;
}

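// Illustrative example, in IR-style notation:
//   (fcmp olt X, Y) && (fcmp ogt X, Y)  -->  false
//   (fcmp olt X, Y) || (fcmp ogt X, Y)  -->  fcmp one X, Y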
bool CombinerHelper::tryFoldLogicOfFCmps(GLogicalBinOp *Logic,
                                         BuildFnTy &MatchInfo) const {
  assert(Logic->getOpcode() != TargetOpcode::G_XOR && "unexpected xor");
  Register DestReg = Logic->getReg(0);
  Register LHS = Logic->getLHSReg();
  Register RHS = Logic->getRHSReg();
  bool IsAnd = Logic->getOpcode() == TargetOpcode::G_AND;

  // We need a compare on the LHS register.
  GFCmp *Cmp1 = getOpcodeDef<GFCmp>(LHS, MRI);
  if (!Cmp1)
    return false;

  // We need a compare on the RHS register.
  GFCmp *Cmp2 = getOpcodeDef<GFCmp>(RHS, MRI);
  if (!Cmp2)
    return false;

  LLT CmpTy = MRI.getType(Cmp1->getReg(0));
  LLT CmpOperandTy = MRI.getType(Cmp1->getLHSReg());

  // We build one fcmp, so it must be legal; to fold the fcmps and replace
  // the logic op, they must all have a single use; and the fcmps must have
  // the same shape (operand type).
  if (!isLegalOrBeforeLegalizer(
          {TargetOpcode::G_FCMP, {CmpTy, CmpOperandTy}}) ||
      !MRI.hasOneNonDBGUse(Logic->getReg(0)) ||
      !MRI.hasOneNonDBGUse(Cmp1->getReg(0)) ||
      !MRI.hasOneNonDBGUse(Cmp2->getReg(0)) ||
      MRI.getType(Cmp1->getLHSReg()) != MRI.getType(Cmp2->getLHSReg()))
    return false;

  CmpInst::Predicate PredL = Cmp1->getCond();
  CmpInst::Predicate PredR = Cmp2->getCond();
  Register LHS0 = Cmp1->getLHSReg();
  Register LHS1 = Cmp1->getRHSReg();
  Register RHS0 = Cmp2->getLHSReg();
  Register RHS1 = Cmp2->getRHSReg();

  if (LHS0 == RHS1 && LHS1 == RHS0) {
    // Swap RHS operands to match LHS.
    PredR = CmpInst::getSwappedPredicate(PredR);
    std::swap(RHS0, RHS1);
  }

  if (LHS0 == RHS0 && LHS1 == RHS1) {
    // We determine the new predicate.
    unsigned CmpCodeL = getFCmpCode(PredL);
    unsigned CmpCodeR = getFCmpCode(PredR);
    unsigned NewPred = IsAnd ? CmpCodeL & CmpCodeR : CmpCodeL | CmpCodeR;
    unsigned Flags = Cmp1->getFlags() | Cmp2->getFlags();
    MatchInfo = [=](MachineIRBuilder &B) {
      // The fcmp predicates fill the lower part of the enum.
      FCmpInst::Predicate Pred = static_cast<FCmpInst::Predicate>(NewPred);
      if (Pred == FCmpInst::FCMP_FALSE &&
          isConstantLegalOrBeforeLegalizer(CmpTy)) {
        auto False = B.buildConstant(CmpTy, 0);
        B.buildZExtOrTrunc(DestReg, False);
      } else if (Pred == FCmpInst::FCMP_TRUE &&
                 isConstantLegalOrBeforeLegalizer(CmpTy)) {
        auto True = B.buildConstant(
            CmpTy, getICmpTrueVal(getTargetLowering(),
                                  /*IsVector=*/CmpTy.isVector(),
                                  /*IsFP=*/true));
        B.buildZExtOrTrunc(DestReg, True);
      } else { // We take the predicate without predicate optimizations.
        auto Cmp = B.buildFCmp(Pred, CmpTy, LHS0, LHS1, Flags);
        B.buildZExtOrTrunc(DestReg, Cmp);
      }
    };
    return true;
  }

  return false;
}

bool CombinerHelper::matchAnd(MachineInstr &MI, BuildFnTy &MatchInfo) const {
  GAnd *And = cast<GAnd>(&MI);

  if (tryFoldAndOrOrICmpsUsingRanges(And, MatchInfo))
    return true;

  if (tryFoldLogicOfFCmps(And, MatchInfo))
    return true;

  return false;
}

bool CombinerHelper::matchOr(MachineInstr &MI, BuildFnTy &MatchInfo) const {
  GOr *Or = cast<GOr>(&MI);

  if (tryFoldAndOrOrICmpsUsingRanges(Or, MatchInfo))
    return true;

  if (tryFoldLogicOfFCmps(Or, MatchInfo))
    return true;

  return false;
}

bool CombinerHelper::matchAddOverflow(MachineInstr &MI,
                                      BuildFnTy &MatchInfo) const {
  GAddCarryOut *Add = cast<GAddCarryOut>(&MI);

  // Addo has no flags.
  Register Dst = Add->getReg(0);
  Register Carry = Add->getReg(1);
  Register LHS = Add->getLHSReg();
  Register RHS = Add->getRHSReg();
  bool IsSigned = Add->isSigned();
  LLT DstTy = MRI.getType(Dst);
  LLT CarryTy = MRI.getType(Carry);

  // Fold addo, if the carry is dead -> add, undef.
  if (MRI.use_nodbg_empty(Carry) &&
      isLegalOrBeforeLegalizer({TargetOpcode::G_ADD, {DstTy}})) {
    MatchInfo = [=](MachineIRBuilder &B) {
      B.buildAdd(Dst, LHS, RHS);
      B.buildUndef(Carry);
    };
    return true;
  }

  // Canonicalize constant to RHS.
  if (isConstantOrConstantVectorI(LHS) && !isConstantOrConstantVectorI(RHS)) {
    if (IsSigned) {
      MatchInfo = [=](MachineIRBuilder &B) {
        B.buildSAddo(Dst, Carry, RHS, LHS);
      };
      return true;
    }
    // !IsSigned
    MatchInfo = [=](MachineIRBuilder &B) {
      B.buildUAddo(Dst, Carry, RHS, LHS);
    };
    return true;
  }

  std::optional<APInt> MaybeLHS = getConstantOrConstantSplatVector(LHS);
  std::optional<APInt> MaybeRHS = getConstantOrConstantSplatVector(RHS);

  // Fold addo(c1, c2) -> c3, carry.
  if (MaybeLHS && MaybeRHS && isConstantLegalOrBeforeLegalizer(DstTy) &&
      isConstantLegalOrBeforeLegalizer(CarryTy)) {
    bool Overflow;
    APInt Result = IsSigned ? MaybeLHS->sadd_ov(*MaybeRHS, Overflow)
                            : MaybeLHS->uadd_ov(*MaybeRHS, Overflow);
    MatchInfo = [=](MachineIRBuilder &B) {
      B.buildConstant(Dst, Result);
      B.buildConstant(Carry, Overflow);
    };
    return true;
  }

  // Fold (addo x, 0) -> x, no carry.
  if (MaybeRHS && *MaybeRHS == 0 && isConstantLegalOrBeforeLegalizer(CarryTy)) {
    MatchInfo = [=](MachineIRBuilder &B) {
      B.buildCopy(Dst, LHS);
      B.buildConstant(Carry, 0);
    };
    return true;
  }

  // Given 2 constant operands whose sum does not overflow:
  // uaddo (X +nuw C0), C1 -> uaddo X, C0 + C1
  // saddo (X +nsw C0), C1 -> saddo X, C0 + C1
  GAdd *AddLHS = getOpcodeDef<GAdd>(LHS, MRI);
  if (MaybeRHS && AddLHS && MRI.hasOneNonDBGUse(Add->getReg(0)) &&
      ((IsSigned && AddLHS->getFlag(MachineInstr::MIFlag::NoSWrap)) ||
       (!IsSigned && AddLHS->getFlag(MachineInstr::MIFlag::NoUWrap)))) {
    std::optional<APInt> MaybeAddRHS =
        getConstantOrConstantSplatVector(AddLHS->getRHSReg());
    if (MaybeAddRHS) {
      bool Overflow;
      APInt NewC = IsSigned ? MaybeAddRHS->sadd_ov(*MaybeRHS, Overflow)
                            : MaybeAddRHS->uadd_ov(*MaybeRHS, Overflow);
      if (!Overflow && isConstantLegalOrBeforeLegalizer(DstTy)) {
        if (IsSigned) {
          MatchInfo = [=](MachineIRBuilder &B) {
            auto ConstRHS = B.buildConstant(DstTy, NewC);
            B.buildSAddo(Dst, Carry, AddLHS->getLHSReg(), ConstRHS);
          };
          return true;
        }
        // !IsSigned
        MatchInfo = [=](MachineIRBuilder &B) {
          auto ConstRHS = B.buildConstant(DstTy, NewC);
          B.buildUAddo(Dst, Carry, AddLHS->getLHSReg(), ConstRHS);
        };
        return true;
      }
    }
  }

  // We try to combine addo to non-overflowing add.
  if (!isLegalOrBeforeLegalizer({TargetOpcode::G_ADD, {DstTy}}) ||
      !isConstantLegalOrBeforeLegalizer(CarryTy))
    return false;

  // We try to combine uaddo to non-overflowing add.
  if (!IsSigned) {
    ConstantRange CRLHS = ConstantRange::fromKnownBits(VT->getKnownBits(LHS),
                                                       /*IsSigned=*/false);
    ConstantRange CRRHS = ConstantRange::fromKnownBits(VT->getKnownBits(RHS),
                                                       /*IsSigned=*/false);

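    // E.g. (illustrative): if the top bit of both 32-bit operands is known
    // zero, each range lies in [0, 2^31), the sum stays below 2^32, and the
    // unsigned add can never produce a carry.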
    switch (CRLHS.unsignedAddMayOverflow(CRRHS)) {
    case ConstantRange::OverflowResult::MayOverflow:
      return false;
    case ConstantRange::OverflowResult::NeverOverflows: {
      MatchInfo = [=](MachineIRBuilder &B) {
        B.buildAdd(Dst, LHS, RHS, MachineInstr::MIFlag::NoUWrap);
        B.buildConstant(Carry, 0);
      };
      return true;
    }
    case ConstantRange::OverflowResult::AlwaysOverflowsLow:
    case ConstantRange::OverflowResult::AlwaysOverflowsHigh: {
      MatchInfo = [=](MachineIRBuilder &B) {
        B.buildAdd(Dst, LHS, RHS);
        B.buildConstant(Carry, 1);
      };
      return true;
    }
    }
    return false;
  }

  // We try to combine saddo to non-overflowing add.

  // If LHS and RHS each have at least two sign bits, then there is no signed
  // overflow.
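  // (With two or more sign bits, each value lies in [-2^(w-2), 2^(w-2)), so
  // the sum lies in [-2^(w-1), 2^(w-1)) and cannot wrap the signed w-bit
  // range.)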
  if (VT->computeNumSignBits(RHS) > 1 && VT->computeNumSignBits(LHS) > 1) {
    MatchInfo = [=](MachineIRBuilder &B) {
      B.buildAdd(Dst, LHS, RHS, MachineInstr::MIFlag::NoSWrap);
      B.buildConstant(Carry, 0);
    };
    return true;
  }

  ConstantRange CRLHS =
      ConstantRange::fromKnownBits(VT->getKnownBits(LHS), /*IsSigned=*/true);
  ConstantRange CRRHS =
      ConstantRange::fromKnownBits(VT->getKnownBits(RHS), /*IsSigned=*/true);

  switch (CRLHS.signedAddMayOverflow(CRRHS)) {
  case ConstantRange::OverflowResult::MayOverflow:
    return false;
  case ConstantRange::OverflowResult::NeverOverflows: {
    MatchInfo = [=](MachineIRBuilder &B) {
      B.buildAdd(Dst, LHS, RHS, MachineInstr::MIFlag::NoSWrap);
      B.buildConstant(Carry, 0);
    };
    return true;
  }
  case ConstantRange::OverflowResult::AlwaysOverflowsLow:
  case ConstantRange::OverflowResult::AlwaysOverflowsHigh: {
    MatchInfo = [=](MachineIRBuilder &B) {
      B.buildAdd(Dst, LHS, RHS);
      B.buildConstant(Carry, 1);
    };
    return true;
  }
  }

  return false;
}

void CombinerHelper::applyBuildFnMO(const MachineOperand &MO,
                                    BuildFnTy &MatchInfo) const {
  MachineInstr *Root = getDefIgnoringCopies(MO.getReg(), MRI);
  MatchInfo(Builder);
  Root->eraseFromParent();
}

bool CombinerHelper::matchFPowIExpansion(MachineInstr &MI,
                                         int64_t Exponent) const {
  bool OptForSize = MI.getMF()->getFunction().hasOptSize();
  return getTargetLowering().isBeneficialToExpandPowI(Exponent, OptForSize);
}

void CombinerHelper::applyExpandFPowI(MachineInstr &MI,
                                      int64_t Exponent) const {
  auto [Dst, Base] = MI.getFirst2Regs();
  LLT Ty = MRI.getType(Dst);
  int64_t ExpVal = Exponent;

  if (ExpVal == 0) {
    Builder.buildFConstant(Dst, 1.0);
    MI.eraseFromParent();
    return;
  }

  if (ExpVal < 0)
    ExpVal = -ExpVal;

  // We use the simple binary decomposition method from SelectionDAG ExpandPowI
  // to generate the multiply sequence. There are more optimal ways to do this
  // (for example, powi(x,15) generates one more multiply than it should), but
  // this has the benefit of being both really simple and much better than a
  // libcall.
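  // E.g. (illustrative) Exponent = 11 (0b1011): the loop below computes the
  // squares x^2, x^4, x^8 and combines x * x^2 * x^8 = x^11. The final,
  // unused squaring is expected to be cleaned up by later combines.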
  std::optional<SrcOp> Res;
  SrcOp CurSquare = Base;
  while (ExpVal > 0) {
    if (ExpVal & 1) {
      if (!Res)
        Res = CurSquare;
      else
        Res = Builder.buildFMul(Ty, *Res, CurSquare);
    }

    CurSquare = Builder.buildFMul(Ty, CurSquare, CurSquare);
    ExpVal >>= 1;
  }

  // If the original exponent was negative, invert the result, producing
  // 1/(x*x*x).
  if (Exponent < 0)
    Res = Builder.buildFDiv(Ty, Builder.buildFConstant(Ty, 1.0), *Res,
                            MI.getFlags());

  Builder.buildCopy(Dst, *Res);
  MI.eraseFromParent();
}

bool CombinerHelper::matchFoldAPlusC1MinusC2(const MachineInstr &MI,
                                             BuildFnTy &MatchInfo) const {
  // fold (A+C1)-C2 -> A+(C1-C2)
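  // E.g., (x + 10) - 3 --> x + 7.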
  const GSub *Sub = cast<GSub>(&MI);
  GAdd *Add = cast<GAdd>(MRI.getVRegDef(Sub->getLHSReg()));

  if (!MRI.hasOneNonDBGUse(Add->getReg(0)))
    return false;

  APInt C2 = getIConstantFromReg(Sub->getRHSReg(), MRI);
  APInt C1 = getIConstantFromReg(Add->getRHSReg(), MRI);

  Register Dst = Sub->getReg(0);
  LLT DstTy = MRI.getType(Dst);

  MatchInfo = [=](MachineIRBuilder &B) {
    auto Const = B.buildConstant(DstTy, C1 - C2);
    B.buildAdd(Dst, Add->getLHSReg(), Const);
  };

  return true;
}

bool CombinerHelper::matchFoldC2MinusAPlusC1(const MachineInstr &MI,
                                             BuildFnTy &MatchInfo) const {
  // fold C2-(A+C1) -> (C2-C1)-A
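  // E.g., 10 - (x + 3) --> 7 - x.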
  const GSub *Sub = cast<GSub>(&MI);
  GAdd *Add = cast<GAdd>(MRI.getVRegDef(Sub->getRHSReg()));

  if (!MRI.hasOneNonDBGUse(Add->getReg(0)))
    return false;

  APInt C2 = getIConstantFromReg(Sub->getLHSReg(), MRI);
  APInt C1 = getIConstantFromReg(Add->getRHSReg(), MRI);

  Register Dst = Sub->getReg(0);
  LLT DstTy = MRI.getType(Dst);

  MatchInfo = [=](MachineIRBuilder &B) {
    auto Const = B.buildConstant(DstTy, C2 - C1);
    B.buildSub(Dst, Const, Add->getLHSReg());
  };

  return true;
}

bool CombinerHelper::matchFoldAMinusC1MinusC2(const MachineInstr &MI,
                                              BuildFnTy &MatchInfo) const {
  // fold (A-C1)-C2 -> A-(C1+C2)
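  // E.g., (x - 3) - 4 --> x - 7.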
  const GSub *Sub1 = cast<GSub>(&MI);
  GSub *Sub2 = cast<GSub>(MRI.getVRegDef(Sub1->getLHSReg()));

  if (!MRI.hasOneNonDBGUse(Sub2->getReg(0)))
    return false;

  APInt C2 = getIConstantFromReg(Sub1->getRHSReg(), MRI);
  APInt C1 = getIConstantFromReg(Sub2->getRHSReg(), MRI);

  Register Dst = Sub1->getReg(0);
  LLT DstTy = MRI.getType(Dst);

  MatchInfo = [=](MachineIRBuilder &B) {
    auto Const = B.buildConstant(DstTy, C1 + C2);
    B.buildSub(Dst, Sub2->getLHSReg(), Const);
  };

  return true;
}

bool CombinerHelper::matchFoldC1Minus2MinusC2(const MachineInstr &MI,
                                              BuildFnTy &MatchInfo) const {
  // fold (C1-A)-C2 -> (C1-C2)-A
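  // E.g., (10 - x) - 3 --> 7 - x.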
  const GSub *Sub1 = cast<GSub>(&MI);
  GSub *Sub2 = cast<GSub>(MRI.getVRegDef(Sub1->getLHSReg()));

  if (!MRI.hasOneNonDBGUse(Sub2->getReg(0)))
    return false;

  APInt C2 = getIConstantFromReg(Sub1->getRHSReg(), MRI);
  APInt C1 = getIConstantFromReg(Sub2->getLHSReg(), MRI);

  Register Dst = Sub1->getReg(0);
  LLT DstTy = MRI.getType(Dst);

  MatchInfo = [=](MachineIRBuilder &B) {
    auto Const = B.buildConstant(DstTy, C1 - C2);
    B.buildSub(Dst, Const, Sub2->getRHSReg());
  };

  return true;
}

bool CombinerHelper::matchFoldAMinusC1PlusC2(const MachineInstr &MI,
                                             BuildFnTy &MatchInfo) const {
  // fold ((A-C1)+C2) -> (A+(C2-C1))
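  // E.g., (x - 3) + 10 --> x + 7.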
  const GAdd *Add = cast<GAdd>(&MI);
  GSub *Sub = cast<GSub>(MRI.getVRegDef(Add->getLHSReg()));

  if (!MRI.hasOneNonDBGUse(Sub->getReg(0)))
    return false;

  APInt C2 = getIConstantFromReg(Add->getRHSReg(), MRI);
  APInt C1 = getIConstantFromReg(Sub->getRHSReg(), MRI);

  Register Dst = Add->getReg(0);
  LLT DstTy = MRI.getType(Dst);

  MatchInfo = [=](MachineIRBuilder &B) {
    auto Const = B.buildConstant(DstTy, C2 - C1);
    B.buildAdd(Dst, Sub->getLHSReg(), Const);
  };

  return true;
}

bool CombinerHelper::matchUnmergeValuesAnyExtBuildVector(
    const MachineInstr &MI, BuildFnTy &MatchInfo) const {
  const GUnmerge *Unmerge = cast<GUnmerge>(&MI);

  if (!MRI.hasOneNonDBGUse(Unmerge->getSourceReg()))
    return false;

  const MachineInstr *Source = MRI.getVRegDef(Unmerge->getSourceReg());

  LLT DstTy = MRI.getType(Unmerge->getReg(0));

  // $bv:_(<8 x s8>) = G_BUILD_VECTOR ....
  // $any:_(<8 x s16>) = G_ANYEXT $bv
  // $uv:_(<4 x s16>), $uv1:_(<4 x s16>) = G_UNMERGE_VALUES $any
  //
  // ->
  //
  // $any:_(s16) = G_ANYEXT $bv[0]
  // $any1:_(s16) = G_ANYEXT $bv[1]
  // $any2:_(s16) = G_ANYEXT $bv[2]
  // $any3:_(s16) = G_ANYEXT $bv[3]
  // $any4:_(s16) = G_ANYEXT $bv[4]
  // $any5:_(s16) = G_ANYEXT $bv[5]
  // $any6:_(s16) = G_ANYEXT $bv[6]
  // $any7:_(s16) = G_ANYEXT $bv[7]
  // $uv:_(<4 x s16>) = G_BUILD_VECTOR $any, $any1, $any2, $any3
  // $uv1:_(<4 x s16>) = G_BUILD_VECTOR $any4, $any5, $any6, $any7

  // We want to unmerge into vectors.
  if (!DstTy.isFixedVector())
    return false;

  const GAnyExt *Any = dyn_cast<GAnyExt>(Source);
  if (!Any)
    return false;

  const MachineInstr *NextSource = MRI.getVRegDef(Any->getSrcReg());

  if (const GBuildVector *BV = dyn_cast<GBuildVector>(NextSource)) {
    // G_UNMERGE_VALUES G_ANYEXT G_BUILD_VECTOR

    if (!MRI.hasOneNonDBGUse(BV->getReg(0)))
      return false;

    // FIXME: check element types?
    if (BV->getNumSources() % Unmerge->getNumDefs() != 0)
      return false;

    LLT BigBvTy = MRI.getType(BV->getReg(0));
    LLT SmallBvTy = DstTy;
    LLT SmallBvElementTy = SmallBvTy.getElementType();

    if (!isLegalOrBeforeLegalizer(
            {TargetOpcode::G_BUILD_VECTOR, {SmallBvTy, SmallBvElementTy}}))
      return false;

    // We check the legality of scalar anyext.
    if (!isLegalOrBeforeLegalizer(
            {TargetOpcode::G_ANYEXT,
             {SmallBvElementTy, BigBvTy.getElementType()}}))
      return false;

    MatchInfo = [=](MachineIRBuilder &B) {
      // Build into each G_UNMERGE_VALUES def
      // a small build vector with anyext from the source build vector.
      for (unsigned I = 0; I < Unmerge->getNumDefs(); ++I) {
        SmallVector<Register> Ops;
        for (unsigned J = 0; J < SmallBvTy.getNumElements(); ++J) {
          Register SourceArray =
              BV->getSourceReg(I * SmallBvTy.getNumElements() + J);
          auto AnyExt = B.buildAnyExt(SmallBvElementTy, SourceArray);
          Ops.push_back(AnyExt.getReg(0));
        }
        B.buildBuildVector(Unmerge->getOperand(I).getReg(), Ops);
      }
    };
    return true;
  }

  return false;
}

bool CombinerHelper::matchShuffleUndefRHS(MachineInstr &MI,
                                          BuildFnTy &MatchInfo) const {

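  // Canonicalize mask indices that select from the RHS vector to -1 (undef);
  // e.g. (illustrative) with 4 source elements, the mask [0, 5, 3, 6]
  // becomes [0, -1, 3, -1].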
  bool Changed = false;
  auto &Shuffle = cast<GShuffleVector>(MI);
  ArrayRef<int> OrigMask = Shuffle.getMask();
  SmallVector<int, 16> NewMask;
  const LLT SrcTy = MRI.getType(Shuffle.getSrc1Reg());
  const unsigned NumSrcElems = SrcTy.isVector() ? SrcTy.getNumElements() : 1;
  const unsigned NumDstElts = OrigMask.size();
  for (unsigned i = 0; i != NumDstElts; ++i) {
    int Idx = OrigMask[i];
    if (Idx >= (int)NumSrcElems) {
      Idx = -1;
      Changed = true;
    }
    NewMask.push_back(Idx);
  }

  if (!Changed)
    return false;

  MatchInfo = [&, NewMask = std::move(NewMask)](MachineIRBuilder &B) {
    B.buildShuffleVector(MI.getOperand(0), MI.getOperand(1), MI.getOperand(2),
                         std::move(NewMask));
  };

  return true;
}

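// Swap shuffle mask indices between the two source vectors; e.g.
// (illustrative) with NumElems = 4, the mask [0, 5, -1, 2] becomes
// [4, 1, -1, 6].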
static void commuteMask(MutableArrayRef<int> Mask, const unsigned NumElems) {
  const unsigned MaskSize = Mask.size();
  for (unsigned I = 0; I < MaskSize; ++I) {
    int Idx = Mask[I];
    if (Idx < 0)
      continue;

    if (Idx < (int)NumElems)
      Mask[I] = Idx + NumElems;
    else
      Mask[I] = Idx - NumElems;
  }
}

bool CombinerHelper::matchShuffleDisjointMask(MachineInstr &MI,
                                              BuildFnTy &MatchInfo) const {

  auto &Shuffle = cast<GShuffleVector>(MI);
  // If either of the two inputs is already undef, don't check the mask again,
  // to prevent an infinite loop.
  if (getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, Shuffle.getSrc1Reg(), MRI))
    return false;

  if (getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, Shuffle.getSrc2Reg(), MRI))
    return false;

  const LLT DstTy = MRI.getType(Shuffle.getReg(0));
  const LLT Src1Ty = MRI.getType(Shuffle.getSrc1Reg());
  if (!isLegalOrBeforeLegalizer(
          {TargetOpcode::G_SHUFFLE_VECTOR, {DstTy, Src1Ty}}))
    return false;

  ArrayRef<int> Mask = Shuffle.getMask();
  const unsigned NumSrcElems = Src1Ty.isVector() ? Src1Ty.getNumElements() : 1;

  bool TouchesSrc1 = false;
  bool TouchesSrc2 = false;
  const unsigned NumElems = Mask.size();
  for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
    if (Mask[Idx] < 0)
      continue;

    if (Mask[Idx] < (int)NumSrcElems)
      TouchesSrc1 = true;
    else
      TouchesSrc2 = true;
  }

  if (TouchesSrc1 == TouchesSrc2)
    return false;

  Register NewSrc1 = Shuffle.getSrc1Reg();
  SmallVector<int, 16> NewMask(Mask);
  if (TouchesSrc2) {
    NewSrc1 = Shuffle.getSrc2Reg();
    commuteMask(NewMask, NumSrcElems);
  }

  MatchInfo = [=, &Shuffle](MachineIRBuilder &B) {
    auto Undef = B.buildUndef(Src1Ty);
    B.buildShuffleVector(Shuffle.getReg(0), NewSrc1, Undef, NewMask);
  };

  return true;
}

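// Fold G_USUBO / G_SSUBO into a plain G_SUB plus a constant carry when value
// tracking proves the subtraction either never or always overflows.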
bool CombinerHelper::matchSuboCarryOut(const MachineInstr &MI,
                                       BuildFnTy &MatchInfo) const {
  const GSubCarryOut *Subo = cast<GSubCarryOut>(&MI);

  Register Dst = Subo->getReg(0);
  Register LHS = Subo->getLHSReg();
  Register RHS = Subo->getRHSReg();
  Register Carry = Subo->getCarryOutReg();
  LLT DstTy = MRI.getType(Dst);
  LLT CarryTy = MRI.getType(Carry);

  // Check legality before known bits.
  if (!isLegalOrBeforeLegalizer({TargetOpcode::G_SUB, {DstTy}}) ||
      !isConstantLegalOrBeforeLegalizer(CarryTy))
    return false;

  ConstantRange KBLHS =
      ConstantRange::fromKnownBits(VT->getKnownBits(LHS),
                                   /*IsSigned=*/Subo->isSigned());
  ConstantRange KBRHS =
      ConstantRange::fromKnownBits(VT->getKnownBits(RHS),
                                   /*IsSigned=*/Subo->isSigned());

  if (Subo->isSigned()) {
    // G_SSUBO
    switch (KBLHS.signedSubMayOverflow(KBRHS)) {
    case ConstantRange::OverflowResult::MayOverflow:
      return false;
    case ConstantRange::OverflowResult::NeverOverflows: {
      MatchInfo = [=](MachineIRBuilder &B) {
        B.buildSub(Dst, LHS, RHS, MachineInstr::MIFlag::NoSWrap);
        B.buildConstant(Carry, 0);
      };
      return true;
    }
    case ConstantRange::OverflowResult::AlwaysOverflowsLow:
    case ConstantRange::OverflowResult::AlwaysOverflowsHigh: {
      MatchInfo = [=](MachineIRBuilder &B) {
        B.buildSub(Dst, LHS, RHS);
        B.buildConstant(Carry, getICmpTrueVal(getTargetLowering(),
                                              /*IsVector=*/CarryTy.isVector(),
                                              /*IsFP=*/false));
      };
      return true;
    }
    }
    return false;
  }

  // G_USUBO
  switch (KBLHS.unsignedSubMayOverflow(KBRHS)) {
  case ConstantRange::OverflowResult::MayOverflow:
    return false;
  case ConstantRange::OverflowResult::NeverOverflows: {
    MatchInfo = [=](MachineIRBuilder &B) {
      B.buildSub(Dst, LHS, RHS, MachineInstr::MIFlag::NoUWrap);
      B.buildConstant(Carry, 0);
    };
    return true;
  }
  case ConstantRange::OverflowResult::AlwaysOverflowsLow:
  case ConstantRange::OverflowResult::AlwaysOverflowsHigh: {
    MatchInfo = [=](MachineIRBuilder &B) {
      B.buildSub(Dst, LHS, RHS);
      B.buildConstant(Carry, getICmpTrueVal(getTargetLowering(),
                                            /*IsVector=*/CarryTy.isVector(),
                                            /*IsFP=*/false));
    };
    return true;
  }
  }

  return false;
}
