diff --git a/llvm/include/llvm/Analysis/MemoryLocation.h b/llvm/include/llvm/Analysis/MemoryLocation.h
index b72a27cab86b34..6d3d0acc2076d4 100644
--- a/llvm/include/llvm/Analysis/MemoryLocation.h
+++ b/llvm/include/llvm/Analysis/MemoryLocation.h
@@ -191,8 +191,14 @@ class LocationSize {
     return Value == Other.Value;
   }
 
+  bool operator==(const TypeSize &Other) const {
+    return hasValue() && getValue() == Other;
+  }
+
   bool operator!=(const LocationSize &Other) const { return !(*this == Other); }
 
+  bool operator!=(const TypeSize &Other) const { return !(*this == Other); }
+
   // Ordering operators are not provided, since it's unclear if there's only one
   // reasonable way to compare:
   // - values that don't exist against values that do, and
@@ -293,8 +299,9 @@ class MemoryLocation {
 
-  // Return the exact size if the exact size is known at compiletime,
-  // otherwise return MemoryLocation::UnknownSize.
-  static uint64_t getSizeOrUnknown(const TypeSize &T) {
-    return T.isScalable() ? UnknownSize : T.getFixedValue();
+  // Return the exact size if it is known at compile time,
+  // otherwise return LocationSize::beforeOrAfterPointer().
+  static LocationSize getSizeOrUnknown(const TypeSize &T) {
+    return T.isScalable() ? LocationSize::beforeOrAfterPointer()
+                          : LocationSize::precise(T.getFixedValue());
   }
 
   MemoryLocation() : Ptr(nullptr), Size(LocationSize::beforeOrAfterPointer()) {}
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h b/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h
index 8e456015cd3736..c73ac2c9f55b7b 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h
@@ -647,15 +647,15 @@ bool GIMatchTableExecutor::executeMatchTable(
       unsigned Size = MRI.getType(MO.getReg()).getSizeInBits();
       if (MatcherOpcode == GIM_CheckMemorySizeEqualToLLT &&
-          MMO->getSizeInBits() != Size) {
+          MMO->getSizeInBits().getValue() != Size) {
         if (handleReject() == RejectAndGiveUp)
           return false;
       } else if (MatcherOpcode == GIM_CheckMemorySizeLessThanLLT &&
-                 MMO->getSizeInBits() >= Size) {
+                 MMO->getSizeInBits().getValue() >= Size) {
         if (handleReject() == RejectAndGiveUp)
           return false;
       } else if (MatcherOpcode == GIM_CheckMemorySizeGreaterThanLLT &&
-                 MMO->getSizeInBits() <= Size)
+                 MMO->getSizeInBits().getValue() <= Size)
         if (handleReject() == RejectAndGiveUp)
           return false;
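Note on the comparison semantics introduced above: a LocationSize that has no
value (beforeOrAfterPointer) never compares equal to any concrete TypeSize.
The following standalone sketch mirrors that intended behaviour with toy
stand-in types; it is not the LLVM classes themselves.

    #include <cassert>
    #include <cstdint>
    #include <optional>

    struct ToyTypeSize {
      uint64_t MinValue; // known-minimum size
      bool Scalable;     // scaled by vscale at run time
      bool operator==(const ToyTypeSize &O) const {
        return MinValue == O.MinValue && Scalable == O.Scalable;
      }
    };

    struct ToyLocationSize {
      std::optional<ToyTypeSize> Size; // nullopt == beforeOrAfterPointer()
      bool hasValue() const { return Size.has_value(); }
      // Mirrors the new operator==(const TypeSize &): an unknown location
      // size never compares equal to a concrete type size.
      bool operator==(const ToyTypeSize &O) const {
        return hasValue() && *Size == O;
      }
      bool operator!=(const ToyTypeSize &O) const { return !(*this == O); }
    };

    int main() {
      ToyTypeSize Four{4, false};
      ToyTypeSize Eight{8, false};
      ToyLocationSize Unknown{std::nullopt};
      ToyLocationSize FourBytes{Four};
      assert(Unknown != Four);    // unknown size != any concrete size
      assert(FourBytes == Four);
      assert(FourBytes != Eight);
      return 0;
    }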
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h b/llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h
index 6b03703192df91..261cfcf504d5fe 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h
@@ -54,9 +54,9 @@ class GMemOperation : public GenericMachineInstr {
   bool isUnordered() const { return getMMO().isUnordered(); }
 
   /// Returns the size in bytes of the memory access.
-  uint64_t getMemSize() const { return getMMO().getSize(); }
+  LocationSize getMemSize() const { return getMMO().getSize(); }
   /// Returns the size in bits of the memory access.
-  uint64_t getMemSizeInBits() const { return getMMO().getSizeInBits(); }
+  LocationSize getMemSizeInBits() const { return getMMO().getSizeInBits(); }
 
   static bool classof(const MachineInstr *MI) {
     return GenericMachineInstr::classof(MI) && MI->hasOneMemOperand();
diff --git a/llvm/include/llvm/CodeGen/MachineFunction.h b/llvm/include/llvm/CodeGen/MachineFunction.h
index a2c90c9f42f38f..dfbf7a1e7aae53 100644
--- a/llvm/include/llvm/CodeGen/MachineFunction.h
+++ b/llvm/include/llvm/CodeGen/MachineFunction.h
@@ -1026,18 +1026,27 @@ class LLVM_EXTERNAL_VISIBILITY MachineFunction {
   /// MachineMemOperands are owned by the MachineFunction and need not be
   /// explicitly deallocated.
   MachineMemOperand *getMachineMemOperand(
-      MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s,
+      MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy,
       Align base_alignment, const AAMDNodes &AAInfo = AAMDNodes(),
       const MDNode *Ranges = nullptr, SyncScope::ID SSID = SyncScope::System,
       AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
       AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic);
-
   MachineMemOperand *getMachineMemOperand(
-      MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy,
-      Align base_alignment, const AAMDNodes &AAInfo = AAMDNodes(),
+      MachinePointerInfo PtrInfo, MachineMemOperand::Flags F, LocationSize Size,
+      Align BaseAlignment, const AAMDNodes &AAInfo = AAMDNodes(),
       const MDNode *Ranges = nullptr, SyncScope::ID SSID = SyncScope::System,
       AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
       AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic);
+  MachineMemOperand *getMachineMemOperand(
+      MachinePointerInfo PtrInfo, MachineMemOperand::Flags F, uint64_t Size,
+      Align BaseAlignment, const AAMDNodes &AAInfo = AAMDNodes(),
+      const MDNode *Ranges = nullptr, SyncScope::ID SSID = SyncScope::System,
+      AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
+      AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic) {
+    return getMachineMemOperand(PtrInfo, F, LocationSize::precise(Size),
+                                BaseAlignment, AAInfo, Ranges, SSID, Ordering,
+                                FailureOrdering);
+  }
 
   /// getMachineMemOperand - Allocate a new MachineMemOperand by copying
   /// an existing one, adjusting by an offset and using the given size.
@@ -1046,9 +1055,16 @@ class LLVM_EXTERNAL_VISIBILITY MachineFunction {
   MachineMemOperand *getMachineMemOperand(const MachineMemOperand *MMO,
                                           int64_t Offset, LLT Ty);
   MachineMemOperand *getMachineMemOperand(const MachineMemOperand *MMO,
-                                          int64_t Offset, uint64_t Size) {
+                                          int64_t Offset, LocationSize Size) {
     return getMachineMemOperand(
-        MMO, Offset, Size == ~UINT64_C(0) ? LLT() : LLT::scalar(8 * Size));
+        MMO, Offset,
+        !Size.hasValue() || Size.isScalable()
+            ? LLT()
+            : LLT::scalar(8 * Size.getValue().getKnownMinValue()));
+  }
+  MachineMemOperand *getMachineMemOperand(const MachineMemOperand *MMO,
+                                          int64_t Offset, uint64_t Size) {
+    return getMachineMemOperand(MMO, Offset, LocationSize::precise(Size));
   }
 
   /// getMachineMemOperand - Allocate a new MachineMemOperand by copying
@@ -1057,10 +1073,15 @@ class LLVM_EXTERNAL_VISIBILITY MachineFunction {
   /// explicitly deallocated.
   MachineMemOperand *getMachineMemOperand(const MachineMemOperand *MMO,
                                           const MachinePointerInfo &PtrInfo,
-                                          uint64_t Size);
+                                          LocationSize Size);
   MachineMemOperand *getMachineMemOperand(const MachineMemOperand *MMO,
                                           const MachinePointerInfo &PtrInfo,
                                           LLT Ty);
+  MachineMemOperand *getMachineMemOperand(const MachineMemOperand *MMO,
+                                          const MachinePointerInfo &PtrInfo,
+                                          uint64_t Size) {
+    return getMachineMemOperand(MMO, PtrInfo, LocationSize::precise(Size));
+  }
 
   /// Allocate a new MachineMemOperand by copying an existing one,
   /// replacing only AliasAnalysis information. MachineMemOperands are owned
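The header change above keeps the old integer-taking entry points as thin
inline wrappers, so existing callers keep compiling while new callers can
express an unknown size. A minimal sketch of that overload-forwarding
pattern, with toy types rather than the real MachineFunction API:

    #include <cstdint>
    #include <iostream>

    struct LocSize {
      uint64_t Value;
      bool Precise;
      static LocSize precise(uint64_t V) { return {V, true}; }
      static LocSize beforeOrAfterPointer() { return {0, false}; }
    };

    // New primary entry point.
    void makeMMO(LocSize S) {
      if (S.Precise)
        std::cout << "precise " << S.Value << " bytes\n";
      else
        std::cout << "unknown size\n";
    }

    // Legacy convenience overload: forwards as a precise size.
    void makeMMO(uint64_t S) { makeMMO(LocSize::precise(S)); }

    int main() {
      makeMMO(uint64_t{16});                    // old callers keep working
      makeMMO(LocSize::beforeOrAfterPointer()); // new callers can say "unknown"
      return 0;
    }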
diff --git a/llvm/include/llvm/CodeGen/MachineMemOperand.h b/llvm/include/llvm/CodeGen/MachineMemOperand.h
index 12c18aaea5b26c..da4ca582cb9e4f 100644
--- a/llvm/include/llvm/CodeGen/MachineMemOperand.h
+++ b/llvm/include/llvm/CodeGen/MachineMemOperand.h
@@ -17,6 +17,7 @@
 
 #include "llvm/ADT/BitmaskEnum.h"
 #include "llvm/ADT/PointerUnion.h"
+#include "llvm/Analysis/MemoryLocation.h"
 #include "llvm/CodeGen/PseudoSourceValue.h"
 #include "llvm/CodeGenTypes/LowLevelType.h"
 #include "llvm/IR/DerivedTypes.h"
@@ -186,7 +187,7 @@ class MachineMemOperand {
   /// and atomic ordering requirements must also be specified. For cmpxchg
   /// atomic operations the atomic ordering requirements when store does not
   /// occur must also be specified.
-  MachineMemOperand(MachinePointerInfo PtrInfo, Flags flags, uint64_t s,
+  MachineMemOperand(MachinePointerInfo PtrInfo, Flags flags, LocationSize TS,
                     Align a, const AAMDNodes &AAInfo = AAMDNodes(),
                     const MDNode *Ranges = nullptr,
                     SyncScope::ID SSID = SyncScope::System,
@@ -235,13 +236,17 @@ class MachineMemOperand {
   LLT getMemoryType() const { return MemoryType; }
 
   /// Return the size in bytes of the memory reference.
-  uint64_t getSize() const {
-    return MemoryType.isValid() ? MemoryType.getSizeInBytes() : ~UINT64_C(0);
+  LocationSize getSize() const {
+    return MemoryType.isValid()
+               ? LocationSize::precise(MemoryType.getSizeInBytes())
+               : LocationSize::beforeOrAfterPointer();
   }
 
   /// Return the size in bits of the memory reference.
-  uint64_t getSizeInBits() const {
-    return MemoryType.isValid() ? MemoryType.getSizeInBits() : ~UINT64_C(0);
+  LocationSize getSizeInBits() const {
+    return MemoryType.isValid()
+               ? LocationSize::precise(MemoryType.getSizeInBits())
+               : LocationSize::beforeOrAfterPointer();
   }
 
   LLT getType() const {
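The getSize() change above replaces the old ~0 sentinel with an explicit
"unknown" state: a valid memory type yields a precise size, an invalid one
maps to beforeOrAfterPointer(). A toy illustration of that mapping, using
std::optional in place of LocationSize (stand-in types, not the LLVM ones):

    #include <cstdint>
    #include <iostream>
    #include <optional>

    struct ToyLLT {
      bool Valid;
      uint64_t SizeInBytes; // known-minimum size for scalable types
    };

    // std::nullopt plays the role of LocationSize::beforeOrAfterPointer().
    std::optional<uint64_t> getSize(const ToyLLT &Ty) {
      if (!Ty.Valid)
        return std::nullopt;         // previously the ~UINT64_C(0) sentinel
      return Ty.SizeInBytes;         // LocationSize::precise(...)
    }

    int main() {
      ToyLLT S32{true, 4};
      ToyLLT Invalid{false, 0};
      std::cout << "s32: " << *getSize(S32) << " bytes\n";
      std::cout << "invalid: "
                << (getSize(Invalid) ? "known" : "unknown") << "\n";
      return 0;
    }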
diff --git a/llvm/include/llvm/CodeGen/SelectionDAG.h b/llvm/include/llvm/CodeGen/SelectionDAG.h
index 25e6c525b672a1..4785e93d72d1cc 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAG.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAG.h
@@ -1299,7 +1299,7 @@ class SelectionDAG {
       EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment,
       MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |
                                        MachineMemOperand::MOStore,
-      uint64_t Size = 0, const AAMDNodes &AAInfo = AAMDNodes());
+      LocationSize Size = 0, const AAMDNodes &AAInfo = AAMDNodes());
 
   inline SDValue getMemIntrinsicNode(
       unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
@@ -1307,7 +1307,7 @@ class SelectionDAG {
       MaybeAlign Alignment = std::nullopt,
       MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |
                                        MachineMemOperand::MOStore,
-      uint64_t Size = 0, const AAMDNodes &AAInfo = AAMDNodes()) {
+      LocationSize Size = 0, const AAMDNodes &AAInfo = AAMDNodes()) {
     // Ensure that codegen never sees alignment 0
     return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, PtrInfo,
                                Alignment.value_or(getEVTAlign(MemVT)), Flags,
diff --git a/llvm/lib/CodeGen/DFAPacketizer.cpp b/llvm/lib/CodeGen/DFAPacketizer.cpp
index 48bb4a07662e10..c16166a1d5e1c5 100644
--- a/llvm/lib/CodeGen/DFAPacketizer.cpp
+++ b/llvm/lib/CodeGen/DFAPacketizer.cpp
@@ -252,12 +252,13 @@ void VLIWPacketizerList::PacketizeMIs(MachineBasicBlock *MBB,
 bool VLIWPacketizerList::alias(const MachineMemOperand &Op1,
                                const MachineMemOperand &Op2,
                                bool UseTBAA) const {
-  if (!Op1.getValue() || !Op2.getValue())
+  if (!Op1.getValue() || !Op2.getValue() || !Op1.getSize().hasValue() ||
+      !Op2.getSize().hasValue())
     return true;
 
   int64_t MinOffset = std::min(Op1.getOffset(), Op2.getOffset());
-  int64_t Overlapa = Op1.getSize() + Op1.getOffset() - MinOffset;
-  int64_t Overlapb = Op2.getSize() + Op2.getOffset() - MinOffset;
+  int64_t Overlapa = Op1.getSize().getValue() + Op1.getOffset() - MinOffset;
+  int64_t Overlapb = Op2.getSize().getValue() + Op2.getOffset() - MinOffset;
 
   AliasResult AAResult =
       AA->alias(MemoryLocation(Op1.getValue(), Overlapa,
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index bee49dbd0f8380..d3f86af1e2908e 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -770,12 +770,12 @@ bool CombinerHelper::matchCombineLoadWithAndMask(MachineInstr &MI,
   LLT RegTy = MRI.getType(LoadReg);
   Register PtrReg = LoadMI->getPointerReg();
   unsigned RegSize = RegTy.getSizeInBits();
-  uint64_t LoadSizeBits = LoadMI->getMemSizeInBits();
+  LocationSize LoadSizeBits = LoadMI->getMemSizeInBits();
   unsigned MaskSizeBits = MaskVal.countr_one();
 
   // The mask may not be larger than the in-memory type, as it might cover sign
   // extended bits
-  if (MaskSizeBits > LoadSizeBits)
+  if (MaskSizeBits > LoadSizeBits.getValue())
     return false;
 
   // If the mask covers the whole destination register, there's nothing to
@@ -795,7 +795,8 @@ bool CombinerHelper::matchCombineLoadWithAndMask(MachineInstr &MI,
   // still adjust the opcode to indicate the high bit behavior.
   if (LoadMI->isSimple())
     MemDesc.MemoryTy = LLT::scalar(MaskSizeBits);
-  else if (LoadSizeBits > MaskSizeBits || LoadSizeBits == RegSize)
+  else if (LoadSizeBits.getValue() > MaskSizeBits ||
+           LoadSizeBits.getValue() == RegSize)
     return false;
 
   // TODO: Could check if it's legal with the reduced or original memory size.
@@ -860,7 +861,8 @@ bool CombinerHelper::matchSextTruncSextLoad(MachineInstr &MI) {
   if (auto *LoadMI = getOpcodeDef<GSExtLoad>(LoadUser, MRI)) {
     // If truncating more than the original extended value, abort.
     auto LoadSizeBits = LoadMI->getMemSizeInBits();
-    if (TruncSrc && MRI.getType(TruncSrc).getSizeInBits() < LoadSizeBits)
+    if (TruncSrc &&
+        MRI.getType(TruncSrc).getSizeInBits() < LoadSizeBits.getValue())
       return false;
     if (LoadSizeBits == SizeInBits)
       return true;
@@ -891,7 +893,7 @@ bool CombinerHelper::matchSextInRegOfLoad(
   if (!LoadDef || !MRI.hasOneNonDBGUse(DstReg))
     return false;
 
-  uint64_t MemBits = LoadDef->getMemSizeInBits();
+  uint64_t MemBits = LoadDef->getMemSizeInBits().getValue();
 
   // If the sign extend extends from a narrower width than the load's width,
   // then we can narrow the load width when we combine to a G_SEXTLOAD.
diff --git a/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp b/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp
index 099bf45b2734cb..2e2cc9a95bd95c 100644
--- a/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp
@@ -415,7 +415,8 @@ void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known,
     if (DstTy.isVector())
       break;
     // Everything above the retrieved bits is zero
-    Known.Zero.setBitsFrom((*MI.memoperands_begin())->getSizeInBits());
+    Known.Zero.setBitsFrom(
+        (*MI.memoperands_begin())->getSizeInBits().getValue());
     break;
   }
   case TargetOpcode::G_ASHR: {
@@ -666,7 +667,7 @@ unsigned GISelKnownBits::computeNumSignBits(Register R,
     // e.g. i16->i32 = '17' bits known.
     const MachineMemOperand *MMO = *MI.memoperands_begin();
-    return TyBits - MMO->getSizeInBits() + 1;
+    return TyBits - MMO->getSizeInBits().getValue() + 1;
   }
   case TargetOpcode::G_ZEXTLOAD: {
     // FIXME: We need an in-memory type representation.
@@ -675,7 +676,7 @@ unsigned GISelKnownBits::computeNumSignBits(Register R,
     // e.g. i16->i32 = '16' bits known.
     const MachineMemOperand *MMO = *MI.memoperands_begin();
-    return TyBits - MMO->getSizeInBits();
+    return TyBits - MMO->getSizeInBits().getValue();
   }
   case TargetOpcode::G_TRUNC: {
     Register Src = MI.getOperand(1).getReg();
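The known-bits rule touched above is simple arithmetic: after a
zero-extending load of N bits into a wider register, every bit from position
N upward is a known zero, and a sign-extending load of N bits guarantees
TyBits - N + 1 sign bits. A standalone worked example (plain integers, the
real code uses KnownBits/APInt):

    #include <cassert>
    #include <cstdint>

    // Mask of destination bits known zero after zero-extending an
    // N-bit load into a 64-bit register (valid for N < 64).
    uint64_t knownZeroMask(unsigned MemSizeInBits) {
      return ~uint64_t(0) << MemSizeInBits; // "setBitsFrom(MemSizeInBits)"
    }

    int main() {
      assert(knownZeroMask(8) == 0xFFFFFFFFFFFFFF00ULL);
      assert(knownZeroMask(16) == 0xFFFFFFFFFFFF0000ULL);
      // Sign-extending load: i16 -> i32 gives 32 - 16 + 1 = 17 sign bits.
      unsigned TyBits = 32, MemBits = 16;
      assert(TyBits - MemBits + 1 == 17u);
      return 0;
    }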
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index bd3ff7265d51f9..bc062041a564d1 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -1317,7 +1317,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
     if (DstTy.isVector())
       return UnableToLegalize;
 
-    if (8 * LoadMI.getMemSize() != DstTy.getSizeInBits()) {
+    if (8 * LoadMI.getMemSize().getValue() != DstTy.getSizeInBits()) {
       Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
       MIRBuilder.buildLoad(TmpReg, LoadMI.getPointerReg(), LoadMI.getMMO());
       MIRBuilder.buildAnyExt(DstReg, TmpReg);
@@ -1335,7 +1335,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
     Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
     auto &MMO = LoadMI.getMMO();
-    unsigned MemSize = MMO.getSizeInBits();
+    unsigned MemSize = MMO.getSizeInBits().getValue();
 
     if (MemSize == NarrowSize) {
       MIRBuilder.buildLoad(TmpReg, PtrReg, MMO);
@@ -1368,7 +1368,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
     if (SrcTy.isVector() && LeftoverBits != 0)
       return UnableToLegalize;
 
-    if (8 * StoreMI.getMemSize() != SrcTy.getSizeInBits()) {
+    if (8 * StoreMI.getMemSize().getValue() != SrcTy.getSizeInBits()) {
       Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
       MIRBuilder.buildTrunc(TmpReg, SrcReg);
       MIRBuilder.buildStore(TmpReg, StoreMI.getPointerReg(), StoreMI.getMMO());
@@ -4456,7 +4456,7 @@ LegalizerHelper::reduceLoadStoreWidth(GLoadStore &LdStMI, unsigned TypeIdx,
   LLT ValTy = MRI.getType(ValReg);
 
   // FIXME: Do we need a distinct NarrowMemory legalize action?
-  if (ValTy.getSizeInBits() != 8 * LdStMI.getMemSize()) {
+  if (ValTy.getSizeInBits() != 8 * LdStMI.getMemSize().getValue()) {
     LLVM_DEBUG(dbgs() << "Can't narrow extload/truncstore\n");
     return UnableToLegalize;
  }
diff --git a/llvm/lib/CodeGen/GlobalISel/LoadStoreOpt.cpp b/llvm/lib/CodeGen/GlobalISel/LoadStoreOpt.cpp
index b5c9d3e912cc20..9fc8ecd60b03ff 100644
--- a/llvm/lib/CodeGen/GlobalISel/LoadStoreOpt.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LoadStoreOpt.cpp
@@ -117,12 +117,8 @@ bool GISelAddressing::aliasIsKnownForLoadStore(const MachineInstr &MI1,
   if (!BasePtr0.BaseReg.isValid() || !BasePtr1.BaseReg.isValid())
     return false;
 
-  LocationSize Size1 = LdSt1->getMemSize() != MemoryLocation::UnknownSize
-                           ? LdSt1->getMemSize()
-                           : LocationSize::beforeOrAfterPointer();
-  LocationSize Size2 = LdSt2->getMemSize() != MemoryLocation::UnknownSize
-                           ? LdSt2->getMemSize()
-                           : LocationSize::beforeOrAfterPointer();
+  LocationSize Size1 = LdSt1->getMemSize();
+  LocationSize Size2 = LdSt2->getMemSize();
 
   int64_t PtrDiff;
   if (BasePtr0.BaseReg == BasePtr1.BaseReg) {
@@ -214,14 +210,9 @@ bool GISelAddressing::instMayAlias(const MachineInstr &MI,
       Offset = 0;
     }
 
-    TypeSize Size = LS->getMMO().getMemoryType().getSizeInBytes();
-    return {LS->isVolatile(),
-            LS->isAtomic(),
-            BaseReg,
-            Offset /*base offset*/,
-            Size.isScalable() ? LocationSize::beforeOrAfterPointer()
-                              : LocationSize::precise(Size),
-            &LS->getMMO()};
+    LocationSize Size = LS->getMMO().getSize();
+    return {LS->isVolatile(), LS->isAtomic(), BaseReg,
+            Offset /*base offset*/, Size, &LS->getMMO()};
   }
   // FIXME: support recognizing lifetime instructions.
   // Default.
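The "8 * MemSize != DstSizeInBits" tests above are how these narrowing
routines detect extending loads and truncating stores: a plain access has a
destination exactly eight times its byte size, anything else carries
extension semantics and is handled separately. A small sketch with
illustrative values only:

    #include <cassert>

    bool isPlainAccess(unsigned MemSizeInBytes, unsigned RegSizeInBits) {
      return 8 * MemSizeInBytes == RegSizeInBits;
    }

    int main() {
      assert(isPlainAccess(4, 32));  // s32 load of 4 bytes
      assert(!isPlainAccess(2, 32)); // s32 G_ZEXTLOAD of 2 bytes: extending
      return 0;
    }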
diff --git a/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp b/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp
index cfc8c28b99e562..481d9e341da377 100644
--- a/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp
+++ b/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp
@@ -1356,10 +1356,11 @@ InstrRefBasedLDV::findLocationForMemOperand(const MachineInstr &MI) {
   // from the stack at some point. Happily the memory operand will tell us
   // the size written to the stack.
   auto *MemOperand = *MI.memoperands_begin();
-  unsigned SizeInBits = MemOperand->getSizeInBits();
+  LocationSize SizeInBits = MemOperand->getSizeInBits();
+  assert(SizeInBits.hasValue() && "Expected to find a valid size!");
 
   // Find that position in the stack indexes we're tracking.
-  auto IdxIt = MTracker->StackSlotIdxes.find({SizeInBits, 0});
+  auto IdxIt = MTracker->StackSlotIdxes.find({SizeInBits.getValue(), 0});
   if (IdxIt == MTracker->StackSlotIdxes.end())
     // That index is not tracked. This is surprising, and unlikely to ever
     // occur, but the safe action is to indicate the variable is optimised out.
diff --git a/llvm/lib/CodeGen/MIRVRegNamerUtils.cpp b/llvm/lib/CodeGen/MIRVRegNamerUtils.cpp
index 812d57984e6cae..ccfc4565d3a9bc 100644
--- a/llvm/lib/CodeGen/MIRVRegNamerUtils.cpp
+++ b/llvm/lib/CodeGen/MIRVRegNamerUtils.cpp
@@ -123,7 +123,7 @@ std::string VRegRenamer::getInstructionOpcodeHash(MachineInstr &MI) {
   llvm::transform(MI.uses(), std::back_inserter(MIOperands), GetHashableMO);
 
   for (const auto *Op : MI.memoperands()) {
-    MIOperands.push_back((unsigned)Op->getSize());
+    MIOperands.push_back((unsigned)Op->getSize().getValue());
     MIOperands.push_back((unsigned)Op->getFlags());
     MIOperands.push_back((unsigned)Op->getOffset());
     MIOperands.push_back((unsigned)Op->getSuccessOrdering());
diff --git a/llvm/lib/CodeGen/MachineFunction.cpp b/llvm/lib/CodeGen/MachineFunction.cpp
index 323f1a6a1b2bd7..ad532149926670 100644
--- a/llvm/lib/CodeGen/MachineFunction.cpp
+++ b/llvm/lib/CodeGen/MachineFunction.cpp
@@ -484,13 +484,17 @@ void MachineFunction::deleteMachineBasicBlock(MachineBasicBlock *MBB) {
 }
 
 MachineMemOperand *MachineFunction::getMachineMemOperand(
-    MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s,
-    Align base_alignment, const AAMDNodes &AAInfo, const MDNode *Ranges,
+    MachinePointerInfo PtrInfo, MachineMemOperand::Flags F, LocationSize Size,
+    Align BaseAlignment, const AAMDNodes &AAInfo, const MDNode *Ranges,
     SyncScope::ID SSID, AtomicOrdering Ordering,
     AtomicOrdering FailureOrdering) {
+  assert((!Size.hasValue() ||
+          Size.getValue().getKnownMinValue() != ~UINT64_C(0)) &&
+         "An unknown size must be represented using "
+         "LocationSize::beforeOrAfterPointer()");
   return new (Allocator)
-      MachineMemOperand(PtrInfo, f, s, base_alignment, AAInfo, Ranges,
-                        SSID, Ordering, FailureOrdering);
+      MachineMemOperand(PtrInfo, F, Size, BaseAlignment, AAInfo, Ranges, SSID,
+                        Ordering, FailureOrdering);
 }
 
 MachineMemOperand *MachineFunction::getMachineMemOperand(
@@ -503,8 +507,14 @@ MachineMemOperand *MachineFunction::getMachineMemOperand(
                              Ordering, FailureOrdering);
 }
 
-MachineMemOperand *MachineFunction::getMachineMemOperand(
-    const MachineMemOperand *MMO, const MachinePointerInfo &PtrInfo,
-    uint64_t Size) {
+MachineMemOperand *
+MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
+                                      const MachinePointerInfo &PtrInfo,
+                                      LocationSize Size) {
+  assert((!Size.hasValue() ||
+          Size.getValue().getKnownMinValue() != ~UINT64_C(0)) &&
+         "An unknown size must be represented using "
+         "LocationSize::beforeOrAfterPointer()");
   return new (Allocator) MachineMemOperand(PtrInfo, MMO->getFlags(), Size,
                                            MMO->getBaseAlign(), AAMDNodes(),
                                            nullptr, MMO->getSyncScopeID(),
diff --git a/llvm/lib/CodeGen/MachineInstr.cpp b/llvm/lib/CodeGen/MachineInstr.cpp
index 6654e1d6ceceea..fe2f9ccd33a330 100644
--- a/llvm/lib/CodeGen/MachineInstr.cpp
+++ b/llvm/lib/CodeGen/MachineInstr.cpp
@@ -1302,10 +1302,10 @@ static bool MemOperandsHaveAlias(const MachineFrameInfo &MFI, AAResults *AA,
   int64_t OffsetB = MMOb->getOffset();
   int64_t MinOffset = std::min(OffsetA, OffsetB);
 
-  uint64_t WidthA = MMOa->getSize();
-  uint64_t WidthB = MMOb->getSize();
-  bool KnownWidthA = WidthA != MemoryLocation::UnknownSize;
-  bool KnownWidthB = WidthB != MemoryLocation::UnknownSize;
+  LocationSize WidthA = MMOa->getSize();
+  LocationSize WidthB = MMOb->getSize();
+  bool KnownWidthA = WidthA.hasValue();
+  bool KnownWidthB = WidthB.hasValue();
 
   const Value *ValA = MMOa->getValue();
   const Value *ValB = MMOb->getValue();
@@ -1325,8 +1325,8 @@ static bool MemOperandsHaveAlias(const MachineFrameInfo &MFI, AAResults *AA,
     if (!KnownWidthA || !KnownWidthB)
       return true;
     int64_t MaxOffset = std::max(OffsetA, OffsetB);
-    int64_t LowWidth = (MinOffset == OffsetA) ? WidthA : WidthB;
-    return (MinOffset + LowWidth > MaxOffset);
+    LocationSize LowWidth = (MinOffset == OffsetA) ? WidthA : WidthB;
+    return (MinOffset + (int)LowWidth.getValue() > MaxOffset);
   }
 
   if (!AA)
@@ -1338,10 +1338,10 @@ static bool MemOperandsHaveAlias(const MachineFrameInfo &MFI, AAResults *AA,
   assert((OffsetA >= 0) && "Negative MachineMemOperand offset");
   assert((OffsetB >= 0) && "Negative MachineMemOperand offset");
 
-  int64_t OverlapA =
-      KnownWidthA ? WidthA + OffsetA - MinOffset : MemoryLocation::UnknownSize;
-  int64_t OverlapB =
-      KnownWidthB ? WidthB + OffsetB - MinOffset : MemoryLocation::UnknownSize;
+  int64_t OverlapA = KnownWidthA ? WidthA.getValue() + OffsetA - MinOffset
+                                 : MemoryLocation::UnknownSize;
+  int64_t OverlapB = KnownWidthB ? WidthB.getValue() + OffsetB - MinOffset
+                                 : MemoryLocation::UnknownSize;
 
   return !AA->isNoAlias(
       MemoryLocation(ValA, OverlapA,
                      UseTBAA ? MMOa->getAAInfo() : AAMDNodes()),
@@ -2357,15 +2357,16 @@ using MMOList = SmallVector<const MachineMemOperand *, 2>;
 
 static LocationSize getSpillSlotSize(const MMOList &Accesses,
                                      const MachineFrameInfo &MFI) {
   uint64_t Size = 0;
-  for (const auto *A : Accesses)
+  for (const auto *A : Accesses) {
     if (MFI.isSpillSlotObjectIndex(
             cast<FixedStackPseudoSourceValue>(A->getPseudoValue())
                 ->getFrameIndex())) {
-      uint64_t S = A->getSize();
-      if (S == ~UINT64_C(0))
+      LocationSize S = A->getSize();
+      if (!S.hasValue())
         return LocationSize::beforeOrAfterPointer();
-      Size += S;
+      Size += S.getValue();
     }
+  }
   return Size;
 }
 
@@ -2374,10 +2375,8 @@ MachineInstr::getSpillSize(const TargetInstrInfo *TII) const {
   int FI;
   if (TII->isStoreToStackSlotPostFE(*this, FI)) {
     const MachineFrameInfo &MFI = getMF()->getFrameInfo();
-    if (MFI.isSpillSlotObjectIndex(FI)) {
-      uint64_t Size = (*memoperands_begin())->getSize();
-      return Size == ~UINT64_C(0) ? LocationSize::beforeOrAfterPointer() : Size;
-    }
+    if (MFI.isSpillSlotObjectIndex(FI))
+      return (*memoperands_begin())->getSize();
   }
   return std::nullopt;
 }
@@ -2395,10 +2394,8 @@ MachineInstr::getRestoreSize(const TargetInstrInfo *TII) const {
   int FI;
   if (TII->isLoadFromStackSlotPostFE(*this, FI)) {
     const MachineFrameInfo &MFI = getMF()->getFrameInfo();
-    if (MFI.isSpillSlotObjectIndex(FI)) {
-      uint64_t Size = (*memoperands_begin())->getSize();
-      return Size == ~UINT64_C(0) ? LocationSize::beforeOrAfterPointer() : Size;
-    }
+    if (MFI.isSpillSlotObjectIndex(FI))
+      return (*memoperands_begin())->getSize();
  }
  return std::nullopt;
 }
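The same-object overlap test preserved above is worth a worked example: two
accesses into one underlying value conflict exactly when the lower-offset
access extends past the start of the higher one. A standalone sketch with
plain integers standing in for LocationSize after the hasValue() checks:

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    bool mayOverlap(int64_t OffA, int64_t WidthA, int64_t OffB,
                    int64_t WidthB) {
      int64_t MinOffset = std::min(OffA, OffB);
      int64_t MaxOffset = std::max(OffA, OffB);
      int64_t LowWidth = (MinOffset == OffA) ? WidthA : WidthB;
      return MinOffset + LowWidth > MaxOffset;
    }

    int main() {
      assert(mayOverlap(0, 8, 4, 4));  // [0,8) overlaps [4,8)
      assert(!mayOverlap(0, 4, 4, 4)); // [0,4) and [4,8) are disjoint
      return 0;
    }

An unknown width (hasValue() false) never reaches this arithmetic; the
caller conservatively reports a possible alias first.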
diff --git a/llvm/lib/CodeGen/MachineOperand.cpp b/llvm/lib/CodeGen/MachineOperand.cpp
index c7c0a1c20d57f4..937ca539513afd 100644
--- a/llvm/lib/CodeGen/MachineOperand.cpp
+++ b/llvm/lib/CodeGen/MachineOperand.cpp
@@ -1101,24 +1101,26 @@ MachineMemOperand::MachineMemOperand(MachinePointerInfo ptrinfo, Flags f,
   assert(getFailureOrdering() == FailureOrdering && "Value truncated");
 }
 
-MachineMemOperand::MachineMemOperand(MachinePointerInfo ptrinfo, Flags f,
-                                     uint64_t s, Align a,
+MachineMemOperand::MachineMemOperand(MachinePointerInfo ptrinfo, Flags F,
+                                     LocationSize TS, Align BaseAlignment,
                                      const AAMDNodes &AAInfo,
                                      const MDNode *Ranges, SyncScope::ID SSID,
                                      AtomicOrdering Ordering,
                                      AtomicOrdering FailureOrdering)
-    : MachineMemOperand(ptrinfo, f,
-                        s == ~UINT64_C(0) ? LLT() : LLT::scalar(8 * s), a,
-                        AAInfo, Ranges, SSID, Ordering, FailureOrdering) {}
+    : MachineMemOperand(ptrinfo, F,
+                        !TS.hasValue() || TS.isScalable()
+                            ? LLT()
+                            : LLT::scalar(8 * TS.getValue().getKnownMinValue()),
+                        BaseAlignment, AAInfo, Ranges, SSID, Ordering,
+                        FailureOrdering) {}
 
 void MachineMemOperand::refineAlignment(const MachineMemOperand *MMO) {
   // The Value and Offset may differ due to CSE. But the flags and size
   // should be the same.
   assert(MMO->getFlags() == getFlags() && "Flags mismatch!");
-  assert((MMO->getSize() == ~UINT64_C(0) || getSize() == ~UINT64_C(0) ||
+  assert((!MMO->getSize().hasValue() || !getSize().hasValue() ||
           MMO->getSize() == getSize()) &&
          "Size mismatch!");
-
   if (MMO->getBaseAlign() >= getBaseAlign()) {
     // Update the alignment value.
     BaseAlign = MMO->getBaseAlign();
@@ -1240,7 +1242,8 @@ void MachineMemOperand::print(raw_ostream &OS, ModuleSlotTracker &MST,
        << "unknown-address";
   }
   MachineOperand::printOperandOffset(OS, getOffset());
-  if (getSize() > 0 && getAlign() != getSize())
+  if (!getSize().hasValue() ||
+      getAlign() != getSize().getValue().getKnownMinValue())
     OS << ", align " << getAlign().value();
   if (getAlign() != getBaseAlign())
     OS << ", basealign " << getBaseAlign().value();
diff --git a/llvm/lib/CodeGen/MachinePipeliner.cpp b/llvm/lib/CodeGen/MachinePipeliner.cpp
index d8cb6816883394..eb42a78603d407 100644
--- a/llvm/lib/CodeGen/MachinePipeliner.cpp
+++ b/llvm/lib/CodeGen/MachinePipeliner.cpp
@@ -2732,19 +2732,20 @@ bool SwingSchedulerDAG::isLoopCarriedDep(SUnit *Source, const SDep &Dep,
   if (!LoopDefS || !TII->getIncrementValue(*LoopDefS, D))
     return true;
 
-  uint64_t AccessSizeS = (*SI->memoperands_begin())->getSize();
-  uint64_t AccessSizeD = (*DI->memoperands_begin())->getSize();
+  LocationSize AccessSizeS = (*SI->memoperands_begin())->getSize();
+  LocationSize AccessSizeD = (*DI->memoperands_begin())->getSize();
 
   // This is the main test, which checks the offset values and the loop
   // increment value to determine if the accesses may be loop carried.
-  if (AccessSizeS == MemoryLocation::UnknownSize ||
-      AccessSizeD == MemoryLocation::UnknownSize)
+  if (!AccessSizeS.hasValue() || !AccessSizeD.hasValue())
     return true;
 
-  if (DeltaS != DeltaD || DeltaS < AccessSizeS || DeltaD < AccessSizeD)
+  if (DeltaS != DeltaD || DeltaS < AccessSizeS.getValue() ||
+      DeltaD < AccessSizeD.getValue())
     return true;
 
-  return (OffsetS + (int64_t)AccessSizeS < OffsetD + (int64_t)AccessSizeD);
+  return (OffsetS + (int64_t)AccessSizeS.getValue() <
+          OffsetD + (int64_t)AccessSizeD.getValue());
 }
 
 void SwingSchedulerDAG::postProcessDAG() {
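A numeric illustration of the loop-carried test above, under the assumption
(as in the pipeliner) that both pointers advance by a constant Delta per
iteration. Toy numbers only; the real code also checks offsets and the
increment value of the loop-defining instruction:

    #include <cassert>
    #include <cstdint>

    bool mayBeLoopCarried(int64_t DeltaS, int64_t DeltaD, int64_t OffsetS,
                          int64_t OffsetD, int64_t SizeS, int64_t SizeD) {
      // Increment smaller than the access: iterations can collide.
      if (DeltaS != DeltaD || DeltaS < SizeS || DeltaD < SizeD)
        return true;
      return OffsetS + SizeS < OffsetD + SizeD;
    }

    int main() {
      // 4-byte accesses stepping by 4, source interval above destination:
      assert(!mayBeLoopCarried(4, 4, 4, 0, 4, 4));
      // Step of 2 with 4-byte accesses: successive iterations overlap.
      assert(mayBeLoopCarried(2, 2, 0, 0, 4, 4));
      return 0;
    }

With LocationSize, any unknown access size now short-circuits to "may be
loop carried" before this arithmetic runs.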
diff --git a/llvm/lib/CodeGen/MachineStableHash.cpp b/llvm/lib/CodeGen/MachineStableHash.cpp
index 1cd90474898e77..5abfbd5981fba8 100644
--- a/llvm/lib/CodeGen/MachineStableHash.cpp
+++ b/llvm/lib/CodeGen/MachineStableHash.cpp
@@ -200,7 +200,7 @@ stable_hash llvm::stableHashValue(const MachineInstr &MI, bool HashVRegs,
   for (const auto *Op : MI.memoperands()) {
     if (!HashMemOperands)
       break;
-    HashComponents.push_back(static_cast<unsigned>(Op->getSize()));
+    HashComponents.push_back(static_cast<unsigned>(Op->getSize().getValue()));
     HashComponents.push_back(static_cast<unsigned>(Op->getFlags()));
     HashComponents.push_back(static_cast<unsigned>(Op->getOffset()));
     HashComponents.push_back(static_cast<unsigned>(Op->getSuccessOrdering()));
diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp
index c2d6dd35e1cb21..c69d36fc7fdd60 100644
--- a/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -1195,13 +1195,16 @@ void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
       const MachineMemOperand &MMO = **MI->memoperands_begin();
       if (MI->getOpcode() == TargetOpcode::G_ZEXTLOAD ||
           MI->getOpcode() == TargetOpcode::G_SEXTLOAD) {
-        if (MMO.getSizeInBits() >= ValTy.getSizeInBits())
+        if (TypeSize::isKnownGE(MMO.getSizeInBits().getValue(),
+                                ValTy.getSizeInBits()))
           report("Generic extload must have a narrower memory type", MI);
       } else if (MI->getOpcode() == TargetOpcode::G_LOAD) {
-        if (MMO.getSize() > ValTy.getSizeInBytes())
+        if (TypeSize::isKnownGT(MMO.getSize().getValue(),
+                                ValTy.getSizeInBytes()))
           report("load memory size cannot exceed result size", MI);
       } else if (MI->getOpcode() == TargetOpcode::G_STORE) {
-        if (ValTy.getSizeInBytes() < MMO.getSize())
+        if (TypeSize::isKnownLT(ValTy.getSizeInBytes(),
+                                MMO.getSize().getValue()))
           report("store memory size cannot exceed value size", MI);
       }
diff --git a/llvm/lib/CodeGen/ModuloSchedule.cpp b/llvm/lib/CodeGen/ModuloSchedule.cpp
index 0bef513342ff12..bdae94c4e6f885 100644
--- a/llvm/lib/CodeGen/ModuloSchedule.cpp
+++ b/llvm/lib/CodeGen/ModuloSchedule.cpp
@@ -979,8 +979,8 @@ void ModuloScheduleExpander::updateMemOperands(MachineInstr &NewMI,
       NewMMOs.push_back(
           MF.getMachineMemOperand(MMO, AdjOffset, MMO->getSize()));
     } else {
-      NewMMOs.push_back(
-          MF.getMachineMemOperand(MMO, 0, MemoryLocation::UnknownSize));
+      NewMMOs.push_back(MF.getMachineMemOperand(
+          MMO, 0, LocationSize::beforeOrAfterPointer()));
     }
   }
   NewMI.setMemRefs(MF, NewMMOs);
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index b6a5925123f13f..5eb53d57c9c2bf 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -24160,7 +24160,7 @@ static SDValue narrowExtractedVectorLoad(SDNode *Extract, SelectionDAG &DAG) {
   // TODO: Use "BaseIndexOffset" to make this more effective.
   SDValue NewAddr = DAG.getMemBasePlusOffset(Ld->getBasePtr(), Offset, DL);
 
-  uint64_t StoreSize = MemoryLocation::getSizeOrUnknown(VT.getStoreSize());
+  LocationSize StoreSize = MemoryLocation::getSizeOrUnknown(VT.getStoreSize());
   MachineFunction &MF = DAG.getMachineFunction();
   MachineMemOperand *MMO;
   if (Offset.isScalable()) {
@@ -27805,14 +27805,13 @@ bool DAGCombiner::mayAlias(SDNode *Op0, SDNode *Op1) const {
                        : (LSN->getAddressingMode() == ISD::PRE_DEC)
                            ? -1 * C->getSExtValue()
                            : 0;
-    uint64_t Size =
+    LocationSize Size =
        MemoryLocation::getSizeOrUnknown(LSN->getMemoryVT().getStoreSize());
     return {LSN->isVolatile(),
             LSN->isAtomic(),
             LSN->getBasePtr(),
             Offset /*base offset*/,
-            Size != ~UINT64_C(0) ? LocationSize::precise(Size)
-                                 : LocationSize::beforeOrAfterPointer(),
+            Size,
             LSN->getMemOperand()};
   }
   if (const auto *LN = cast<LifetimeSDNode>(N))
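The verifier change above swaps plain comparisons for TypeSize::isKnown*
predicates because scalable sizes cannot always be ordered at compile time;
the verifier should only report an error when the violation is provable for
every vscale. A toy model of that conservative comparison (stand-in type,
not llvm::TypeSize):

    #include <cassert>
    #include <cstdint>

    struct ToySize {
      uint64_t MinVal; // known-minimum value
      bool Scalable;   // multiplied by an unknown positive vscale
    };

    // True only when LHS > RHS holds for every possible vscale >= 1.
    bool isKnownGT(ToySize L, ToySize R) {
      if (L.Scalable == R.Scalable)
        return L.MinVal > R.MinVal; // same scaling: compare min values
      if (L.Scalable && !R.Scalable)
        return L.MinVal > R.MinVal; // vscale >= 1 only grows the LHS
      return false;                 // fixed LHS vs scalable RHS: unprovable
    }

    int main() {
      ToySize Fixed8{8, false}, Fixed4{4, false}, Scal2{2, true};
      assert(isKnownGT(Fixed8, Fixed4));
      assert(!isKnownGT(Fixed4, Scal2)); // 4 > 2*vscale? depends on vscale
      return 0;
    }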
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 2dccc45c803a05..808e3c622033e0 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -267,7 +267,9 @@ static MachineMemOperand *getStackAlignedMMO(SDValue StackPtr,
   auto &MFI = MF.getFrameInfo();
   int FI = cast<FrameIndexSDNode>(StackPtr)->getIndex();
   MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
-  uint64_t ObjectSize = isObjectScalable ? ~UINT64_C(0) : MFI.getObjectSize(FI);
+  LocationSize ObjectSize = isObjectScalable
+                                ? LocationSize::beforeOrAfterPointer()
+                                : LocationSize::precise(MFI.getObjectSize(FI));
   return MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
                                  ObjectSize, MFI.getObjectAlign(FI));
 }
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 5fb9d8d07d1514..1f6e0097f31ab4 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -1967,7 +1967,8 @@ void DAGTypeLegalizer::SplitVecRes_VP_LOAD(VPLoadSDNode *LD, SDValue &Lo,
 
   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
       LD->getPointerInfo(), MachineMemOperand::MOLoad,
-      MemoryLocation::UnknownSize, Alignment, LD->getAAInfo(), LD->getRanges());
+      LocationSize::beforeOrAfterPointer(), Alignment, LD->getAAInfo(),
+      LD->getRanges());
 
   Lo = DAG.getLoadVP(LD->getAddressingMode(), ExtType, LoVT, dl, Ch, Ptr,
                      Offset,
@@ -1990,8 +1991,8 @@ void DAGTypeLegalizer::SplitVecRes_VP_LOAD(VPLoadSDNode *LD, SDValue &Lo,
         LoMemVT.getStoreSize().getFixedValue());
 
     MMO = DAG.getMachineFunction().getMachineMemOperand(
-        MPI, MachineMemOperand::MOLoad, MemoryLocation::UnknownSize, Alignment,
-        LD->getAAInfo(), LD->getRanges());
+        MPI, MachineMemOperand::MOLoad, LocationSize::beforeOrAfterPointer(),
+        Alignment, LD->getAAInfo(), LD->getRanges());
 
     Hi = DAG.getLoadVP(LD->getAddressingMode(), ExtType, HiVT, dl, Ch, Ptr,
                        Offset, MaskHi, EVLHi, HiMemVT, MMO,
@@ -2070,8 +2071,8 @@ void DAGTypeLegalizer::SplitVecRes_VP_STRIDED_LOAD(VPStridedLoadSDNode *SLD,
 
     MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
         MachinePointerInfo(SLD->getPointerInfo().getAddrSpace()),
-        MachineMemOperand::MOLoad, MemoryLocation::UnknownSize, Alignment,
-        SLD->getAAInfo(), SLD->getRanges());
+        MachineMemOperand::MOLoad, LocationSize::beforeOrAfterPointer(),
+        Alignment, SLD->getAAInfo(), SLD->getRanges());
 
     Hi = DAG.getStridedLoadVP(SLD->getAddressingMode(), SLD->getExtensionType(),
                               HiVT, DL, SLD->getChain(), Ptr, SLD->getOffset(),
@@ -2130,7 +2131,7 @@ void DAGTypeLegalizer::SplitVecRes_MLOAD(MaskedLoadSDNode *MLD,
 
   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
       MLD->getPointerInfo(), MachineMemOperand::MOLoad,
-      MemoryLocation::UnknownSize, Alignment, MLD->getAAInfo(),
+      LocationSize::beforeOrAfterPointer(), Alignment, MLD->getAAInfo(),
       MLD->getRanges());
 
   Lo = DAG.getMaskedLoad(LoVT, dl, Ch, Ptr, Offset, MaskLo, PassThruLo, LoMemVT,
@@ -2154,8 +2155,8 @@ void DAGTypeLegalizer::SplitVecRes_MLOAD(MaskedLoadSDNode *MLD,
         LoMemVT.getStoreSize().getFixedValue());
 
     MMO = DAG.getMachineFunction().getMachineMemOperand(
-        MPI, MachineMemOperand::MOLoad, MemoryLocation::UnknownSize, Alignment,
-        MLD->getAAInfo(), MLD->getRanges());
+        MPI, MachineMemOperand::MOLoad, LocationSize::beforeOrAfterPointer(),
+        Alignment, MLD->getAAInfo(), MLD->getRanges());
 
     Hi = DAG.getMaskedLoad(HiVT, dl, Ch, Ptr, Offset, MaskHi, PassThruHi,
                            HiMemVT, MMO, MLD->getAddressingMode(), ExtType,
@@ -2217,7 +2218,8 @@ void DAGTypeLegalizer::SplitVecRes_Gather(MemSDNode *N, SDValue &Lo,
 
   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
       N->getPointerInfo(), MachineMemOperand::MOLoad,
-      MemoryLocation::UnknownSize, Alignment, N->getAAInfo(), N->getRanges());
+      LocationSize::beforeOrAfterPointer(), Alignment, N->getAAInfo(),
+      N->getRanges());
 
   if (auto *MGT = dyn_cast<MaskedGatherSDNode>(N)) {
     SDValue PassThru = MGT->getPassThru();
@@ -2884,10 +2886,10 @@ void DAGTypeLegalizer::SplitVecRes_VP_REVERSE(SDNode *N, SDValue &Lo,
   auto PtrInfo = MachinePointerInfo::getFixedStack(MF, FrameIndex);
 
   MachineMemOperand *StoreMMO = DAG.getMachineFunction().getMachineMemOperand(
-      PtrInfo, MachineMemOperand::MOStore, MemoryLocation::UnknownSize,
+      PtrInfo, MachineMemOperand::MOStore, LocationSize::beforeOrAfterPointer(),
      Alignment);
   MachineMemOperand *LoadMMO = DAG.getMachineFunction().getMachineMemOperand(
-      PtrInfo, MachineMemOperand::MOLoad, MemoryLocation::UnknownSize,
+      PtrInfo, MachineMemOperand::MOLoad, LocationSize::beforeOrAfterPointer(),
       Alignment);
 
   unsigned EltWidth = VT.getScalarSizeInBits() / 8;
@@ -3478,7 +3480,8 @@ SDValue DAGTypeLegalizer::SplitVecOp_VP_STORE(VPStoreSDNode *N, unsigned OpNo) {
   SDValue Lo, Hi;
   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
       N->getPointerInfo(), MachineMemOperand::MOStore,
-      MemoryLocation::UnknownSize, Alignment, N->getAAInfo(), N->getRanges());
+      LocationSize::beforeOrAfterPointer(), Alignment, N->getAAInfo(),
+      N->getRanges());
 
   Lo = DAG.getStoreVP(Ch, DL, DataLo, Ptr, Offset, MaskLo, EVLLo, LoMemVT, MMO,
                       N->getAddressingMode(), N->isTruncatingStore(),
@@ -3501,8 +3504,8 @@ SDValue DAGTypeLegalizer::SplitVecOp_VP_STORE(VPStoreSDNode *N, unsigned OpNo) {
         LoMemVT.getStoreSize().getFixedValue());
 
     MMO = DAG.getMachineFunction().getMachineMemOperand(
-        MPI, MachineMemOperand::MOStore, MemoryLocation::UnknownSize, Alignment,
-        N->getAAInfo(), N->getRanges());
+        MPI, MachineMemOperand::MOStore, LocationSize::beforeOrAfterPointer(),
+        Alignment, N->getAAInfo(), N->getRanges());
 
     Hi = DAG.getStoreVP(Ch, DL, DataHi, Ptr, Offset, MaskHi, EVLHi, HiMemVT,
                         MMO, N->getAddressingMode(), N->isTruncatingStore(),
@@ -3574,8 +3577,8 @@ SDValue DAGTypeLegalizer::SplitVecOp_VP_STRIDED_STORE(VPStridedStoreSDNode *N,
 
     MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
         MachinePointerInfo(N->getPointerInfo().getAddrSpace()),
-        MachineMemOperand::MOStore, MemoryLocation::UnknownSize, Alignment,
-        N->getAAInfo(), N->getRanges());
+        MachineMemOperand::MOStore, LocationSize::beforeOrAfterPointer(),
+        Alignment, N->getAAInfo(), N->getRanges());
 
     SDValue Hi = DAG.getStridedStoreVP(
         N->getChain(), DL, HiData, Ptr, N->getOffset(), N->getStride(), HiMask,
@@ -3626,7 +3629,8 @@ SDValue DAGTypeLegalizer::SplitVecOp_MSTORE(MaskedStoreSDNode *N,
   SDValue Lo, Hi, Res;
   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
       N->getPointerInfo(), MachineMemOperand::MOStore,
-      MemoryLocation::UnknownSize, Alignment, N->getAAInfo(), N->getRanges());
+      LocationSize::beforeOrAfterPointer(), Alignment, N->getAAInfo(),
+      N->getRanges());
 
   Lo = DAG.getMaskedStore(Ch, DL, DataLo, Ptr, Offset, MaskLo, LoMemVT, MMO,
                           N->getAddressingMode(), N->isTruncatingStore(),
@@ -3651,8 +3655,8 @@ SDValue DAGTypeLegalizer::SplitVecOp_MSTORE(MaskedStoreSDNode *N,
         LoMemVT.getStoreSize().getFixedValue());
 
     MMO = DAG.getMachineFunction().getMachineMemOperand(
-        MPI, MachineMemOperand::MOStore, MemoryLocation::UnknownSize, Alignment,
-        N->getAAInfo(), N->getRanges());
+        MPI, MachineMemOperand::MOStore, LocationSize::beforeOrAfterPointer(),
+        Alignment, N->getAAInfo(), N->getRanges());
 
     Hi = DAG.getMaskedStore(Ch, DL, DataHi, Ptr, Offset, MaskHi, HiMemVT, MMO,
                             N->getAddressingMode(), N->isTruncatingStore(),
@@ -3716,7 +3720,8 @@ SDValue DAGTypeLegalizer::SplitVecOp_Scatter(MemSDNode *N, unsigned OpNo) {
   SDValue Lo;
   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
       N->getPointerInfo(), MachineMemOperand::MOStore,
-      MemoryLocation::UnknownSize, Alignment, N->getAAInfo(), N->getRanges());
+      LocationSize::beforeOrAfterPointer(), Alignment, N->getAAInfo(),
+      N->getRanges());
 
   if (auto *MSC = dyn_cast<MaskedScatterSDNode>(N)) {
     SDValue OpsLo[] = {Ch, DataLo, MaskLo, Ptr, IndexLo, Ops.Scale};
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 19f9354c23da42..2670f48aebcff5 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -8392,11 +8392,12 @@ SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl) {
 SDValue SelectionDAG::getMemIntrinsicNode(
     unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
     EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment,
-    MachineMemOperand::Flags Flags, uint64_t Size, const AAMDNodes &AAInfo) {
-  if (!Size && MemVT.isScalableVector())
+    MachineMemOperand::Flags Flags, LocationSize Size,
+    const AAMDNodes &AAInfo) {
+  if (Size.hasValue() && MemVT.isScalableVector())
     Size = MemoryLocation::UnknownSize;
-  else if (!Size)
-    Size = MemVT.getStoreSize();
+  else if (Size.hasValue() && !Size.getValue())
+    Size = LocationSize::precise(MemVT.getStoreSize());
 
   MachineFunction &MF = getMachineFunction();
   MachineMemOperand *MMO =
@@ -8558,7 +8559,7 @@ SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
   if (PtrInfo.V.isNull())
     PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr, Offset);
 
-  uint64_t Size = MemoryLocation::getSizeOrUnknown(MemVT.getStoreSize());
+  LocationSize Size = MemoryLocation::getSizeOrUnknown(MemVT.getStoreSize());
   MachineFunction &MF = getMachineFunction();
   MachineMemOperand *MMO = MF.getMachineMemOperand(PtrInfo, MMOFlags, Size,
                                                    Alignment, AAInfo, Ranges);
@@ -8679,7 +8680,7 @@ SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val,
     PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr);
 
   MachineFunction &MF = getMachineFunction();
-  uint64_t Size =
+  LocationSize Size =
       MemoryLocation::getSizeOrUnknown(Val.getValueType().getStoreSize());
   MachineMemOperand *MMO = MF.getMachineMemOperand(PtrInfo, MMOFlags, Size,
                                                    Alignment, AAInfo);
@@ -8828,7 +8829,7 @@ SDValue SelectionDAG::getLoadVP(
   if (PtrInfo.V.isNull())
     PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr, Offset);
 
-  uint64_t Size = MemoryLocation::getSizeOrUnknown(MemVT.getStoreSize());
+  LocationSize Size = MemoryLocation::getSizeOrUnknown(MemVT.getStoreSize());
   MachineFunction &MF = getMachineFunction();
   MachineMemOperand *MMO = MF.getMachineMemOperand(PtrInfo, MMOFlags, Size,
                                                    Alignment, AAInfo, Ranges);
@@ -11718,8 +11719,10 @@ MemSDNode::MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl,
   // the MMO. This is because the MMO might indicate only a possible address
   // range instead of specifying the affected memory addresses precisely.
   // TODO: Make MachineMemOperands aware of scalable vectors.
-  assert(memvt.getStoreSize().getKnownMinValue() <= MMO->getSize() &&
-         "Size mismatch!");
+  assert(
+      (!MMO->getType().isValid() ||
+       memvt.getStoreSize().getKnownMinValue() <= MMO->getSize().getValue()) &&
+      "Size mismatch!");
 }
 
 /// Profile - Gather unique data for the node.
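getMemIntrinsicNode above keeps a sentinel convention: a size of zero means
"derive the size from MemVT", and a scalable MemVT falls back to an unknown
size. A toy sketch of that resolution logic, with std::optional standing in
for LocationSize (assumed stand-ins, not the SelectionDAG API):

    #include <cassert>
    #include <cstdint>
    #include <optional>

    struct ToyVT {
      uint64_t StoreBytes;
      bool Scalable;
    };

    // nullopt == beforeOrAfterPointer(); a value == precise size in bytes.
    std::optional<uint64_t> resolveSize(std::optional<uint64_t> Size,
                                        ToyVT VT) {
      if (Size && VT.Scalable)
        return std::nullopt;   // scalable: byte count unknown at compile time
      if (Size && *Size == 0)
        return VT.StoreBytes;  // 0 is the "derive from type" sentinel
      return Size;             // explicit or already-unknown size
    }

    int main() {
      ToyVT V4{4, false}, VS{16, true};
      assert(resolveSize(uint64_t{0}, V4) == uint64_t{4});
      assert(!resolveSize(uint64_t{0}, VS).has_value());
      assert(resolveSize(uint64_t{8}, V4) == uint64_t{8});
      return 0;
    }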
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index b6a35f7ad4c4c6..f1923a64368f4f 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -3037,7 +3037,8 @@ static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL,
     auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
                  MachineMemOperand::MODereferenceable;
     MachineMemOperand *MemRef = MF.getMachineMemOperand(
-        MPInfo, Flags, PtrTy.getSizeInBits() / 8, DAG.getEVTAlign(PtrTy));
+        MPInfo, Flags, LocationSize::precise(PtrTy.getSizeInBits() / 8),
+        DAG.getEVTAlign(PtrTy));
     DAG.setNodeMemRefs(Node, {MemRef});
   }
   if (PtrTy != PtrMemTy)
@@ -4753,7 +4754,7 @@ void SelectionDAGBuilder::visitMaskedStore(const CallInst &I,
   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
       MachinePointerInfo(PtrOperand), MachineMemOperand::MOStore,
-      MemoryLocation::UnknownSize, Alignment, I.getAAMetadata());
+      LocationSize::beforeOrAfterPointer(), Alignment, I.getAAMetadata());
   SDValue StoreNode =
       DAG.getMaskedStore(getMemoryRoot(), sdl, Src0, Ptr, Offset, Mask, VT, MMO,
                          ISD::UNINDEXED, false /* Truncating */, IsCompressing);
@@ -4925,7 +4926,7 @@ void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) {
   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
       MachinePointerInfo(PtrOperand), MachineMemOperand::MOLoad,
-      MemoryLocation::UnknownSize, Alignment, AAInfo, Ranges);
+      LocationSize::beforeOrAfterPointer(), Alignment, AAInfo, Ranges);
 
   SDValue Load =
       DAG.getMaskedLoad(VT, sdl, InChain, Ptr, Offset, Mask, Src0, VT, MMO,
@@ -5003,9 +5004,9 @@ void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
   MachineFunction &MF = DAG.getMachineFunction();
   MachineMemOperand *MMO = MF.getMachineMemOperand(
-      MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
-      DAG.getEVTAlign(MemVT), AAMDNodes(), nullptr, SSID, SuccessOrdering,
-      FailureOrdering);
+      MachinePointerInfo(I.getPointerOperand()), Flags,
+      LocationSize::precise(MemVT.getStoreSize()), DAG.getEVTAlign(MemVT),
+      AAMDNodes(), nullptr, SSID, SuccessOrdering, FailureOrdering);
 
   SDValue L = DAG.getAtomicCmpSwap(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, dl, MemVT,
                                    VTs, InChain,
@@ -5057,8 +5058,9 @@ void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
   MachineFunction &MF = DAG.getMachineFunction();
   MachineMemOperand *MMO = MF.getMachineMemOperand(
-      MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
-      DAG.getEVTAlign(MemVT), AAMDNodes(), nullptr, SSID, Ordering);
+      MachinePointerInfo(I.getPointerOperand()), Flags,
+      LocationSize::precise(MemVT.getStoreSize()), DAG.getEVTAlign(MemVT),
+      AAMDNodes(), nullptr, SSID, Ordering);
 
   SDValue L =
     DAG.getAtomic(NT, dl, MemVT, InChain,
@@ -5103,8 +5105,9 @@ void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
   auto Flags = TLI.getLoadMemOperandFlags(I, DAG.getDataLayout(), AC, LibInfo);
 
   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
-      MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
-      I.getAlign(), AAMDNodes(), nullptr, SSID, Order);
+      MachinePointerInfo(I.getPointerOperand()), Flags,
+      LocationSize::precise(MemVT.getStoreSize()), I.getAlign(), AAMDNodes(),
+      nullptr, SSID, Order);
 
   InChain = TLI.prepareVolatileOrAtomicLoad(InChain, dl, DAG);
 
@@ -5140,8 +5143,9 @@ void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
   MachineFunction &MF = DAG.getMachineFunction();
   MachineMemOperand *MMO = MF.getMachineMemOperand(
-      MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
-      I.getAlign(), AAMDNodes(), nullptr, SSID, Ordering);
+      MachinePointerInfo(I.getPointerOperand()), Flags,
+      LocationSize::precise(MemVT.getStoreSize()), I.getAlign(), AAMDNodes(),
+      nullptr, SSID, Ordering);
 
   SDValue Val = getValue(I.getValueOperand());
   if (Val.getValueType() != MemVT)
@@ -6904,7 +6908,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
     auto MPI =
         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
     MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
-        MPI, MachineMemOperand::MOStore, MemoryLocation::UnknownSize,
+        MPI, MachineMemOperand::MOStore, LocationSize::beforeOrAfterPointer(),
         TempAlign);
     Chain = DAG.getGetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
     Res = DAG.getLoad(EnvVT, sdl, Chain, Temp, MPI);
@@ -6933,7 +6937,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
       Chain = DAG.getStore(Chain, sdl, Env, Temp, MPI, TempAlign,
                            MachineMemOperand::MOStore);
       MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
-          MPI, MachineMemOperand::MOLoad, MemoryLocation::UnknownSize,
+          MPI, MachineMemOperand::MOLoad, LocationSize::beforeOrAfterPointer(),
          TempAlign);
       Chain = DAG.getSetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
     }
@@ -8087,7 +8091,7 @@ void SelectionDAGBuilder::visitVPLoad(
   SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
       MachinePointerInfo(PtrOperand), MachineMemOperand::MOLoad,
-      MemoryLocation::UnknownSize, *Alignment, AAInfo, Ranges);
+      LocationSize::beforeOrAfterPointer(), *Alignment, AAInfo, Ranges);
   LD = DAG.getLoadVP(VT, DL, InChain, OpValues[0], OpValues[1], OpValues[2],
                      MMO, false /*IsExpanding */);
   if (AddToChain)
@@ -8110,8 +8114,8 @@ void SelectionDAGBuilder::visitVPGather(
   unsigned AS =
       PtrOperand->getType()->getScalarType()->getPointerAddressSpace();
   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
-      MachinePointerInfo(AS), MachineMemOperand::MOLoad,
-      MemoryLocation::UnknownSize, *Alignment, AAInfo, Ranges);
+      MachinePointerInfo(AS), MachineMemOperand::MOLoad,
+      LocationSize::beforeOrAfterPointer(), *Alignment, AAInfo, Ranges);
   SDValue Base, Index, Scale;
   ISD::MemIndexType IndexType;
   bool UniformBase = getUniformBase(PtrOperand, Base, Index, IndexType, Scale,
@@ -8151,7 +8155,7 @@ void SelectionDAGBuilder::visitVPStore(
   SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
       MachinePointerInfo(PtrOperand), MachineMemOperand::MOStore,
-      MemoryLocation::UnknownSize, *Alignment, AAInfo);
+      LocationSize::beforeOrAfterPointer(), *Alignment, AAInfo);
   ST = DAG.getStoreVP(getMemoryRoot(), DL, OpValues[0], Ptr, Offset,
                       OpValues[2], OpValues[3], VT, MMO, ISD::UNINDEXED,
                       /* IsTruncating */ false, /*IsCompressing*/ false);
@@ -8174,7 +8178,7 @@ void SelectionDAGBuilder::visitVPScatter(
       PtrOperand->getType()->getScalarType()->getPointerAddressSpace();
   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
       MachinePointerInfo(AS), MachineMemOperand::MOStore,
-      MemoryLocation::UnknownSize, *Alignment, AAInfo);
+      LocationSize::beforeOrAfterPointer(), *Alignment, AAInfo);
   SDValue Base, Index, Scale;
   ISD::MemIndexType IndexType;
   bool UniformBase = getUniformBase(PtrOperand, Base, Index, IndexType, Scale,
@@ -8217,7 +8221,7 @@ void SelectionDAGBuilder::visitVPStridedLoad(
   unsigned AS = PtrOperand->getType()->getPointerAddressSpace();
   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
       MachinePointerInfo(AS), MachineMemOperand::MOLoad,
-      MemoryLocation::UnknownSize, *Alignment, AAInfo, Ranges);
+      LocationSize::beforeOrAfterPointer(), *Alignment, AAInfo, Ranges);
 
   SDValue LD = DAG.getStridedLoadVP(VT, DL, InChain, OpValues[0], OpValues[1],
                                     OpValues[2], OpValues[3], MMO,
@@ -8240,7 +8244,7 @@ void SelectionDAGBuilder::visitVPStridedStore(
   unsigned AS = PtrOperand->getType()->getPointerAddressSpace();
   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
       MachinePointerInfo(AS), MachineMemOperand::MOStore,
-      MemoryLocation::UnknownSize, *Alignment, AAInfo);
+      LocationSize::beforeOrAfterPointer(), *Alignment, AAInfo);
 
   SDValue ST = DAG.getStridedStoreVP(
       getMemoryRoot(), DL, OpValues[0], OpValues[1],
diff --git a/llvm/lib/CodeGen/TargetInstrInfo.cpp b/llvm/lib/CodeGen/TargetInstrInfo.cpp
index 5b02c1bc39c0a7..9fbd516acea8e1 100644
--- a/llvm/lib/CodeGen/TargetInstrInfo.cpp
+++ b/llvm/lib/CodeGen/TargetInstrInfo.cpp
@@ -1554,7 +1554,8 @@ TargetInstrInfo::describeLoadedValue(const MachineInstr &MI,
     SmallVector<uint64_t, 8> Ops;
     DIExpression::appendOffset(Ops, Offset);
     Ops.push_back(dwarf::DW_OP_deref_size);
-    Ops.push_back(MMO->getSize());
+    Ops.push_back(MMO->getSize().hasValue() ? MMO->getSize().getValue()
+                                            : ~UINT64_C(0));
     Expr = DIExpression::prependOpcodes(Expr, Ops);
     return ParamLoadedValue(*BaseOp, Expr);
   }
diff --git a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
index 0ab2c401b1749d..d0adb78b231a76 100644
--- a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -2362,7 +2362,7 @@ bool AArch64LoadStoreOpt::tryToPairLdStInst(MachineBasicBlock::iterator &MBBI) {
   // Get the needed alignments to check them if
   // ldp-aligned-only/stp-aligned-only features are opted.
   uint64_t MemAlignment = MemOp->getAlign().value();
-  uint64_t TypeAlignment = Align(MemOp->getSize()).value();
+  uint64_t TypeAlignment = Align(MemOp->getSize().getValue()).value();
 
   if (MemAlignment < 2 * TypeAlignment) {
     NumFailedAlignmentCheck++;
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
index 7a49422c064b7c..677dd0b502b956 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
@@ -2852,8 +2852,8 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
       return false;
     }
 
-    uint64_t MemSizeInBytes = LdSt.getMemSize();
-    unsigned MemSizeInBits = LdSt.getMemSizeInBits();
+    uint64_t MemSizeInBytes = LdSt.getMemSize().getValue();
+    unsigned MemSizeInBits = LdSt.getMemSizeInBits().getValue();
     AtomicOrdering Order = LdSt.getMMO().getSuccessOrdering();
 
     // Need special instructions for atomics that affect ordering.
@@ -3276,7 +3276,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
         RBI.getRegBank(SrcReg, MRI, TRI)->getID() == AArch64::GPRRegBankID;
     if (LoadMI && IsGPR) {
       const MachineMemOperand *MemOp = *LoadMI->memoperands_begin();
-      unsigned BytesLoaded = MemOp->getSize();
+      unsigned BytesLoaded = MemOp->getSize().getValue();
       if (BytesLoaded < 4 && SrcTy.getSizeInBytes() == BytesLoaded)
         return selectCopy(I, TII, MRI, TRI, RBI);
     }
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp b/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp
index 51c52aad359497..d8ca5494ba50a4 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp
@@ -311,7 +311,7 @@ bool matchSplitStoreZero128(MachineInstr &MI, MachineRegisterInfo &MRI) {
   LLT ValTy = MRI.getType(Store.getValueReg());
   if (!ValTy.isVector() || ValTy.getSizeInBits() != 128)
     return false;
-  if (ValTy.getSizeInBits() != Store.getMemSizeInBits())
+  if (Store.getMemSizeInBits() != ValTy.getSizeInBits())
     return false; // Don't split truncating stores.
   if (!MRI.hasOneNonDBGUse(Store.getValueReg()))
     return false;
@@ -658,7 +658,7 @@ bool AArch64PostLegalizerCombiner::optimizeConsecutiveMemOpAddressing(
       APInt Offset;
       LLT StoredValTy = MRI.getType(St->getValueReg());
       unsigned ValSize = StoredValTy.getSizeInBits();
-      if (ValSize < 32 || ValSize != St->getMMO().getSizeInBits())
+      if (ValSize < 32 || St->getMMO().getSizeInBits() != ValSize)
        continue;
 
       Register PtrReg = St->getPointerReg();
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
index c99e490014668b..bba7682cd7a0d1 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
@@ -3556,7 +3556,10 @@ bool AMDGPUDAGToDAGISel::isUniformLoad(const SDNode *N) const {
   if (N->isDivergent() && !AMDGPUInstrInfo::isUniformMMO(MMO))
     return false;
 
-  return Ld->getAlign() >= Align(std::min(MMO->getSize(), uint64_t(4))) &&
+  return MMO->getSize().hasValue() &&
+         Ld->getAlign() >=
+             Align(std::min(MMO->getSize().getValue().getKnownMinValue(),
+                            uint64_t(4))) &&
         ((Ld->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
           Ld->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) ||
          (Subtarget->getScalarizeGlobalBehavior() &&
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 0029c51231f286..90872516dd6db1 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -5661,7 +5661,7 @@ bool AMDGPULegalizerInfo::legalizeBufferStore(MachineInstr &MI,
   Register RSrc = MI.getOperand(2).getReg();
 
   MachineMemOperand *MMO = *MI.memoperands_begin();
-  const int MemSize = MMO->getSize();
+  const int MemSize = MMO->getSize().getValue();
 
   unsigned ImmOffset;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index b174d57bd57656..0037825ce08938 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -449,7 +449,7 @@ bool AMDGPURegisterBankInfo::isScalarLoadLegal(const MachineInstr &MI) const {
   const unsigned AS = MMO->getAddrSpace();
   const bool IsConst = AS == AMDGPUAS::CONSTANT_ADDRESS ||
                        AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT;
-  const unsigned MemSize = 8 * MMO->getSize();
+  const unsigned MemSize = 8 * MMO->getSize().getValue();
 
   // Require 4-byte alignment.
   return (MMO->getAlign() >= Align(4) ||
@@ -1070,7 +1070,7 @@ bool AMDGPURegisterBankInfo::applyMappingLoad(
     return false;
 
   MachineMemOperand *MMO = *MI.memoperands_begin();
-  const unsigned MemSize = 8 * MMO->getSize();
+  const unsigned MemSize = 8 * MMO->getSize().getValue();
   // Scalar loads of size 8 or 16 bit with proper alignment may be widened to
   // 32 bit. Check to see if we need to widen the memory access, 8 or 16 bit
   // scalar loads should have a load size of 32 but memory access size of less
OffsetB : OffsetA; - int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB; - return LowOffset + LowWidth <= HighOffset; + LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB; + return LowWidth.hasValue() && + LowOffset + (int)LowWidth.getValue() <= HighOffset; } bool SIInstrInfo::checkInstOffsetsDoNotOverlap(const MachineInstr &MIa, @@ -3662,8 +3663,8 @@ bool SIInstrInfo::checkInstOffsetsDoNotOverlap(const MachineInstr &MIa, // FIXME: Handle ds_read2 / ds_write2. return false; } - unsigned Width0 = MIa.memoperands().front()->getSize(); - unsigned Width1 = MIb.memoperands().front()->getSize(); + LocationSize Width0 = MIa.memoperands().front()->getSize(); + LocationSize Width1 = MIb.memoperands().front()->getSize(); return offsetsDoNotOverlap(Width0, Offset0, Width1, Offset1); } diff --git a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp index 9c85ff3c43e22f..4ddee2f6d5befa 100644 --- a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp +++ b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp @@ -862,7 +862,7 @@ SILoadStoreOptimizer::combineKnownAdjacentMMOs(const CombineInfo &CI, const MachineMemOperand *MMOa = *CI.I->memoperands_begin(); const MachineMemOperand *MMOb = *Paired.I->memoperands_begin(); - unsigned Size = MMOa->getSize() + MMOb->getSize(); + unsigned Size = MMOa->getSize().getValue() + MMOb->getSize().getValue(); // A base pointer for the combined operation is the same as the leading // operation's pointer. diff --git a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp index dd63ca17e5b9f1..5d0468948dfb61 100644 --- a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp +++ b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp @@ -3710,7 +3710,7 @@ unsigned ARMBaseInstrInfo::getNumLDMAddresses(const MachineInstr &MI) const { for (MachineInstr::mmo_iterator I = MI.memoperands_begin(), E = MI.memoperands_end(); I != E; ++I) { - Size += (*I)->getSize(); + Size += (*I)->getSize().getValue(); } // FIXME: The scheduler currently can't handle values larger than 16. 
But // the values can actually go up to 32 for floating-point load/store diff --git a/llvm/lib/Target/ARM/ARMHazardRecognizer.cpp b/llvm/lib/Target/ARM/ARMHazardRecognizer.cpp index 9b26aac6c0b71e..34b6f0575f7276 100644 --- a/llvm/lib/Target/ARM/ARMHazardRecognizer.cpp +++ b/llvm/lib/Target/ARM/ARMHazardRecognizer.cpp @@ -191,7 +191,7 @@ ARMBankConflictHazardRecognizer::getHazardType(SUnit *SU, int Stalls) { auto BasePseudoVal0 = MO0->getPseudoValue(); int64_t Offset0 = 0; - if (MO0->getSize() > 4) + if (!MO0->getSize().hasValue() || MO0->getSize().getValue() > 4) return NoHazard; bool SPvalid = false; @@ -259,8 +259,8 @@ void ARMBankConflictHazardRecognizer::EmitInstruction(SUnit *SU) { return; auto MO = *MI.memoperands().begin(); - uint64_t Size1 = MO->getSize(); - if (Size1 > 4) + LocationSize Size1 = MO->getSize(); + if (Size1.hasValue() && Size1.getValue() > 4) return; Accesses.push_back(&MI); } diff --git a/llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp b/llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp index d8139958e9fcf2..7b8bcb2c5866da 100644 --- a/llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp +++ b/llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp @@ -242,7 +242,9 @@ void BPFDAGToDAGISel::PreprocessLoad(SDNode *Node, bool to_replace = false; SDLoc DL(Node); const LoadSDNode *LD = cast(Node); - uint64_t size = LD->getMemOperand()->getSize(); + if (!LD->getMemOperand()->getSize().hasValue()) + return; + uint64_t size = LD->getMemOperand()->getSize().getValue(); if (!size || size > 8 || (size & (size - 1)) || !LD->isSimple()) return; diff --git a/llvm/lib/Target/Hexagon/HexagonStoreWidening.cpp b/llvm/lib/Target/Hexagon/HexagonStoreWidening.cpp index a647e699a8f07a..9d8e5c53b8227a 100644 --- a/llvm/lib/Target/Hexagon/HexagonStoreWidening.cpp +++ b/llvm/lib/Target/Hexagon/HexagonStoreWidening.cpp @@ -292,8 +292,8 @@ bool HexagonStoreWidening::storesAreAdjacent(const MachineInstr *S1, int Off1 = S1->getOperand(1).getImm(); int Off2 = S2->getOperand(1).getImm(); - return (Off1 >= 0) ? Off1+S1MO.getSize() == unsigned(Off2) - : int(Off1+S1MO.getSize()) == Off2; + return (Off1 >= 0) ? Off1 + S1MO.getSize().getValue() == unsigned(Off2) + : int(Off1 + S1MO.getSize().getValue()) == Off2; } /// Given a sequence of adjacent stores, and a maximum size of a single wide @@ -315,7 +315,7 @@ bool HexagonStoreWidening::selectStores(InstrGroup::iterator Begin, assert(!FirstMI->memoperands_empty() && "Expecting some memory operands"); const MachineMemOperand &FirstMMO = getStoreTarget(FirstMI); unsigned Alignment = FirstMMO.getAlign().value(); - unsigned SizeAccum = FirstMMO.getSize(); + unsigned SizeAccum = FirstMMO.getSize().getValue(); unsigned FirstOffset = getStoreOffset(FirstMI); // The initial value of SizeAccum should always be a power of 2. @@ -357,7 +357,7 @@ bool HexagonStoreWidening::selectStores(InstrGroup::iterator Begin, if (!storesAreAdjacent(S1, S2)) break; - unsigned S2Size = getStoreTarget(S2).getSize(); + unsigned S2Size = getStoreTarget(S2).getSize().getValue(); if (SizeAccum + S2Size > std::min(MaxSize, Alignment)) break; @@ -405,7 +405,7 @@ bool HexagonStoreWidening::createWideStores(InstrGroup &OG, InstrGroup &NG, MachineOperand &SO = MI->getOperand(2); // Source. 
     assert(SO.isImm() && "Expecting an immediate operand");
 
-    unsigned NBits = MMO.getSize()*8;
+    unsigned NBits = MMO.getSize().getValue() * 8;
     unsigned Mask = (0xFFFFFFFFU >> (32-NBits));
     unsigned Val = (SO.getImm() & Mask) << Shift;
     Acc |= Val;
diff --git a/llvm/lib/Target/Mips/MipsInstructionSelector.cpp b/llvm/lib/Target/Mips/MipsInstructionSelector.cpp
index 654f29d08af0b5..4e1e27088cce42 100644
--- a/llvm/lib/Target/Mips/MipsInstructionSelector.cpp
+++ b/llvm/lib/Target/Mips/MipsInstructionSelector.cpp
@@ -184,7 +184,8 @@ MipsInstructionSelector::selectLoadStoreOpCode(MachineInstr &I,
   const Register ValueReg = I.getOperand(0).getReg();
   const LLT Ty = MRI.getType(ValueReg);
   const unsigned TySize = Ty.getSizeInBits();
-  const unsigned MemSizeInBytes = (*I.memoperands_begin())->getSize();
+  const unsigned MemSizeInBytes =
+      (*I.memoperands_begin())->getSize().getValue();
   unsigned Opc = I.getOpcode();
   const bool isStore = Opc == TargetOpcode::G_STORE;
 
@@ -455,7 +456,8 @@ bool MipsInstructionSelector::select(MachineInstr &I) {
     }
 
     // Unaligned memory access
-    if (MMO->getAlign() < MMO->getSize() &&
+    if ((!MMO->getSize().hasValue() ||
+         MMO->getAlign() < MMO->getSize().getValue()) &&
         !STI.systemSupportsUnalignedAccess()) {
       if (MMO->getSize() != 4 || !isRegInGprb(I.getOperand(0).getReg(), MRI))
         return false;
diff --git a/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp b/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp
index f5e94235859a06..3307e840a2afd6 100644
--- a/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp
+++ b/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp
@@ -344,7 +344,7 @@ bool MipsLegalizerInfo::legalizeCustom(
   switch (MI.getOpcode()) {
   case G_LOAD:
   case G_STORE: {
-    unsigned MemSize = (**MI.memoperands_begin()).getSize();
+    unsigned MemSize = (**MI.memoperands_begin()).getSize().getValue();
     Register Val = MI.getOperand(0).getReg();
     unsigned Size = MRI.getType(Val).getSizeInBits();
 
diff --git a/llvm/lib/Target/Mips/MipsPreLegalizerCombiner.cpp b/llvm/lib/Target/Mips/MipsPreLegalizerCombiner.cpp
index acf0d6312ef51b..639e623954ffc6 100644
--- a/llvm/lib/Target/Mips/MipsPreLegalizerCombiner.cpp
+++ b/llvm/lib/Target/Mips/MipsPreLegalizerCombiner.cpp
@@ -70,9 +70,10 @@ class MipsPreLegalizerCombinerImpl : public Combiner {
       // subtarget doesn't support them.
       auto MMO = *MI.memoperands_begin();
       const MipsSubtarget &STI = MI.getMF()->getSubtarget<MipsSubtarget>();
-      if (!isPowerOf2_64(MMO->getSize()))
+      if (!MMO->getSize().hasValue() ||
+          !isPowerOf2_64(MMO->getSize().getValue()))
         return false;
-      bool isUnaligned = MMO->getAlign() < MMO->getSize();
+      bool isUnaligned = MMO->getAlign() < MMO->getSize().getValue();
       if (!STI.systemSupportsUnalignedAccess() && isUnaligned)
         return false;
 
diff --git a/llvm/lib/Target/Mips/MipsRegisterBankInfo.cpp b/llvm/lib/Target/Mips/MipsRegisterBankInfo.cpp
index db7afc3c86c57e..6af1fd8c88e570 100644
--- a/llvm/lib/Target/Mips/MipsRegisterBankInfo.cpp
+++ b/llvm/lib/Target/Mips/MipsRegisterBankInfo.cpp
@@ -155,7 +155,8 @@ static bool isGprbTwoInstrUnalignedLoadOrStore(const MachineInstr *MI) {
     auto MMO = *MI->memoperands_begin();
     const MipsSubtarget &STI = MI->getMF()->getSubtarget<MipsSubtarget>();
     if (MMO->getSize() == 4 && (!STI.systemSupportsUnalignedAccess() &&
-                                MMO->getAlign() < MMO->getSize()))
+                                (!MMO->getSize().hasValue() ||
+                                 MMO->getAlign() < MMO->getSize().getValue())))
       return true;
   }
   return false;
diff --git a/llvm/lib/Target/PowerPC/GISel/PPCInstructionSelector.cpp b/llvm/lib/Target/PowerPC/GISel/PPCInstructionSelector.cpp
index 98cd3a82a6e05d..3283a5bb69404c 100644
--- a/llvm/lib/Target/PowerPC/GISel/PPCInstructionSelector.cpp
+++ b/llvm/lib/Target/PowerPC/GISel/PPCInstructionSelector.cpp
@@ -739,7 +739,7 @@ bool PPCInstructionSelector::select(MachineInstr &I) {
     auto SelectLoadStoreAddressingMode = [&]() -> MachineInstr * {
       const unsigned NewOpc = selectLoadStoreOp(
           I.getOpcode(), RBI.getRegBank(LdSt.getReg(0), MRI, TRI)->getID(),
-          LdSt.getMemSizeInBits());
+          LdSt.getMemSizeInBits().getValue());
 
       if (NewOpc == I.getOpcode())
         return nullptr;
diff --git a/llvm/lib/Target/PowerPC/PPCHazardRecognizers.cpp b/llvm/lib/Target/PowerPC/PPCHazardRecognizers.cpp
index ffaa3e05c84790..a942d2f9c7e8e5 100644
--- a/llvm/lib/Target/PowerPC/PPCHazardRecognizers.cpp
+++ b/llvm/lib/Target/PowerPC/PPCHazardRecognizers.cpp
@@ -374,8 +374,9 @@ getHazardType(SUnit *SU, int Stalls) {
   // overlapping address.
   if (isLoad && NumStores && !MI->memoperands_empty()) {
     MachineMemOperand *MO = *MI->memoperands_begin();
-    if (isLoadOfStoredAddress(MO->getSize(),
-                              MO->getOffset(), MO->getValue()))
+    if (MO->getSize().hasValue() &&
+        isLoadOfStoredAddress(MO->getSize().getValue(), MO->getOffset(),
+                              MO->getValue()))
       return NoopHazard;
   }
 
@@ -399,9 +400,10 @@ void PPCHazardRecognizer970::EmitInstruction(SUnit *SU) {
     if (Opcode == PPC::MTCTR || Opcode == PPC::MTCTR8) HasCTRSet = true;
 
   // Track the address stored to.
-  if (isStore && NumStores < 4 && !MI->memoperands_empty()) {
+  if (isStore && NumStores < 4 && !MI->memoperands_empty() &&
+      (*MI->memoperands_begin())->getSize().hasValue()) {
     MachineMemOperand *MO = *MI->memoperands_begin();
-    StoreSize[NumStores] = MO->getSize();
+    StoreSize[NumStores] = MO->getSize().getValue();
     StoreOffset[NumStores] = MO->getOffset();
     StoreValue[NumStores] = MO->getValue();
     ++NumStores;
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index aef2d483c6df1e..16a0a4722ad484 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -8519,7 +8519,8 @@ bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const {
   // If there is no LXSIBZX/LXSIHZX, like Power8,
   // prefer direct move if the memory size is 1 or 2 bytes.
   MachineMemOperand *MMO = cast<LoadSDNode>(Origin)->getMemOperand();
-  if (!Subtarget.hasP9Vector() && MMO->getSize() <= 2)
+  if (!Subtarget.hasP9Vector() &&
+      (!MMO->getSize().hasValue() || MMO->getSize().getValue() <= 2))
     return true;
 
   for (SDNode::use_iterator UI = Origin->use_begin(),
@@ -15114,7 +15115,7 @@ SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N,
     // If the MMO suggests this isn't a load of a full vector, leave
     // things alone. For a built-in, we have to make the change for
     // correctness, so if there is a size problem that will be a bug.
-    if (MMO->getSize() < 16)
+    if (!MMO->getSize().hasValue() || MMO->getSize().getValue() < 16)
       return SDValue();
     break;
   }
@@ -15182,7 +15183,7 @@ SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N,
     // If the MMO suggests this isn't a store of a full vector, leave
     // things alone. For a built-in, we have to make the change for
     // correctness, so if there is a size problem that will be a bug.
-    if (MMO->getSize() < 16)
+    if (!MMO->getSize().hasValue() || MMO->getSize().getValue() < 16)
       return SDValue();
     break;
   }
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 37a8079dcbf10d..4ca300f9151e1a 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -656,7 +656,7 @@ void RISCVInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
   if (IsScalableVector) {
     MachineMemOperand *MMO = MF->getMachineMemOperand(
         MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
-        MemoryLocation::UnknownSize, MFI.getObjectAlign(FI));
+        LocationSize::beforeOrAfterPointer(), MFI.getObjectAlign(FI));
 
     MFI.setStackID(FI, TargetStackID::ScalableVector);
     BuildMI(MBB, I, DebugLoc(), get(Opcode))
@@ -739,7 +739,7 @@ void RISCVInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
   if (IsScalableVector) {
     MachineMemOperand *MMO = MF->getMachineMemOperand(
         MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
-        MemoryLocation::UnknownSize, MFI.getObjectAlign(FI));
+        LocationSize::beforeOrAfterPointer(), MFI.getObjectAlign(FI));
 
     MFI.setStackID(FI, TargetStackID::ScalableVector);
     BuildMI(MBB, I, DebugLoc(), get(Opcode), DstReg)
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
index 53e9bf9a9d1bb0..2a6dce863c28f1 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
@@ -2020,11 +2020,12 @@ areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
   }
 
   if (SameVal) {
     int OffsetA = MMOa->getOffset(), OffsetB = MMOb->getOffset();
-    int WidthA = MMOa->getSize(), WidthB = MMOb->getSize();
+    LocationSize WidthA = MMOa->getSize(), WidthB = MMOb->getSize();
     int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
     int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
-    int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
-    if (LowOffset + LowWidth <= HighOffset)
+    LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
+    if (LowWidth.hasValue() &&
+        LowOffset + (int)LowWidth.getValue() <= HighOffset)
       return true;
   }
diff --git a/llvm/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp b/llvm/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp
index 04931afdec51c3..a72eeb53915d65 100644
--- a/llvm/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp
+++ b/llvm/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp
@@ -521,8 +521,8 @@ bool X86AvoidSFBPass::alias(const MachineMemOperand &Op1,
     return true;
 
   int64_t MinOffset = std::min(Op1.getOffset(), Op2.getOffset());
-  int64_t Overlapa = Op1.getSize() + Op1.getOffset() - MinOffset;
-  int64_t Overlapb = Op2.getSize() + Op2.getOffset() - MinOffset;
+  int64_t Overlapa = Op1.getSize().getValue() + Op1.getOffset() - MinOffset;
+  int64_t Overlapb = Op2.getSize().getValue() + Op2.getOffset() - MinOffset;
 
   return !AA->isNoAlias(
       MemoryLocation(Op1.getValue(), Overlapa, Op1.getAAInfo()),
@@ -688,7 +688,7 @@ bool X86AvoidSFBPass::runOnMachineFunction(MachineFunction &MF) {
           !isRelevantAddressingMode(PBInst) || !PBInst->hasOneMemOperand())
         continue;
       int64_t PBstDispImm = getDispOperand(PBInst).getImm();
-      unsigned PBstSize = (*PBInst->memoperands_begin())->getSize();
+      unsigned PBstSize = (*PBInst->memoperands_begin())->getSize().getValue();
       // This check doesn't cover all cases, but it will suffice for now.
       // TODO: take branch probability into consideration, if the blocking
       // store is in an unreached block, breaking the memcopy could lose