Reduce warning count introduced by llvm 19 migration.
Felix-Rm committed Dec 3, 2024
1 parent 3e36cca commit 560ec2a
Showing 18 changed files with 248 additions and 241 deletions.
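
Note: the changes below all follow the same pattern. LLVM 19 deprecates the member-style casting helpers on MLIR types (`.isa<T>()`, `.cast<T>()`, `.dyn_cast<T>()`) in favour of the free functions `isa<T>(...)`, `cast<T>(...)`, and `dyn_cast<T>(...)`. A minimal sketch of the rewrite, with illustrative names only (not code from this repository):

```cpp
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Value.h"

using namespace mlir;

// Sketch only: `describe` and `value` are illustrative names, not from the repo.
void describe(Value value) {
  Type ty = value.getType();

  // Deprecated member-style casts (warn under LLVM 19):
  //   if (ty.isa<IntegerType>()) ...
  //   auto shaped = ty.cast<ShapedType>();
  //   if (auto memrefTy = ty.dyn_cast<MemRefType>()) ...

  // Free-function replacements:
  if (isa<IntegerType>(ty)) {
    // integer-typed value
  }

  if (isa<ShapedType>(ty)) {
    auto shaped = cast<ShapedType>(ty); // cast<> asserts on mismatch, so guard first
    (void)shaped.getShape();
  }

  if (auto memrefTy = dyn_cast<MemRefType>(ty)) {
    // dyn_cast<> returns a null type on mismatch, so it doubles as the check
    (void)memrefTy.getRank();
  }
}
```

The hunks below apply this substitution across the conversion passes and dialect ops, plus a couple of formatting-only reflows.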
@@ -84,7 +84,7 @@ Value createArithIntOrFloatOp(OpBuilder &builder, Location loc, Value a,
Value b) {
assert(a.getType() == b.getType() && "Mismatched type");
assert(a.getType().isIntOrIndexOrFloat() && "Expected scalar type");
if (a.getType().isa<IntegerType>()) {
if (isa<IntegerType>(a.getType())) {
return builder.create<IntOp>(loc, a, b);
} else {
return builder.create<FloatOp>(loc, a, b);
4 changes: 2 additions & 2 deletions cinnamon/lib/Conversion/CimToMemristor/CimToMemristor.cpp
@@ -40,7 +40,7 @@ struct ConvertCimOpToMemristor : OpConversionPattern<CimOp> {
ConversionPatternRewriter &rewriter) const override {

auto tileId = op.getOperand(0);
auto resultShape = op.getResult().getType().template cast<ShapedType>();
auto resultShape = cast<ShapedType>(op.getResult().getType());

auto resultAllocOp = rewriter.create<bufferization::AllocTensorOp>(
op.getLoc(),
@@ -49,7 +49,7 @@ struct ConvertCimOpToMemristor : OpConversionPattern<CimOp> {
ValueRange{});

auto createBufferizeOp = [&](Value value) {
auto shapedType = value.getType().cast<ShapedType>();
auto shapedType = cast<ShapedType>(value.getType());
return rewriter.create<bufferization::ToMemrefOp>(
op.getLoc(),
MemRefType::get(shapedType.getShape(), shapedType.getElementType()),
2 changes: 1 addition & 1 deletion cinnamon/lib/Conversion/CinmToCim/CinmToCim.cpp
@@ -32,7 +32,7 @@ namespace {
// Creates the specified type for a value with correct shape and element type
// Condition: The value must be shaped type
template <typename T> static T getShapedType(Value value) {
auto shapedType = value.getType().cast<ShapedType>();
auto shapedType = cast<ShapedType>(value.getType());
return T::get(shapedType.getShape(), shapedType.getElementType());
}

28 changes: 13 additions & 15 deletions cinnamon/lib/Conversion/CinmToCnm/CinmToCnm.cpp
@@ -298,14 +298,14 @@ LogicalResult convertInputIntoAlloc(Location loc, Value &inputBuf,
// For each input of the reduce, we need to

// convert single element to tensor<1xelementTy>
if (!inputBuf.getType().dyn_cast<RankedTensorType>()) {
if (!isa<RankedTensorType>(inputBuf.getType())) {
inputBuf = rewriter.create<tensor::FromElementsOp>(
RankedTensorType::get(SmallVector<int64_t>(wgTy.getShape().size(), 1),
inputBuf.getType()),
ValueRange{inputBuf});
}

auto inputType = inputBuf.getType().cast<RankedTensorType>();
auto inputType = cast<RankedTensorType>(inputBuf.getType());

llvm::SmallVector<int64_t, 1> shapeOfBuffer;
std::optional<SmallVector<int64_t>> reshapeInto;
@@ -318,9 +318,9 @@ LogicalResult convertInputIntoAlloc(Location loc, Value &inputBuf,
return failure();

if (reshapeInto) {
inputBuf =
cinm::reshapeStatic(rewriter, rewriter.getLoc(), inputBuf,
inputType.cast<RankedTensorType>(), *reshapeInto);
inputBuf = cinm::reshapeStatic(rewriter, rewriter.getLoc(), inputBuf,
cast<RankedTensorType>(inputType),
*reshapeInto);
}

// Allocate a cinm buffer
@@ -350,7 +350,7 @@ cnm::LaunchOp createLaunchOp(
auto &launchBlock = launchOp.getBody().emplaceBlock();
// arguments are memrefs with same shape as inputs
for (auto input : launchOp.getParams()) {
if (auto inputTy = input.getType().dyn_cast<cnm::BufferType>()) {
if (auto inputTy = dyn_cast<cnm::BufferType>(input.getType())) {
auto mappedTy =
MemRefType::get(inputTy.getShape(), inputTy.getElementType());
launchBlock.addArgument(mappedTy, input.getLoc());
@@ -428,8 +428,8 @@ LogicalResult convertCinmToCnm(
auto res = builder.create<cnm::GatherOp>(alloc, workgroup, map, outBuf);
auto shapedBack = cinm::reshapeStatic(
builder, builder.getLoc(),
res.getOutput().cast<TypedValue<RankedTensorType>>(),
result.getType().cast<RankedTensorType>().getShape());
cast<TypedValue<RankedTensorType>>(res.getOutput()),
cast<RankedTensorType>(result.getType()).getShape());

resultValues.push_back(shapedBack);
}
@@ -514,7 +514,7 @@ struct ConvertElementWiseToCnm : public OpConversionPattern<CinmOp> {
ValueRange outputs) {
SmallVector<AffineMap> affineMaps;
for (const auto &i : inputs) {
MemRefType t = i.getType().cast<MemRefType>();
MemRefType t = cast<MemRefType>(i.getType());
affineMaps.push_back(AffineMap::getMultiDimIdentityMap(
t.getRank(), op.getContext()));

@@ -541,7 +541,7 @@ struct ConvertElementWiseToCnm : public OpConversionPattern<CinmOp> {
Value rhs = IsScalarOp ? inputs[1u] : args[1u];
if constexpr (IsScalarOp) {
if (const auto memrefType =
rhs.getType().dyn_cast<MemRefType>()) {
dyn_cast<MemRefType>(rhs.getType())) {
const Value zero =
builder.create<arith::ConstantIndexOp>(loc, 0);
rhs = builder.create<memref::LoadOp>(
@@ -622,7 +622,7 @@ struct ConvertCinmGemmToCnm : public OpConversionPattern<cinm::GemmOp> {
using OpConversionPattern<cinm::GemmOp>::OpConversionPattern;

static Value transpose(ImplicitLocOpBuilder &builder, Value tensor) {
auto inTy = tensor.getType().cast<RankedTensorType>();
auto inTy = cast<RankedTensorType>(tensor.getType());
auto shape = inTy.getShape();
SmallVector<int64_t, 2> newShape{shape[1], shape[0]};
SmallVector<int64_t, 2> perms{1, 0};
@@ -785,10 +785,8 @@ struct ConvertCinmReduceToCnm : public OpConversionPattern<cinm::ReduceOp> {
op.getResult().getType(),
builder.getZeroAttr(op.getResult().getType()));

const bool isFloatOp = op.getType()
.cast<ShapedType>()
.getElementType()
.dyn_cast<FloatType>() != nullptr;
const bool isFloatOp = isa<FloatType>(
cast<ShapedType>(op.getType()).getElementType());

llvm::SmallVector<Value, 1> newResults;
if (convertCinmToCnm(
14 changes: 7 additions & 7 deletions cinnamon/lib/Conversion/CnmToGPU/CnmToGPU.cpp
@@ -51,11 +51,11 @@ MemRefType convertCnmBufferToMemRefType(cnm::BufferType bufferType) {
void convertLaunchParameter(ConversionPatternRewriter &rewriter, Location loc,
Value buffer, ValueRange threadIds,
BlockArgument arg) {
if (!buffer.getType().dyn_cast<cnm::BufferType>()) {
const auto bufferType = dyn_cast<cnm::BufferType>(buffer.getType());

if (!bufferType)
return;
}

const BufferType bufferType = buffer.getType().dyn_cast<cnm::BufferType>();
const MemRefType memrefType = convertCnmBufferToMemRefType(bufferType);

const Value source = createOrFoldUnrealizedConversionCast(
@@ -122,8 +122,8 @@ struct ConvertCnmScatterToGPU : public OpConversionPattern<cnm::ScatterOp> {
ConversionPatternRewriter &rewriter) const override {
const WorkgroupType workgroupType = op.getWg().getType();
const ArrayRef<int64_t> workgroupShape = workgroupType.getShape();
const cnm::BufferType bufferType =
op.getOperandTypes()[1].dyn_cast<cnm::BufferType>();
const auto bufferType =
dyn_cast<cnm::BufferType>(op.getOperand(1).getType());

Value src = rewriter.getRemappedValue(op.getOperand(0));
Value dst = rewriter.getRemappedValue(op.getOperand(1));
@@ -155,8 +155,8 @@ struct ConvertCnmGatherToGPU : public OpConversionPattern<cnm::GatherOp> {
ConversionPatternRewriter &rewriter) const override {
const WorkgroupType workgroupType = op.getWg().getType();
const ArrayRef<int64_t> workgroupShape = workgroupType.getShape();
const cnm::BufferType bufferType =
op.getOperandTypes()[0].dyn_cast<cnm::BufferType>();
const auto bufferType =
dyn_cast<cnm::BufferType>(op.getOperand(0).getType());

Value src = rewriter.getRemappedValue(op.getOperand(0));
src = createOrFoldUnrealizedConversionCast(
10 changes: 5 additions & 5 deletions cinnamon/lib/Conversion/CnmToUPMEM/CnmToUPMEM.cpp
@@ -41,8 +41,8 @@ template <typename T> T reduceMul(ArrayRef<T> arr) {
}

MemRefType convertTensorToMemref(ShapedType ty) {
if (ty.isa<MemRefType>())
return ty.cast<MemRefType>();
if (isa<MemRefType>(ty))
return cast<MemRefType>(ty);

return MemRefType::get(ty.getShape(), ty.getElementType());
}
@@ -127,7 +127,7 @@ struct ConvertCnmGatherToUPMEM : public OpConversionPattern<cnm::GatherOp> {
ConversionPatternRewriter &rewriter) const override {

Value outputBuf = adaptor.getOutputBuf();
bool isBufferized = op.getOutputBuf().getType().isa<BaseMemRefType>();
bool isBufferized = isa<BaseMemRefType>(op.getOutputBuf().getType());
if (!isBufferized) {
outputBuf = rewriter.create<memref::AllocOp>(
op->getLoc(), convertTensorToMemref(op.getOutputBuf().getType()));
@@ -166,7 +166,7 @@ struct ConvertCnmLaunchToUPMEM : public OpConversionPattern<cnm::LaunchOp> {
const size_t availableWRAM = 32 * 1024;
size_t requiredWRAM = 0;
for (Value buffer : op.getParams()) {
const BufferType bufferType = buffer.getType().cast<BufferType>();
const BufferType bufferType = cast<BufferType>(buffer.getType());
const size_t elementSize =
bufferType.getElementType().getIntOrFloatBitWidth() / 8;
requiredWRAM += reduceMul(bufferType.getShape()) * elementSize;
@@ -208,7 +208,7 @@ struct ConvertCnmLaunchToUPMEM : public OpConversionPattern<cnm::LaunchOp> {
continue;
}

const BufferType bufferType = buffer.getType().cast<BufferType>();
const BufferType bufferType = cast<BufferType>(buffer.getType());
const size_t chunkSize = reduceMul(bufferType.getShape());
const size_t memoryPerTasklet = chunksPerTasklet * chunkSize;
const size_t memoryPerDPU = wgShape[2] * memoryPerTasklet;
6 changes: 3 additions & 3 deletions cinnamon/lib/Conversion/CommonPatterns.cpp
@@ -94,7 +94,7 @@ LogicalResult ConvertCnmSetZeroToAffine::matchAndRewrite(
cnm::SetZeroOp op, OpAdaptor, ConversionPatternRewriter &rewriter) const {
const Value dst = rewriter.getRemappedValue(op.getOperand());

const MemRefType type = dst.getType().cast<MemRefType>();
const MemRefType type = cast<MemRefType>(dst.getType());
const SmallVector<int64_t> loopSizes{type.getShape()};
const SmallVector<int64_t> loopSteps(loopSizes.size(), 1);

@@ -125,8 +125,8 @@ SmallVector<Value> createAffineApply(OpBuilder &builder, Location loc,
void createMemrefSubviewCopy(OpBuilder &builder, Location loc, Value src,
Value dst, ArrayRef<int64_t> sliceShape,
ValueRange srcOffsets, ValueRange dstOffsets) {
MemRefType srcType = src.getType().cast<MemRefType>();
MemRefType dstType = dst.getType().cast<MemRefType>();
MemRefType srcType = cast<MemRefType>(src.getType());
MemRefType dstType = cast<MemRefType>(dst.getType());

SmallVector<int64_t> srcStaticOffsets(srcType.getRank(), 0);
SmallVector<int64_t> srcStaticSizes{srcType.getShape()};
6 changes: 3 additions & 3 deletions cinnamon/lib/Conversion/TorchToCinm/TorchToCinm.cpp
@@ -49,20 +49,20 @@ struct ConvertTorchTensorOpToCinm : OpConversionPattern<SourceOp> {
ConversionPatternRewriter &rewriter) const override {

auto lhs = op.getOperand(0);
auto lhsType = lhs.getType().template cast<torch::Torch::ValueTensorType>();
auto lhsType = cast<torch::Torch::ValueTensorType>(lhs.getType());
auto lhsConversionOp =
rewriter.create<torch::TorchConversion::ToBuiltinTensorOp>(
op.getLoc(), lhsType.toBuiltinTensor(), lhs);

auto rhs = op.getOperand(1);
auto rhsType = rhs.getType().template cast<torch::Torch::ValueTensorType>();
auto rhsType = cast<torch::Torch::ValueTensorType>(rhs.getType());
auto rhsConversionOp =
rewriter.create<torch::TorchConversion::ToBuiltinTensorOp>(
op.getLoc(), rhsType.toBuiltinTensor(), rhs);

auto result = op.getResult();
auto resultType =
result.getType().template cast<torch::Torch::ValueTensorType>();
cast<torch::Torch::ValueTensorType>(result.getType());

auto cinmComputeOp = rewriter.create<cinm::ComputeOp>(
op.getLoc(), resultType.toBuiltinTensor());
12 changes: 6 additions & 6 deletions cinnamon/lib/Conversion/UPMEMToLLVM/UPMEMToLLVM.cpp
@@ -148,7 +148,7 @@ static FailureOr<AffineMap> linearizeAffineMap(AffineMap map,
}

auto layoutMap = bufferTy.getLayout().getAffineMap();
if (bufferTy.getLayout().isa<StridedLayoutAttr>()) {
if (isa<StridedLayoutAttr>(bufferTy.getLayout())) {
// Replace offsets with 0 to delete the symbols.
// Offset is calculated outside of the affine map.
layoutMap = layoutMap.replaceDimsAndSymbols(
@@ -315,8 +315,8 @@
// to find it later
affineMapFun->setAttr("upmem.generated_from", AffineMapAttr::get(*linearMap));

rewriter = ImplicitLocOpBuilder::atBlockBegin(rewriter.getLoc(),
affineMapFun.addEntryBlock(rewriter));
rewriter = ImplicitLocOpBuilder::atBlockBegin(
rewriter.getLoc(), affineMapFun.addEntryBlock(rewriter));
Value arg = affineMapFun.getArgument(0);
// affine expects to deal with index type only
arg = createOrFoldUnrealizedConversionCast(rewriter.getLoc(), rewriter,
@@ -376,7 +376,7 @@ static LogicalResult lowerScatterOrGather(Op op, typename Op::Adaptor adaptor,
}

Value bareHostBuf = adaptor.getHostBuffer();
if (adaptor.getHostBuffer().getType().template isa<LLVM::LLVMStructType>()) {
if (isa<LLVM::LLVMStructType>(adaptor.getHostBuffer().getType())) {
// Here we compute the pointer to the start of the memref
// converted memref
Value basePtr =
@@ -557,8 +557,8 @@ struct ConvertUPMEMToLLVMPass
const auto addUnrealizedCast = [](OpBuilder &builder, Type type,
ValueRange inputs,
Location loc) -> Value {
// if (type.isa<BaseMemRefType>() && inputs.size() == 1 &&
// inputs[0].getType().isa<RankedTensorType>()) {
// if (isa<BaseMemRefType>(type) && inputs.size() == 1 &&
// isa<RankedTensorType>(inputs[0].getType())) {
// return builder.create<bufferization::ToMemrefOp>(loc, type, inputs)
// .getResult();
// }
15 changes: 8 additions & 7 deletions cinnamon/lib/Dialect/Cim/IR/CimOps.cpp
@@ -37,14 +37,15 @@ void AcquireDeviceOp::getAsmResultNames(::mlir::OpAsmSetValueNameFn setNameFn) {
setNameFn(getResult(), "cim_dev");
}

void AcquireCrossbarOp::getAsmResultNames(::mlir::OpAsmSetValueNameFn setNameFn) {
void AcquireCrossbarOp::getAsmResultNames(
::mlir::OpAsmSetValueNameFn setNameFn) {
setNameFn(getResult(), "cim_cbr");
}

::mlir::LogicalResult GemmOp::verify() {
auto lhs = getLhs().getType().cast<ShapedType>();
auto rhs = getRhs().getType().cast<ShapedType>();
auto result = getResult().getType().cast<ShapedType>();
auto lhs = cast<ShapedType>(getLhs().getType());
auto rhs = cast<ShapedType>(getRhs().getType());
auto result = cast<ShapedType>(getResult().getType());

if (lhs.getElementType() != rhs.getElementType())
return emitOpError("lhs and rhs must have the same element type");
@@ -67,9 +67,9 @@ ::mlir::LogicalResult GemmOp::verify() {
}

::mlir::LogicalResult GemvOp::verify() {
auto lhs = getLhs().getType().cast<ShapedType>();
auto rhs = getRhs().getType().cast<ShapedType>();
auto result = getResult().getType().cast<ShapedType>();
auto lhs = cast<ShapedType>(getLhs().getType());
auto rhs = cast<ShapedType>(getRhs().getType());
auto result = cast<ShapedType>(getResult().getType());

if (lhs.getElementType() != rhs.getElementType())
return emitOpError("lhs and rhs must have the same element type");