[mlir][memref] Add a new ReifyResultShapes pass #145927
@@ -0,0 +1,144 @@
//===- ReifyResultShapes.cpp - Reify result shapes ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This transform reifies result shapes of `ReifyRankedShapedTypeOpInterface`
// operations with ranked `memref` and `tensor` results.
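//
// For example (an illustrative sketch that mirrors the `tensor.concat` test
// in this patch), a dynamic result shape whose leading dimensions reify to
// constants is tightened in place, and a cast back to the original type
// keeps all existing users valid:
//
//   %0 = tensor.concat dim(2) %a, %b
//       : (tensor<4x7x3xf32>, tensor<?x?x?xf32>) -> tensor<?x?x?xf32>
//
// becomes
//
//   %0 = tensor.concat dim(2) %a, %b
//       : (tensor<4x7x3xf32>, tensor<?x?x?xf32>) -> tensor<4x7x?xf32>
//   %1 = tensor.cast %0 : tensor<4x7x?xf32> to tensor<?x?x?xf32>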
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/MemRef/Transforms/Passes.h"

#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/MemRef/Transforms/Transforms.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Interfaces/InferTypeOpInterface.h"
#include "llvm/Support/InterleavedRange.h"

#define DEBUG_TYPE "reify-result-shapes"
#define DBGS() (llvm::dbgs() << "[" DEBUG_TYPE << "]: ")

namespace mlir {
namespace memref {
#define GEN_PASS_DEF_REIFYRESULTSHAPESPASS
#include "mlir/Dialect/MemRef/Transforms/Passes.h.inc"
} // namespace memref
} // namespace mlir

using namespace mlir;

LogicalResult
mlir::memref::reifyOpResultShapes(RewriterBase &rewriter,
                                  ReifyRankedShapedTypeOpInterface op) {
  LLVM_DEBUG({ DBGS() << " reifying op: " << op << "\n"; });
  // Get the reified output shapes.
  ReifiedRankedShapedTypeDims reifiedResultShapes;
  if (failed(mlir::reifyResultShapes(rewriter, op, reifiedResultShapes)) ||
Review comment: This is my source of confusion. As far as I know, this interface is meant to extract information about the shape of the op's results, but this pass is actually changing the operation itself. That seems like something that cannot be done based on the interface/clone alone: the change in the result type might make the operation invalid according to its verifier. This kind of rewrite cannot really be done just on the interface.

Reply: To me the interface description establishes an implicit contract allowing this. Otherwise, what would it mean for the reified shapes to disagree with the op's result types? From my POV the interface solves this issue with the return of the `LogicalResult`: an op that cannot guarantee consistent reified shapes can return failure. Nonetheless, I do see the argument for making the implicit contract explicit. So how about adding something along the lines of the following method to the interface?

Reply: I am not sure I fully follow the logic. Let's assume the op is currently valid and you are getting the shape of the result. Now you change the shape of the result without modifying any of its other operands. There is no way you can make an interface guarantee that the shape it found is considered valid by the operation; the operation method itself might not know that. The inconsistency could be coming from the verifier checking consistency between the other operands' values and the result type. You really cannot expect the interface implementation to account for that.
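To make the verifier concern concrete, here is an illustrative sketch that is not part of the patch or the discussion above. `tensor.extract_slice` infers its expected result type from the mixed static/dynamic sizes, so its verifier ties the result type to the size operands:

  %s = tensor.extract_slice %t[0, 0] [%sz, 4] [1, 1]
      : tensor<?x?xf32> to tensor<?x4xf32>

If shape reification concluded that %sz is always 5, rewriting only the result type to tensor<5x4xf32> while leaving the %sz operand dynamic would be rejected by the verifier, because the result type no longer matches the type inferred from the operands.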
      reifiedResultShapes.empty()) {
    return op.emitError() << "failed to get the reified shapes";
  }

  bool modified = false;
  // Compute the new output types.
  SmallVector<Type> outTypes;
  for (const auto &[oldTy, reifiedShape] :
       llvm::zip(op->getResultTypes(), reifiedResultShapes)) {
    // Skip if it's not a memref or tensor type.
    if (!isa<RankedTensorType, MemRefType>(oldTy)) {
      outTypes.push_back(oldTy);
      continue;
    }

    ShapedType shapedTy = cast<ShapedType>(oldTy);

    SmallVector<int64_t> shape = llvm::to_vector(shapedTy.getShape());
    for (auto &&[dim, ofr] : llvm::zip_equal(shape, reifiedShape)) {
      std::optional<int64_t> maybeCst = getConstantIntValue(ofr);
      // If the reified dim is dynamic, set it appropriately.
      if (!maybeCst.has_value()) {
        dim = ShapedType::kDynamic;
        continue;
      }
      // Set the static dim.
      dim = *maybeCst;
    }

    // If the shape didn't change, continue.
    if (shape == shapedTy.getShape()) {
      outTypes.push_back(oldTy);
      continue;
    }
    modified = true;
    outTypes.push_back(shapedTy.cloneWith(shape, shapedTy.getElementType()));
  }

  // Return if we don't need to update.
  if (!modified) {
    LLVM_DEBUG({ DBGS() << "- op doesn't require update\n"; });
    return success();
  }

  LLVM_DEBUG({
    DBGS() << "- oldTypes: " << llvm::interleaved_array(op->getResultTypes())
           << " \n";
    DBGS() << "- outTypes: " << llvm::interleaved_array(outTypes) << " \n";
  });

  // We now have the output types; clone the op and insert casts on the
  // results whose type changed.
  Location loc = op->getLoc();
  SmallVector<Value> newResults;
  Operation *newOp = rewriter.clone(*op);

  for (auto [reifiedTy, oldRes] : llvm::zip(outTypes, op->getResults())) {
    OpResult newRes = newOp->getResult(oldRes.getResultNumber());
    Type oldTy = oldRes.getType();
    // Continue if the type remained invariant or is not shaped.
    if (oldTy == reifiedTy || !isa<MemRefType, RankedTensorType>(oldTy)) {
      newResults.push_back(newRes);
      continue;
    }

    // Update the type and cast the new result back to the original type so
    // that all existing users remain valid.
    newRes.setType(reifiedTy);
    if (isa<RankedTensorType>(reifiedTy)) {
      newResults.push_back(rewriter.create<tensor::CastOp>(loc, oldTy, newRes));
    } else {
      assert(isa<MemRefType>(reifiedTy) && "expected a memref type");
      newResults.push_back(rewriter.create<memref::CastOp>(loc, oldTy, newRes));
    }
  }

  LLVM_DEBUG({
    DBGS() << "- reified results " << llvm::interleaved_array(newResults)
           << "\n";
  });
  rewriter.replaceOp(op, newResults);
  return success();
}

//===----------------------------------------------------------------------===//
// Pass registration
//===----------------------------------------------------------------------===//

namespace {
struct ReifyResultShapesPass final
    : public memref::impl::ReifyResultShapesPassBase<ReifyResultShapesPass> {
  void runOnOperation() override;
};
} // namespace

void ReifyResultShapesPass::runOnOperation() {
  // Collect the ops up front: reifyOpResultShapes replaces the visited op,
  // which would otherwise invalidate an in-progress walk.
  SmallVector<ReifyRankedShapedTypeOpInterface> ops;
  getOperation()->walk(
      [&](ReifyRankedShapedTypeOpInterface op) { ops.push_back(op); });
  IRRewriter rewriter(&getContext());
  for (ReifyRankedShapedTypeOpInterface op : ops) {
    rewriter.setInsertionPoint(op);
    if (failed(memref::reifyOpResultShapes(rewriter, op)))
      return signalPassFailure();
  }
}
@@ -0,0 +1,31 @@
// RUN: mlir-opt -reify-result-shapes %s | FileCheck %s

// The test below checks concat op reification. In the first case, no cast is
// inserted, while in the second a cast gets inserted.
// CHECK-LABEL: func.func @concat_reification
func.func @concat_reification(%arg0: tensor<4x7x3xf32>, %arg1 : tensor<4x4x3xf32>, %arg2: tensor<?x?x?xf32>)
  -> (tensor<4x11x3xf32>, tensor<?x?x?xf32>) {
  // CHECK: %[[RES0:.*]] = tensor.concat dim(1) %{{.*}} : (tensor<4x7x3xf32>, tensor<4x4x3xf32>) -> tensor<4x11x3xf32>
  %1 = tensor.concat dim(1) %arg0, %arg1 : (tensor<4x7x3xf32>, tensor<4x4x3xf32>) -> tensor<4x11x3xf32>
  // CHECK: %[[V0:.*]] = tensor.concat dim(2) %{{.*}} : (tensor<4x7x3xf32>, tensor<?x?x?xf32>) -> tensor<4x7x?xf32>
  // CHECK: %[[RES1:.*]] = tensor.cast %[[V0]] : tensor<4x7x?xf32> to tensor<?x?x?xf32>
  %2 = tensor.concat dim(2) %arg0, %arg2 : (tensor<4x7x3xf32>, tensor<?x?x?xf32>) -> tensor<?x?x?xf32>
  // CHECK: return %[[RES0]], %[[RES1]] : tensor<4x11x3xf32>, tensor<?x?x?xf32>
  return %1, %2 : tensor<4x11x3xf32>, tensor<?x?x?xf32>
}

// CHECK-LABEL: func.func @pad_reification
func.func @pad_reification(%cst : f32, %idx : index, %t: tensor<64x?x64xf32>) -> tensor<1x?x64xf32> {
  %pad_amt = affine.apply affine_map<(d0) -> (-d0 + 256)>(%idx)
  %es = tensor.extract_slice %t[0, 0, 0] [1, %idx, 64] [1, 1, 1]
    : tensor<64x?x64xf32> to tensor<1x?x64xf32>

  // CHECK: tensor.pad
  // CHECK: : tensor<1x?x64xf32> to tensor<1x256x64xf32>
  // CHECK: tensor.cast %{{.*}} : tensor<1x256x64xf32> to tensor<1x?x64xf32>
  %padded = tensor.pad %es low[0, 0, 0] high[0, %pad_amt, 0] {
    ^bb0(%a: index, %b: index, %c: index):
      tensor.yield %cst : f32
  } : tensor<1x?x64xf32> to tensor<1x?x64xf32>

  return %padded : tensor<1x?x64xf32>
}