[Codegen] Remove deprecated vector distribution transform codegen path (iree-org#19233)

Removes the iree_vector_ext.layout attribute from VectorExt, along with the
related passes that were only needed for the vector distribution transform
dialect path. This path is now deprecated.
Groverkss authored Nov 21, 2024
1 parent e1ce3fa commit 6583762
Showing 28 changed files with 209 additions and 3,589 deletions.

Large diffs are not rendered by default.

9 changes: 0 additions & 9 deletions compiler/src/iree/compiler/Codegen/Common/GPU/GPUPatterns.h
@@ -31,12 +31,6 @@ void populateDropSharedMemoryDeallocOpPatterns(RewritePatternSet &patterns);

void populateGPUDistributionPatterns(RewritePatternSet &patterns);

void populateGPUDistributionLayoutAttrPatterns(Value laneId,
RewritePatternSet &patterns);

void populateGPUReductionDistributionPatterns(RewritePatternSet &patterns,
int64_t maxBitsPerShuffle = 32);

void populateGPUDistributeNestedLayoutAttrPatterns(
RewritePatternSet &patterns, Value threadId, int64_t subgroupSize,
int64_t maxBitsPerShuffle = 32);
@@ -46,9 +40,6 @@ void populateGPUDistributeNestedLayoutAttrPatterns(
void populateGPUDistributeNestedLayoutContractAMDGPUPatterns(
RewritePatternSet &patterns);

void populateGPULayoutResolutionDistributionPatterns(
RewritePatternSet &patterns);

} // namespace mlir::iree_compiler

#endif // IREE_COMPILER_CODEGEN_COMMON_GPUPATTERNS_H_

Large diffs are not rendered by default.

@@ -1120,14 +1120,10 @@ transform_dialect::TestGpuVectorDistribution::applyToOne(
rewriter.create<gpu::ThreadIdOp>(target.getLoc(), gpu::Dimension::x);

populateGPUDistributionPatterns(patterns);
populateGPUDistributionLayoutAttrPatterns(laneId, patterns);
populateGPUReductionDistributionPatterns(patterns);
// For testing we use subgroup size = 64.
populateGPUDistributeNestedLayoutAttrPatterns(patterns, laneId,
/*subgroupSize=*/64);
populateGPUDistributeNestedLayoutContractAMDGPUPatterns(patterns);
if (getExperimental())
populateGPULayoutResolutionDistributionPatterns(patterns);
if (failed(distributeVectorOps(target, patterns, options))) {
return emitDefaultDefiniteFailure(target);
}

Large diffs are not rendered by default.

@@ -25,219 +25,6 @@ namespace mlir::iree_compiler::IREE::VectorExt {

using VectorValue = TypedValue<VectorType>;

bool PerDimLayoutAttr::contains(const LayoutDimension &dim) {
for (LayoutDimensionAttr label : getLabels()) {
if (label.getValue() == dim)
return true;
}
return false;
}

std::optional<int64_t> PerDimLayoutAttr::getShape(const LayoutDimension &dim) {
for (auto value : llvm::zip(getLabels(), getShapes())) {
if (dim == std::get<0>(value).getValue())
return std::get<1>(value);
}
return std::nullopt;
}

std::optional<int64_t> LayoutAttr::getShape(const LayoutDimension &dim) const {
for (PerDimLayoutAttr layout : getLayouts()) {
std::optional<int64_t> maybeShape = layout.getShape(dim);
if (maybeShape)
return maybeShape.value();
}
return std::nullopt;
}

// Check that this layout is valid for the given shaped type: the ranks must
// match, and each vector dimension's size must equal the product of that
// dimension's per-dim layout shapes.
LogicalResult LayoutAttr::isValidLayout(ShapedType shapeTy,
Location loc) const {
ArrayRef<int64_t> shape = shapeTy.getShape();
if (shape.size() != getRank()) {
return emitError(loc, "Rank of vector (")
<< shape.size() << ") does not match rank of layout (" << getRank()
<< ").";
}
for (auto [idx, layout] : llvm::enumerate(getLayouts())) {
ArrayRef<int64_t> layoutShape = layout.getShapes();
int64_t expectedShape =
std::reduce(layoutShape.begin(), layoutShape.end(),
static_cast<int64_t>(1), std::multiplies<int64_t>());
if (expectedShape != shape[idx]) {
std::string shapeStr;
llvm::raw_string_ostream shapeOs(shapeStr);
llvm::interleaveComma(shape, shapeOs);
std::string layoutStr;
llvm::raw_string_ostream layoutOs(layoutStr);
printStripped(layoutOs);
return emitError(loc, "Vector shape: [")
<< shapeStr << "] does not match the layout (" << layoutStr
<< ") at dim " << idx
<< ". Dimension expected by layout: " << expectedShape
<< " actual: " << shape[idx];
}
}
return success();
}
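
// A standalone sketch (hypothetical modelling, not the IREE API) of the rule
// isValidLayout enforces: ranks must match, and each vector dimension's size
// must equal the product of that dimension's layout shapes.
#include <cstdint>
#include <functional>
#include <iostream>
#include <numeric>
#include <vector>

bool isValidLayoutSketch(const std::vector<int64_t> &vectorShape,
                         const std::vector<std::vector<int64_t>> &layout) {
  if (vectorShape.size() != layout.size())
    return false;
  for (size_t i = 0; i < layout.size(); ++i) {
    int64_t product =
        std::accumulate(layout[i].begin(), layout[i].end(), int64_t{1},
                        std::multiplies<int64_t>());
    if (product != vectorShape[i])
      return false;
  }
  return true;
}

int main() {
  // vector<16x16>: dim 0 as BATCHX=2, LANEY=4, VECTORY=2 (2*4*2 == 16),
  // dim 1 as BATCHY=2, LANEX=8 (2*8 == 16).
  std::cout << isValidLayoutSketch({16, 16}, {{2, 4, 2}, {2, 8}})       // 1
            << isValidLayoutSketch({16, 16}, {{2, 4}, {2, 8}}) << "\n"; // 0
}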

// Project out the layout for the specified dimensions
// resulting in the layout for a lower dimensional vector.
VectorLayoutInterface LayoutAttr::project(ArrayRef<bool> droppedDims) const {
assert(droppedDims.size() == getRank() &&
"droppedDims size must match layout size");

ArrayRef<PerDimLayoutAttr> layouts = getLayouts();
SmallVector<PerDimLayoutAttr> newLayouts;
for (auto pair : llvm::zip(droppedDims, layouts)) {
if (!std::get<0>(pair))
newLayouts.push_back(std::get<1>(pair));
}
return LayoutAttr::get(getContext(), newLayouts);
}

// Permute the layout according to the provided permutation
// vector. The dimensionality of the layout remains the same.
VectorLayoutInterface LayoutAttr::permute(ArrayRef<int64_t> permutation) const {
assert(permutation.size() == getRank() &&
"permutation size must match layout rank");

ArrayRef<PerDimLayoutAttr> layouts = getLayouts();
SmallVector<PerDimLayoutAttr> newLayouts;
for (int64_t index : permutation) {
assert(index >= 0 && index < getRank());
newLayouts.push_back(layouts[index]);
}
return LayoutAttr::get(getContext(), newLayouts);
}

// This function returns the distributed shape of the SIMT
// vector and evaluates it in the following order:
// BATCHX, BATCHY, VECTORY, VECTORX
// The vector dimensions are combined into a single SIMT
// vector dimension.
SmallVector<int64_t> LayoutAttr::getDistributedShape() const {
SmallVector<LayoutDimension> labels{
LayoutDimension::BATCHX, LayoutDimension::BATCHY,
LayoutDimension::VECTORY, LayoutDimension::VECTORX};
SmallVector<int64_t> simtVectorShape;
std::optional<int64_t> vectorShape;
for (LayoutDimension dim : labels) {
ArrayRef<PerDimLayoutAttr> layouts = getLayouts();
for (PerDimLayoutAttr layout : layouts) {
if (!layout.contains(dim))
continue;
int64_t shape = layout.getShape(dim).value();
if (isVectorDimension(dim)) {
vectorShape = shape * vectorShape.value_or(1);
continue;
}
simtVectorShape.push_back(shape);
}
}
if (vectorShape)
simtVectorShape.push_back(vectorShape.value());
return simtVectorShape;
}
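
// A standalone sketch (hypothetical modelling, not the IREE API) of the
// traversal above: labels are visited in the fixed order BATCHX, BATCHY,
// VECTORY, VECTORX; each batch dim contributes one SIMT dim, while all
// vector dims are fused into a single trailing SIMT dim.
#include <cstdint>
#include <iostream>
#include <map>
#include <optional>
#include <string>
#include <vector>

using PerDimLayoutSketch = std::map<std::string, int64_t>; // label -> shape

std::vector<int64_t>
getDistributedShapeSketch(const std::vector<PerDimLayoutSketch> &layouts) {
  std::vector<int64_t> simtShape;
  std::optional<int64_t> vectorShape;
  for (const char *dim : {"BATCHX", "BATCHY", "VECTORY", "VECTORX"}) {
    for (const PerDimLayoutSketch &layout : layouts) {
      auto it = layout.find(dim);
      if (it == layout.end())
        continue;
      if (it->first == "VECTORX" || it->first == "VECTORY")
        vectorShape = it->second * vectorShape.value_or(1);
      else
        simtShape.push_back(it->second);
    }
  }
  if (vectorShape)
    simtShape.push_back(*vectorShape);
  return simtShape;
}

int main() {
  // dim 0 = {BATCHX:2, LANEY:4, VECTORY:2}, dim 1 = {BATCHY:3, LANEX:8,
  // VECTORX:4} -> SIMT shape [2, 3, 8] (VECTORY * VECTORX = 2 * 4 fused).
  for (int64_t s : getDistributedShapeSketch(
           {{{"BATCHX", 2}, {"LANEY", 4}, {"VECTORY", 2}},
            {{"BATCHY", 3}, {"LANEX", 8}, {"VECTORX", 4}}}))
    std::cout << s << " ";
  std::cout << "\n";
}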

PerDimLayoutAttr LayoutAttr::getDimLayout(int64_t dim) const {
assert(dim >= 0 && dim < getRank());
return getLayouts()[dim];
}

std::optional<int64_t> LayoutAttr::getBatchDim(int64_t dim) {
assert(dim < getRank());
PerDimLayoutAttr layout = getDimLayout(dim);
for (auto [name, shape] :
llvm::zip_equal(layout.getLabels(), layout.getShapes())) {
if (isBatchDimension(name.getValue()))
return shape;
}
return std::nullopt;
}

std::optional<int64_t> LayoutAttr::getLaneDim(int64_t dim) {
assert(dim < getRank());
PerDimLayoutAttr layout = getDimLayout(dim);
for (auto [name, shape] :
llvm::zip_equal(layout.getLabels(), layout.getShapes())) {
if (isLaneDimension(name.getValue()))
return shape;
}
return std::nullopt;
}

std::optional<LayoutDimension> LayoutAttr::getLane(int64_t dim) {
assert(dim < getRank());
PerDimLayoutAttr layout = getDimLayout(dim);
for (auto [name, shape] :
llvm::zip_equal(layout.getLabels(), layout.getShapes())) {
if (isLaneDimension(name.getValue()))
return name.getValue();
}
return std::nullopt;
}

int64_t LayoutAttr::getRank() const { return getLayouts().size(); }

std::tuple<int64_t, int64_t, int64_t> LayoutAttr::getLaneGrid() {
int64_t laneX = 1;
int64_t laneY = 1;
int64_t laneZ = 1;
for (PerDimLayoutAttr dimLayout : getLayouts()) {
// Note that valid layouts only include at most one instance of each
// dimension type, so this is simply doing assignment on the first instance
// of each lane index, not an accumulative product.
auto maybeXShape = dimLayout.getShape(LayoutDimension::LANEX);
laneX *= maybeXShape.value_or(1);
auto maybeYShape = dimLayout.getShape(LayoutDimension::LANEY);
laneY *= maybeYShape.value_or(1);
auto maybeZShape = dimLayout.getShape(LayoutDimension::LANEZ);
laneZ *= maybeZShape.value_or(1);
}
return std::make_tuple(laneX, laneY, laneZ);
}
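
// A standalone sketch (hypothetical modelling, not the IREE API) of the lane
// grid computation above: each of LANEX/LANEY/LANEZ occurs in at most one
// per-dim layout of a valid layout, so the running product acts as an
// assignment of that lane dimension's size.
#include <array>
#include <cstdint>
#include <iostream>
#include <map>
#include <string>
#include <vector>

std::array<int64_t, 3>
getLaneGridSketch(const std::vector<std::map<std::string, int64_t>> &layouts) {
  std::array<int64_t, 3> grid = {1, 1, 1}; // {LANEX, LANEY, LANEZ}
  const std::array<std::string, 3> lanes = {"LANEX", "LANEY", "LANEZ"};
  for (const auto &layout : layouts)
    for (int i = 0; i < 3; ++i)
      if (auto it = layout.find(lanes[i]); it != layout.end())
        grid[i] *= it->second;
  return grid;
}

int main() {
  // dim 0 carries LANEY=4, dim 1 carries LANEX=8 -> a grid of 8x4x1 = 32 lanes.
  auto grid = getLaneGridSketch({{{"LANEY", 4}}, {{"LANEX", 8}}});
  std::cout << grid[0] << "x" << grid[1] << "x" << grid[2] << "\n"; // 8x4x1
}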

uint64_t LayoutAttr::getShuffleOffset(int64_t reductionDim) {
uint64_t offset = 0;
std::optional<LayoutDimension> laneDim = getLane(reductionDim);
if (!laneDim)
return offset;
switch (laneDim.value()) {
case LayoutDimension::LANEX:
offset = 1;
break;
case LayoutDimension::LANEY:
offset = getShape(LayoutDimension::LANEX).value_or(0);
break;
case LayoutDimension::LANEZ:
offset = getShape(LayoutDimension::LANEX).value_or(0) *
getShape(LayoutDimension::LANEY).value_or(0);
break;
default:
assert(false && "Invalid dimension! Expected lane dimension");
break;
}
return offset;
}
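
// A host-side sketch (plain C++, not GPU code) of the butterfly reduction
// that getShuffleOffset enables. Reducing along LANEY in a LANEX=4, LANEY=8
// grid yields offset = |LANEX| = 4; XOR-shuffling with masks 4, 8, 16 then
// sums the 8 lanes of each LANEY column, mirroring gpu::ShuffleOp's
// "lane k reads from lane k ^ offset" semantics.
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  const uint64_t laneX = 4, laneY = 8;
  const uint64_t offset = laneX; // what getShuffleOffset would return
  std::vector<int64_t> val(laneX * laneY);
  for (uint64_t k = 0; k < val.size(); ++k)
    val[k] = static_cast<int64_t>(k); // each lane starts with its own id

  // One step per bit of the reduced lane dimension, like a sequence of
  // gpu.shuffle xor ops with doubling offsets.
  for (uint64_t mask = offset; mask < offset * laneY; mask <<= 1) {
    std::vector<int64_t> next(val);
    for (uint64_t k = 0; k < val.size(); ++k)
      next[k] = val[k] + val[k ^ mask];
    val = next;
  }

  // Every lane in a LANEY column now holds that column's sum; column 0 is
  // 0 + 4 + 8 + ... + 28 = 112.
  std::cout << val[0] << "\n"; // 112
}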

bool LayoutAttr::hasLaneConflictWith(const LayoutAttr &other) {
SmallVector<LayoutDimension> laneDims{
LayoutDimension::LANEX, LayoutDimension::LANEY, LayoutDimension::LANEZ};
for (LayoutDimension dim : laneDims) {
std::optional<int64_t> shape = getShape(dim);
std::optional<int64_t> otherShape = other.getShape(dim);
if ((shape && !otherShape) || (!shape && otherShape))
return true;
if (shape && otherShape) {
if (shape.value() != otherShape.value())
return true;
}
}
return false;
}

// Project the nested layout. This takes a mask over the dimensions of the
// vector associated with this layout and projects out the masked dimensions,
// reducing the rank of the layout in the process.
@@ -13,98 +13,6 @@ include "iree/compiler/Codegen/Dialect/VectorExt/IR/VectorExtBase.td"
// Vector layout attributes
//===---------------------------------------------------------------------===//

// Defines the batch dimensions for the original SIMD tensor.
// By convention, X is along rows and Y along columns.
def BATCHX : I32EnumAttrCase<"BATCHX", 0>;
def BATCHY : I32EnumAttrCase<"BATCHY", 1>;
// Defines the vector dimension.
def VECTORX : I32EnumAttrCase<"VECTORX", 2>;
def VECTORY : I32EnumAttrCase<"VECTORY", 3>;
def VECTORZ : I32EnumAttrCase<"VECTORZ", 4>;
// Defines the lane dimensions.
def LANEX : I32EnumAttrCase<"LANEX", 5>;
def LANEY : I32EnumAttrCase<"LANEY", 6>;
def LANEZ : I32EnumAttrCase<"LANEZ", 7>;

def LayoutDimension : IREEVectorExt_I32EnumAttr<"LayoutDimension",
"Describes the dimension of the high-dimensional layout", [
BATCHX,
BATCHY,
VECTORX,
VECTORY,
VECTORZ,
LANEX,
LANEY,
LANEZ,
]>;

def LayoutDimensionAttr : IREEVectorExt_EnumAttr<LayoutDimension, "dimension">;

def PerDimLayoutAttr : IREEVectorExt_Attr<"PerDimLayout"> {
let mnemonic = "per_dim_layout";
let summary = [{high-dimensional vector register layout for a given vector dimension}];
let description = [{
This attribute describes the register layout of a single vector dimension,
as prescribed by an operation such as matrix multiplication. It explicitly
represents the layout in the IR while the program is still in SIMD form,
before conversion to SIMT form, so that we can reason about layouts,
layout propagation, and layout conflicts.
}];
let parameters = (ins
ArrayRefParameter<"LayoutDimensionAttr", "labels for the high dimensional layout dims">:$labels,
ArrayRefParameter<"int64_t", "shapes for the high dimensional layout dims">:$shapes
);
let assemblyFormat = "`<``[` $labels `]``,` `[` $shapes `]``>`";
let genVerifyDecl = 0;
let extraClassDeclaration = [{
std::optional<int64_t> getShape(const LayoutDimension &dim);
bool contains(const LayoutDimension &dim);
}];
}

def LayoutAttr : IREEVectorExt_Attr<"Layout",
[ DeclareAttrInterfaceMethods<VectorLayoutInterface> ]> {
let mnemonic = "layout";
let summary = [{high-dimensional vector register layout for a given vector}];
let description = [{
This attribute gives the complete layout specification for a vector, with
one PerDimLayoutAttr per vector dimension, whereas the attribute above
describes only a single dimension.
}];
let parameters = (ins
ArrayRefParameter<"PerDimLayoutAttr", "layout for each dimension of the vector">:$layouts
);
let assemblyFormat = "`<`$layouts`>`";
let genVerifyDecl = 0;
let extraClassDeclaration = [{
// Get the shape for a given layout dimension.
std::optional<int64_t> getShape(const LayoutDimension &dim) const;
std::optional<int64_t> getBatchDim(int64_t dim);
// Get the lane dimension shape for a provided SIMD tensor dim.
std::optional<int64_t> getLaneDim(int64_t dim);
// Get the lane dimension for a provided SIMD tensor dim.
std::optional<LayoutDimension> getLane(int64_t dim);

// Returns the grid of lane ids. Assumes a valid layout.
::std::tuple<int64_t, int64_t, int64_t> getLaneGrid();
PerDimLayoutAttr getDimLayout(int64_t dim) const;

// Given the reduction dim, computes the shuffle offset
// based on the shapes of the lane dimensions. The shuffle
// offset is used during the thread global reduction
// when emitting a gpu::ShuffleOp and follows
// the semantics of the offset operand defined there,
// which is that for lane k, the shuffle op returns the
// value from lane k ^ offset.
uint64_t getShuffleOffset(int64_t reductionDim);

// Determines whether the other layout has a lane
// dimension that the current layout does not have OR whether
// the shape of the two layouts for a common lane dimension
// is not the same.
bool hasLaneConflictWith(const LayoutAttr &other);
}];
}

def NestedLayoutAttr : IREEVectorExt_Attr<"NestedLayout",
[ DeclareAttrInterfaceMethods<VectorLayoutInterface> ]> {
let mnemonic = "nested_layout";
@@ -23,10 +23,6 @@ namespace mlir::iree_compiler::IREE::VectorExt {
struct IREEVectorExtDialectOpAsmInterface : public OpAsmDialectInterface {
using OpAsmDialectInterface::OpAsmDialectInterface;
AliasResult getAlias(Attribute attr, raw_ostream &os) const override {
if (llvm::isa<LayoutAttr>(attr)) {
os << "layout";
return AliasResult::OverridableAlias;
}
if (llvm::isa<NestedLayoutAttr>(attr)) {
os << "nested";
return AliasResult::OverridableAlias;