
Commit

Drop unused functions, but do not drop -Wno-unused-function (iree-org#15502)

This is take two on iree-org#15471; it now skips straight to the dessert, no veggies for @benvanik.
bjacob authored Nov 9, 2023
1 parent bc98b9a commit d991dc9
Showing 32 changed files with 0 additions and 624 deletions.
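For context: -Wno-unused-function disables the -Wunused-function diagnostic, which clang and GCC emit for file-local (static) functions that are never referenced in their translation unit, exactly the kind of dead code this commit deletes by hand. A minimal repro of the warning, as a hedged sketch (not a file from the IREE tree):

// example.cpp, compile with: clang++ -Wall -c example.cpp
// Without -Wno-unused-function, clang reports:
//   warning: unused function 'unusedHelper' [-Wunused-function]
static int unusedHelper(int x) { return x * 2; }

int used() { return 42; }

A hedged reading of the title: keeping the suppression avoids turning every transiently orphaned helper into a new warning, while the commit still sweeps out the dead functions that had accumulated.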
@@ -460,42 +460,6 @@ static void hasDestructiveUpdatePattern(Value source, BufferizationPlan &plan) {
}
}

/// Ties together operands for operand fusion as it exists today by reusing
/// the result's buffer for one of the inputs, enabling an in-place update.
/// Ideally we don't need to do this if the fusion just happens at the vector
/// level; to be removed once that works and can be load-bearing. Conditions
/// checked here are
/// 1) the result does not use the value of the `outs` buffer.
/// 2) the input has a single use (this op) and has the same indexing map as
///    the result.
/// 3) the input equivalence set does not have an interface binding, i.e. it is
///    not using a buffer from the dispatch ABI.
static void tieOperandsForOperandFusion(linalg::LinalgOp linalgOp,
                                        BufferizationPlan &plan) {
  for (auto [index, result] : llvm::enumerate(linalgOp.getDpsInitsMutable())) {
    if (linalgOp.payloadUsesValueFromOperand(&result)) {
      continue;
    }
    for (OpOperand *input : linalgOp.getDpsInputOperands()) {
      auto tensorType =
          llvm::dyn_cast<RankedTensorType>(input->get().getType());
      if (!tensorType)
        continue;
      Type inputElementType = tensorType.getElementType();
      Type resultElementType =
          llvm::cast<RankedTensorType>(result.get().getType()).getElementType();
      if (input->get().hasOneUse() && (inputElementType == resultElementType) &&
          linalgOp.getMatchingIndexingMap(input) ==
              linalgOp.getMatchingIndexingMap(&result) &&
          !getEquivalentOpOfType<IREE::HAL::InterfaceBindingSubspanOp>(
              input->get(), plan) &&
          !isFromReadOnlyTensor(input->get(), plan)) {
        plan.unionSets(linalgOp->getResult(index), input->get());
        break;
      }
    }
  }
}
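The plan.unionSets call places the op's result and the chosen input in one equivalence class so that bufferization later assigns them the same buffer. A minimal sketch of that bookkeeping, assuming union-find semantics (plain C++, hypothetical names, not the IREE implementation):

#include <numeric>
#include <vector>

// Hedged sketch: each SSA value id starts in its own set; unionSets merges
// two sets, meaning "these values may share one buffer".
struct BufferizationPlanSketch {
  std::vector<int> parent;
  explicit BufferizationPlanSketch(int numValues) : parent(numValues) {
    std::iota(parent.begin(), parent.end(), 0);
  }
  int find(int v) { return parent[v] == v ? v : parent[v] = find(parent[v]); }
  void unionSets(int a, int b) { parent[find(a)] = find(b); }
  bool equivalent(int a, int b) { return find(a) == find(b); }
};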

void BufferizationPlan::unionSets(Value v1, Value v2) {
  if (!canSetsBeMerged(v1, v2, *this)) {
    return;
@@ -705,23 +705,6 @@ struct FoldMemRefReshape final : public OpConversionPattern<ReshapeOpTy> {
};
};

/// Returns the number of bytes of the given `type`. Returns std::nullopt if
/// the size cannot be deduced.
///
/// Note that this should be kept consistent with how the byte offset was
/// calculated in the subspan ops!
std::optional<int64_t> getNumBytes(Type type) {
  if (type.isIntOrFloat())
    return IREE::Util::getRoundedElementByteWidth(type);
  if (auto vectorType = llvm::dyn_cast<VectorType>(type)) {
    auto elementBytes = getNumBytes(vectorType.getElementType());
    if (!elementBytes)
      return std::nullopt;
    return elementBytes.value() * vectorType.getNumElements();
  }
  return std::nullopt;
}
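The recursion bottoms out at scalar types and multiplies up through vector types. The same arithmetic as a self-contained sketch, with a round-up-to-whole-bytes rule assumed as a stand-in for IREE::Util::getRoundedElementByteWidth:

#include <cstdint>
#include <optional>

// Assumed stand-in for getRoundedElementByteWidth: round a bit width up to
// whole bytes. (The real IREE helper may round differently, e.g. for i1.)
int64_t roundedByteWidth(int64_t bitWidth) { return (bitWidth + 7) / 8; }

// Mirrors the deleted getNumBytes: a scalar is its rounded byte width; a
// vector is element bytes times element count; anything else is "cannot
// deduce".
std::optional<int64_t> numBytesSketch(int64_t elementBitWidth,
                                      int64_t numElements /*1 for scalars*/) {
  if (elementBitWidth <= 0)
    return std::nullopt;
  return roundedByteWidth(elementBitWidth) * numElements;
}
// e.g. a vector<4xf32>: numBytesSketch(32, 4) == 16 bytes.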

/// Erase alignment hints.
struct RemoveAssumeAlignOp
: public OpRewritePattern<memref::AssumeAlignmentOp> {
@@ -36,16 +36,6 @@
namespace mlir {
namespace iree_compiler {

/// Gets the given `attrOrValue` as a Value by creating constant ops for
/// attributes.
static Value getAsValue(OpFoldResult attrOrValue, OpBuilder &builder,
                        Location loc) {
  if (Value val = attrOrValue.dyn_cast<Value>())
    return val;
  auto attr = llvm::cast<IntegerAttr>(attrOrValue.get<Attribute>());
  return builder.create<arith::ConstantIndexOp>(loc, attr.getInt());
}
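An OpFoldResult is a sum type: either an already-materialized Value or a constant Attribute that still needs a constant op created for it. The unwrap-or-materialize control flow, sketched with std::variant (hypothetical stand-in types, not the MLIR classes):

#include <cstdint>
#include <variant>

struct ValueSketch { int64_t id; };                 // stand-in for mlir::Value
using OpFoldResultSketch = std::variant<ValueSketch, int64_t>; // value-or-attr

// Stand-in for builder.create<arith::ConstantIndexOp>(loc, attr.getInt()).
ValueSketch materializeConstant(int64_t constant) {
  return ValueSketch{constant};
}

// Mirrors the deleted getAsValue: pass a Value through untouched, otherwise
// materialize the attribute as a constant op and return its result.
ValueSketch getAsValueSketch(const OpFoldResultSketch &attrOrValue) {
  if (const auto *val = std::get_if<ValueSketch>(&attrOrValue))
    return *val;
  return materializeConstant(std::get<int64_t>(attrOrValue));
}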

#ifndef NDEBUG
inline raw_ostream &operator<<(raw_ostream &os,
                               const LoopTilingAndDistributionInfo &info) {
14 changes: 0 additions & 14 deletions compiler/src/iree/compiler/Codegen/Common/GPU/GPUTensorAlloc.cpp
@@ -80,20 +80,6 @@ static bool transposeOpFilter(Operation *op) {
  return opInfo.isTranspose();
}

/// Returns true if the index map represents a transpose that benefits from
/// shared mem.
static bool isSharedMemTranspose(AffineMap indexMap) {
  if (!indexMap.isEmpty() && indexMap.isPermutation()) {
    // Ensure that the fastest-moving dimension (the last one) is permuted;
    // otherwise shared memory promotion will not benefit the operation.
    if (indexMap.getDimPosition(indexMap.getNumDims() - 1) !=
        indexMap.getNumDims() - 1) {
      return true;
    }
  }
  return false;
}
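On a plain permutation vector the check reduces to: does the last (fastest-varying) dimension move? A standalone sketch under that assumed semantics (not the MLIR AffineMap API):

#include <vector>

// Hedged sketch: shared-memory staging only pays off when the innermost
// (last) dimension is itself permuted; permutations that keep the last
// dimension in place already read/write coalesced.
bool isSharedMemTransposeSketch(const std::vector<int> &perm) {
  if (perm.empty())
    return false; // assumes perm is otherwise a valid permutation
  int last = static_cast<int>(perm.size()) - 1;
  return perm[last] != last;
}
// e.g. {1, 0} -> true (2-D transpose), {0, 2, 1} -> true,
// {1, 0, 2} -> false (innermost dim untouched).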

namespace {
/// Swaps bufferization.alloc_tensor with the copied linalg op result when the
/// linalg op does not use the output initial value during calculation.
43 changes: 0 additions & 43 deletions compiler/src/iree/compiler/Codegen/Common/GenericVectorization.cpp
@@ -25,49 +25,6 @@ namespace mlir {
namespace iree_compiler {
namespace {

/// Returns the op that carries the lowering config. Checks whether the
/// provided op contains the lowering config and returns it. Otherwise, tries
/// to find the lowering config across the function. If there are multiple ops
/// with the same lowering config, returns the first one found. Returns
/// failure if there are multiple ops with different lowering configs.
static FailureOr<Operation *> getRootOp(Operation *op) {
  // Check for self first.
  if (iree_compiler::getLoweringConfig(op)) {
    return op;
  }

  // Get the function op.
  auto funcOp = dyn_cast<func::FuncOp>(op);
  if (!funcOp) {
    funcOp = op->getParentOfType<func::FuncOp>();
  }

  assert(funcOp && "Missing funcOp");

  Operation *rootOp = nullptr;
  mlir::iree_compiler::IREE::Codegen::LoweringConfigAttr rootLoweringConfig;
  auto result = funcOp.walk([&](Operation *op) -> WalkResult {
    auto loweringConfig = iree_compiler::getLoweringConfig(op);
    if (!loweringConfig) {
      return WalkResult::advance();
    }
    if (rootLoweringConfig) {
      if (rootLoweringConfig != loweringConfig) {
        return WalkResult::interrupt();
      }
    } else {
      rootOp = op;
      rootLoweringConfig = loweringConfig;
    }
    return WalkResult::advance();
  });

  if (!rootOp || result.wasInterrupted()) {
    return failure();
  }
  return rootOp;
}
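The walk is a "find the unique tag" scan: the first op carrying a lowering config wins, and a second op carrying a different config aborts the whole query. The same logic over a flat container, as a hedged sketch:

#include <optional>
#include <string>
#include <vector>

// Hedged sketch of getRootOp's walk: scan items for an optional "config" tag;
// succeed with the first tagged item, but fail if two items carry different
// tags (ambiguous root), mirroring WalkResult::interrupt().
std::optional<size_t>
findRootIndex(const std::vector<std::optional<std::string>> &configs) {
  std::optional<size_t> root;
  std::optional<std::string> rootConfig;
  for (size_t i = 0; i < configs.size(); ++i) {
    if (!configs[i])
      continue; // no lowering config on this op
    if (rootConfig && *rootConfig != *configs[i])
      return std::nullopt; // conflicting configs: no unique root
    if (!rootConfig) {
      root = i;
      rootConfig = configs[i];
    }
  }
  return root;
}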

/// Tries to infer the vector sizes from an IR using ValueBounds analysis.
/// Returns failure if vector sizes can't be inferred.
static FailureOr<SmallVector<int64_t>>
@@ -92,8 +92,6 @@ class IREEComprehensiveBufferizePass
};
} // namespace

static bool isaTensor(Type t) { return llvm::isa<TensorType>(t); };

// Default allocation functions.
static FailureOr<Value> defaultAllocationFn(OpBuilder &builder, Location loc,
                                            MemRefType allocationType,
12 changes: 0 additions & 12 deletions compiler/src/iree/compiler/Codegen/Common/RemoveTrivialLoops.cpp
@@ -91,18 +91,6 @@ getWorkgroupRange(Value processorValue, SmallVectorImpl<Value> & /*dims*/,
  return std::nullopt;
}

/// Return true if the given tiled loop is distributed to workgroups.
static bool isWorkgroupLoop(const LoopTilingAndDistributionInfo &info) {
  auto forOp = cast<scf::ForOp>(info.loop);
  Operation *lbOp = forOp.getLowerBound().getDefiningOp();
  if (isa<IREE::HAL::InterfaceWorkgroupIDOp>(lbOp))
    return true;
  auto applyOp = dyn_cast<affine::AffineApplyOp>(lbOp);
  return applyOp && llvm::any_of(applyOp.getMapOperands(), [](Value operand) {
    return operand.getDefiningOp<IREE::HAL::InterfaceWorkgroupIDOp>();
  });
}
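The check accepts a lower bound that either is the workgroup-ID op directly or is an affine-apply fed by one (one level deep, as in the deleted helper). As a plain tree check, hedged sketch:

#include <vector>

// Hedged sketch: a loop counts as workgroup-distributed when its lower bound
// is the workgroup-id "op" itself, or an affine-apply whose operands include
// the workgroup id.
enum class OpKind { WorkgroupId, AffineApply, Other };
struct OpSketch {
  OpKind kind;
  std::vector<const OpSketch *> operands; // defining ops of the operands
};

bool isWorkgroupLoopSketch(const OpSketch &lowerBound) {
  if (lowerBound.kind == OpKind::WorkgroupId)
    return true;
  if (lowerBound.kind != OpKind::AffineApply)
    return false;
  for (const OpSketch *operand : lowerBound.operands)
    if (operand && operand->kind == OpKind::WorkgroupId)
      return true;
  return false;
}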

static LogicalResult removeOneTripTiledLoops(func::FuncOp funcOp,
                                             ArrayRef<int64_t> workgroupSize,
                                             ArrayRef<int64_t> numWorkgroups) {
@@ -120,26 +120,6 @@ getTileAndDistributeConfig(ArrayRef<Operation *> computeOps,
  return success();
}

/// Get the materialization information from a `tensor.pack` operation.
static FailureOr<IREE::LinalgExt::MaterializeEncodingInfo>
getMaterializationInfo(tensor::PackOp packOp) {
  IREE::LinalgExt::MaterializeEncodingInfo encodingInfo;
  SmallVector<OpFoldResult> mixedTileSizes = packOp.getMixedTiles();
  encodingInfo.innerTileSizes.reserve(mixedTileSizes.size());
  for (auto tileSize : mixedTileSizes) {
    if (tileSize.is<Value>()) {
      encodingInfo.innerTileSizes.push_back(ShapedType::kDynamic);
    } else {
      encodingInfo.innerTileSizes.push_back(
          llvm::cast<IntegerAttr>(tileSize.get<Attribute>()).getInt());
    }
  }
  encodingInfo.innerDimsPos = llvm::to_vector(packOp.getInnerDimsPos());
  encodingInfo.outerDimsPerm = llvm::to_vector(packOp.getOuterDimsPerm());
  encodingInfo.srcRank = packOp.getSourceRank();
  return encodingInfo;
}
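Each mixed tile size is either a compile-time constant or a runtime SSA value, and runtime values are recorded with the ShapedType::kDynamic sentinel. A hedged sketch of that flattening, with std::variant and -1 standing in for the sentinel:

#include <cstdint>
#include <variant>
#include <vector>

// Hedged sketch: a mixed tile size is a constant (int64_t) or a runtime
// value (ValueTag); runtime sizes flatten to a kDynamic sentinel, here -1.
struct ValueTag {};
using MixedTileSize = std::variant<int64_t, ValueTag>;
constexpr int64_t kDynamicSketch = -1;

std::vector<int64_t> flattenTileSizes(const std::vector<MixedTileSize> &mixed) {
  std::vector<int64_t> out;
  out.reserve(mixed.size());
  for (const auto &size : mixed)
    out.push_back(std::holds_alternative<ValueTag>(size)
                      ? kDynamicSketch
                      : std::get<int64_t>(size));
  return out;
}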

//===---------------------------------------------------------------------===//
// Patterns to lower operations that are used to compute the number of
// workgroups.
@@ -51,14 +51,6 @@ getPartitionableLoopsImpl(linalg::LinalgOp linalgOp,
  return parallelLoops;
}

static llvm::SmallVector<utils::IteratorType>
getIteratorTypesFromAttr(ArrayAttr iteratorTypesAttr) {
  return llvm::map_to_vector(iteratorTypesAttr, [](Attribute attr) {
    return utils::symbolizeIteratorType(llvm::cast<StringAttr>(attr).getValue())
        .value();
  });
}

/// External model implementation for all LinalgOps.
template <typename OpTy>
struct LinalgOpPartitionableLoops
19 changes: 0 additions & 19 deletions compiler/src/iree/compiler/Codegen/LLVMGPU/KernelConfig.cpp
@@ -681,25 +681,6 @@ static LogicalResult setRootDefaultConfig(func::FuncOp entryPoint,
passPipeline, workgroupSize);
}

/// Return the size of the given dimension in the linalg op.
// TODO: this should be part of the LinalgOp interface; the equivalent member
// function currently only supports the case where all the dimensions are
// static, while we want to support dynamic shapes.
static std::optional<int64_t> getLinalgDimSize(linalg::LinalgOp op, int64_t d) {
  for (auto [mapIdx, map] : llvm::enumerate(op.getIndexingMapsArray())) {
    for (auto [dimIdx, dim] : llvm::enumerate(map.getResults())) {
      auto expr = dim.dyn_cast<AffineDimExpr>();
      if (expr && expr.getPosition() == d) {
        auto type = llvm::cast<ShapedType>(op->getOperand(mapIdx).getType());
        if (type.isDynamicDim(dimIdx))
          return std::nullopt;
        return type.getDimSize(dimIdx);
      }
    }
  }
  return std::nullopt;
}
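The lookup scans the operands' indexing maps for loop dimension d expressed as a plain dim, then reads the matching operand's static shape at that position, bailing out on dynamic sizes. A flattened sketch under assumed encodings (maps and shapes as int vectors, -1 marking non-dim or dynamic entries):

#include <cstdint>
#include <optional>
#include <vector>

// Hedged sketch: operand.map[pos] names the loop dim read at shape position
// pos (-1 if the map expression there is not a plain dim); operand.shape[pos]
// is the static size (-1 if dynamic).
struct OperandSketch {
  std::vector<int64_t> map;
  std::vector<int64_t> shape;
};

std::optional<int64_t>
linalgDimSizeSketch(const std::vector<OperandSketch> &operands, int64_t d) {
  for (const auto &operand : operands) {
    for (size_t pos = 0; pos < operand.map.size(); ++pos) {
      if (operand.map[pos] != d)
        continue;
      if (operand.shape[pos] < 0)
        return std::nullopt; // dynamic dimension: size unknown
      return operand.shape[pos];
    }
  }
  return std::nullopt; // loop dim never appears as a plain dim expression
}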

/// Set configuration for transform dialect based strategies.
static LogicalResult setTransformDialectConfig(func::FuncOp entryPoint,
                                               Operation *op,
@@ -756,15 +756,6 @@ static int isSingleLaneIdReduced(std::array<int, 4> &order) {
  return count == 1;
}

static int getVecSizes(std::array<int, 4> &order, const Layout &layout) {
  int size = 1;
  for (int i = 0; i < 4; i++) {
    if (isVectorId(i))
      size *= layout.shape[i];
  }
  return size;
}

using bodyType = std::function<void(std::array<int, DimType::NumDims> &)>;

/// This function iterates over the dimensions of a given column/row order
@@ -138,23 +138,6 @@ std::pair<Value, Value> mlir::iree_compiler::cpu::buildCommonTrailingStrategy(
// user-friendliness.
//===----------------------------------------------------------------------===//

/// Placeholder to encode fixed reductions that should take finer-grained
/// precedence over other heuristics. In the future, this could be lifted to
/// e.g. `cpuModel` or higher up in some transform dialect database summary of
/// "known good things".
static FailureOr<ReductionConfig> applyKnownGoodReductionConfigurations(
    const transform_ext::MatchedReductionCaptures &captures,
    const CPUModel &cpuModel) {
  int64_t reductionSize = captures.reductionOpSizes.back();
  if (cpuModel.model == CPUModel::kDefaultCPU) {
    if (captures.reductionOutputElementalTypeBitWidth == 32) {
      if (reductionSize == 32)
        return ReductionConfig{/*vectorSize=*/32};
    }
  }
  return failure();
}
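The helper is a pinned-configuration lookup consulted ahead of the general heuristic: an exact (CPU model, element bit width, reduction size) match returns a fixed config, and a miss falls through like failure(). A table-driven sketch of the same idea (hypothetical names):

#include <cstdint>
#include <optional>

// Hedged sketch: pinned "known good" reduction configs consulted before any
// general heuristic; a miss falls through to the heuristic path.
struct ReductionConfigSketch { int64_t vectorSize; };
struct KnownGoodEntry {
  int64_t elementBitWidth;
  int64_t reductionSize;
  ReductionConfigSketch config;
};

static constexpr KnownGoodEntry kKnownGood[] = {
    {/*bitWidth=*/32, /*reductionSize=*/32, {/*vectorSize=*/32}},
};

std::optional<ReductionConfigSketch>
lookupKnownGood(int64_t bitWidth, int64_t reductionSize) {
  for (const auto &entry : kKnownGood)
    if (entry.elementBitWidth == bitWidth &&
        entry.reductionSize == reductionSize)
      return entry.config;
  return std::nullopt; // fall through to the general heuristic
}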

static ReductionConfig
getReductionConfig(const transform_ext::MatchedReductionCaptures &captures,
                   const CPUModel &cpuModel) {
@@ -390,41 +390,6 @@ struct ReductionSplitResult {
};
} // namespace

/// Builds transform IR requesting to bubble up the "expand_shape" operation
/// produced as parent of reduction splitting if necessary for fusion of the
/// leading elementwise operation.
// TODO: consider passing a problem-specific struct to control information.
static ReductionSplitResult
createBubbleExpand(ImplicitLocOpBuilder &b, Value variantH,
                   SplitReductionOp splitReductionTransformOp,
                   bool hasLeadingEltwise, bool hasTrailingEltwise) {
  ReductionSplitResult result;
  if (!hasLeadingEltwise) {
    result.splitFillH = splitReductionTransformOp.getFillOp();
    result.splitLinalgH = splitReductionTransformOp.getSplitLinalgOp();
    result.combinerH = splitReductionTransformOp.getCombiningLinalgOp();
    return result;
  }

  auto funcH = b.create<MatchOp>(variantH, func::FuncOp::getOperationName());
  b.create<transform::ApplyPatternsOp>(funcH, [](OpBuilder &b, Location loc) {
    b.create<
        iree_compiler::IREE::transform_dialect::ApplyBubbleExpandPatternsOp>(
        loc);
  });
  std::tie(result.originalFillH, result.splitFillH) =
      matchAndUnpack<2>(b, variantH, linalg::FillOp::getOperationName());
  if (hasTrailingEltwise) {
    std::tie(result.leadingEltwiseH, result.splitLinalgH, result.combinerH,
             result.trailingEltwiseH) =
        matchAndUnpack<4>(b, variantH, linalg::GenericOp::getOperationName());
  } else {
    std::tie(result.leadingEltwiseH, result.splitLinalgH, result.combinerH) =
        matchAndUnpack<3>(b, variantH, linalg::GenericOp::getOperationName());
  }
  return result;
}
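The branching is handle bookkeeping: without a leading elementwise op the three handles come straight off the split op; otherwise, after the bubbling patterns run, the code expects exactly three matched linalg.generic ops, or four when a trailing elementwise op is present, and assigns them positionally. That arity logic in isolation (hedged sketch, integer ids standing in for transform handles):

#include <cassert>
#include <vector>

// Hedged sketch of the unpack bookkeeping: 3 matched generics (leading
// eltwise, split reduction, combiner) or 4 when a trailing eltwise exists.
struct SplitHandles {
  int leadingEltwise = -1, splitLinalg = -1, combiner = -1,
      trailingEltwise = -1;
};

SplitHandles unpackMatches(const std::vector<int> &matchedGenerics,
                           bool hasTrailingEltwise) {
  SplitHandles result;
  assert(matchedGenerics.size() == (hasTrailingEltwise ? 4u : 3u) &&
         "unexpected number of matched generic ops");
  result.leadingEltwise = matchedGenerics[0];
  result.splitLinalg = matchedGenerics[1];
  result.combiner = matchedGenerics[2];
  if (hasTrailingEltwise)
    result.trailingEltwise = matchedGenerics[3];
  return result;
}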

/// Build transform IR to split the reduction into a parallel and combiner part.
/// Then tile the parallel part and map it to `tileSize` threads, each reducing
/// on `vectorSize` elements.
17 changes: 0 additions & 17 deletions compiler/src/iree/compiler/Dialect/Flow/IR/FlowOpFolders.cpp
@@ -132,23 +132,6 @@ struct ReplaceOpIfTensorOperandEmpty : public OpRewritePattern<Op> {
}
};

// Turns a tensor type that may have one or more dynamic dimensions into a
// static type with dynamic dimensions replaced with 0.
// Example: tensor<?x0x1xf32> -> tensor<0x0x1xf32>
static Type makeZeroElementsStaticTensorType(Type type) {
  auto tensorType = llvm::cast<RankedTensorType>(type);
  if (tensorType.hasStaticShape())
    return type;
  SmallVector<int64_t> dims;
  dims.resize(tensorType.getRank());
  for (int64_t i = 0; i < tensorType.getRank(); ++i) {
    int64_t dim = tensorType.getDimSize(i);
    dims[i] = dim == ShapedType::kDynamic ? 0 : dim;
  }
  return RankedTensorType::get(dims, tensorType.getElementType(),
                               tensorType.getEncoding());
}
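The shape rewrite is a pointwise sentinel substitution: every dynamic extent becomes 0, which pins the tensor's element count to zero and lets folders treat it as empty. A standalone sketch with -1 standing in for ShapedType::kDynamic:

#include <cstdint>
#include <vector>

// Hedged sketch of makeZeroElementsStaticTensorType's shape logic: replace
// every dynamic extent (-1 here, standing in for ShapedType::kDynamic) with
// 0, making the shape static with zero total elements.
std::vector<int64_t> zeroOutDynamicDims(std::vector<int64_t> shape) {
  for (int64_t &dim : shape)
    if (dim == -1)
      dim = 0;
  return shape;
}
// e.g. {-1, 0, 1} ("?x0x1") -> {0, 0, 1} ("0x0x1").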

// Returns a new set of dynamic dimensions for a shape carrying op when a type
// is being changed. This attempts to reuse the existing dimension values if
// they are available and will drop/insert new ones as required.
@@ -158,15 +158,6 @@ static void appendToFusionGroup(Operation *op, ArrayRef<int64_t> newGroups) {
  fusionGroups.append(newGroups.begin(), newGroups.end());
  op->setAttr(kFusionGroupsAttr, Builder(op).getI64ArrayAttr(fusionGroups));
}

/// Returns true if the given `op` is in the `targetGroup` fusion group.
static bool isInFusionGroup(Operation *op, unsigned targetGroup) {
  if (ArrayAttr opGroupAttr = op->getAttrOfType<ArrayAttr>(kFusionGroupsAttr)) {
    return llvm::any_of(opGroupAttr, [&targetGroup](Attribute attr) {
      return llvm::cast<IntegerAttr>(attr).getInt() == targetGroup;
    });
  }
  return false;
}
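Membership is a linear scan of the group ids attached to the op, with a missing attribute meaning "in no group". Equivalent logic over a plain optional vector (hedged sketch):

#include <algorithm>
#include <cstdint>
#include <optional>
#include <vector>

// Hedged sketch: the fusion-groups attribute is just a list of group ids on
// the op; membership is any-of over that list, and a missing attribute
// (nullopt) means "in no group".
bool isInFusionGroupSketch(
    const std::optional<std::vector<int64_t>> &fusionGroups,
    int64_t targetGroup) {
  if (!fusionGroups)
    return false;
  return std::find(fusionGroups->begin(), fusionGroups->end(), targetGroup) !=
         fusionGroups->end();
}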
/// Removes the fusion groups attribute.
static void removeFusionGroupsAttribute(Operation *op) {
  op->removeAttr(kFusionGroupsAttr);