From 25de54960ea4c537833d85f471169c43ad19e87b Mon Sep 17 00:00:00 2001
From: Han-Chung Wang
Date: Mon, 16 Dec 2024 06:45:28 -0800
Subject: [PATCH] [NFC] Delete outdated e2e encoding tests. (#19487)

The test suite was introduced for GPU matmul data-tiling when the multi_mma
code generation was not ready yet. There is an e2e path in IREE now (see
tests/e2e/matmul/), so the suite is no longer needed.

Signed-off-by: hanhanW
---
 tests/e2e/rocm_specific/BUILD.bazel    |  24 ---
 tests/e2e/rocm_specific/CMakeLists.txt |  26 ---
 tests/e2e/rocm_specific/encoding.mlir  | 232 -------------------------
 3 files changed, 282 deletions(-)
 delete mode 100644 tests/e2e/rocm_specific/BUILD.bazel
 delete mode 100644 tests/e2e/rocm_specific/CMakeLists.txt
 delete mode 100644 tests/e2e/rocm_specific/encoding.mlir

diff --git a/tests/e2e/rocm_specific/BUILD.bazel b/tests/e2e/rocm_specific/BUILD.bazel
deleted file mode 100644
index 438fb0728d67..000000000000
--- a/tests/e2e/rocm_specific/BUILD.bazel
+++ /dev/null
@@ -1,24 +0,0 @@
-# Copyright 2024 The IREE Authors
-#
-# Licensed under the Apache License v2.0 with LLVM Exceptions.
-# See https://llvm.org/LICENSE.txt for license information.
-# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-
-# Tests for end-to-end IREE support specific to the ROCm/HIP lowering.
-
-load("//build_tools/bazel:iree_check_test.bzl", "iree_check_single_backend_test_suite")
-
-package(
-    features = ["layering_check"],
-    licenses = ["notice"],  # Apache 2.0
-)
-
-iree_check_single_backend_test_suite(
-    name = "check_rocm_hip",
-    srcs = ["encoding.mlir"],
-    compiler_flags = [
-        "--iree-global-opt-experimental-rocm-data-tiling",
-    ],
-    driver = "hip",
-    target_backend = "rocm",
-)
diff --git a/tests/e2e/rocm_specific/CMakeLists.txt b/tests/e2e/rocm_specific/CMakeLists.txt
deleted file mode 100644
index c428b12fc7f7..000000000000
--- a/tests/e2e/rocm_specific/CMakeLists.txt
+++ /dev/null
@@ -1,26 +0,0 @@
-################################################################################
-# Autogenerated by build_tools/bazel_to_cmake/bazel_to_cmake.py from           #
-# tests/e2e/rocm_specific/BUILD.bazel                                          #
-#                                                                              #
-# Use iree_cmake_extra_content from iree/build_defs.oss.bzl to add arbitrary   #
-# CMake-only content.                                                          #
-#                                                                              #
-# To disable autogeneration for this file entirely, delete this header.        #
-################################################################################
-
-iree_add_all_subdirs()
-
-iree_check_single_backend_test_suite(
-  NAME
-    check_rocm_hip
-  SRCS
-    "encoding.mlir"
-  TARGET_BACKEND
-    "rocm"
-  DRIVER
-    "hip"
-  COMPILER_FLAGS
-    "--iree-global-opt-experimental-rocm-data-tiling"
-)
-
-### BAZEL_TO_CMAKE_PRESERVES_ALL_CONTENT_BELOW_THIS_LINE ###
diff --git a/tests/e2e/rocm_specific/encoding.mlir b/tests/e2e/rocm_specific/encoding.mlir
deleted file mode 100644
index 2718f9d00805..000000000000
--- a/tests/e2e/rocm_specific/encoding.mlir
+++ /dev/null
@@ -1,232 +0,0 @@
-//===----------------------------------------------------------------------===//
-// Utility Methods
-//===----------------------------------------------------------------------===//
-
-func.func private @generate_2D_source_f16(%height : index, %width : index) -> tensor<?x?xf16> {
-  %init_source = tensor.empty(%height, %width) : tensor<?x?xf16>
-  %source = linalg.generic {
-      indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>],
-      iterator_types = ["parallel", "parallel"]}
-      outs(%init_source : tensor<?x?xf16>) {
-    ^bb0(%b0 : f16):
-      %outer = linalg.index 0 : index
-      %inner = linalg.index 1 : index
-      %strided = arith.muli %outer, %width : index
-      %linearized = arith.addi %inner, %strided : index
-      %linearized_i16 = arith.index_cast %linearized : index to i16
-      %linearized_f16 = arith.sitofp %linearized_i16 : i16 to f16
-      linalg.yield %linearized_f16 : f16
-  } -> tensor<?x?xf16>
-  // This blocks the fusion for inputs and testing ops.
-  %0 = util.optimization_barrier %source : tensor<?x?xf16>
-  %1 = flow.tensor.tie_shape %0 : tensor<?x?xf16>{%height, %width}
-  return %1 : tensor<?x?xf16>
-}
-
-func.func private @generate_2D_source_f32(%height : index, %width : index) -> tensor<?x?xf32> {
-  %init_source = tensor.empty(%height, %width) : tensor<?x?xf32>
-  %source = linalg.generic {
-      indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>],
-      iterator_types = ["parallel", "parallel"]}
-      outs(%init_source : tensor<?x?xf32>) {
-    ^bb0(%b0 : f32):
-      %outer = linalg.index 0 : index
-      %inner = linalg.index 1 : index
-      %strided = arith.muli %outer, %width : index
-      %linearized = arith.addi %inner, %strided : index
-      %linearized_i32 = arith.index_cast %linearized : index to i32
-      %linearized_f32 = arith.sitofp %linearized_i32 : i32 to f32
-      linalg.yield %linearized_f32 : f32
-  } -> tensor<?x?xf32>
-  // This blocks the fusion for inputs and testing ops.
-  %0 = util.optimization_barrier %source : tensor<?x?xf32>
-  %1 = flow.tensor.tie_shape %0 : tensor<?x?xf32>{%height, %width}
-  return %1 : tensor<?x?xf32>
-}
-
-func.func private @generate_2D_source_i8(%height : index, %width : index) -> tensor<?x?xi8> {
-  %c255 = arith.constant 255 : index
-  %init_source = tensor.empty(%height, %width) : tensor<?x?xi8>
-  %source = linalg.generic {
-      indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>],
-      iterator_types = ["parallel", "parallel"]}
-      outs(%init_source : tensor<?x?xi8>) {
-    ^bb0(%b0 : i8):
-      %outer = linalg.index 0 : index
-      %inner = linalg.index 1 : index
-      %strided = arith.muli %outer, %width : index
-      %linearized = arith.addi %inner, %strided : index
-      %linearized_rem = arith.remsi %linearized, %c255 : index
-      %linearized_i8 = arith.index_cast %linearized_rem : index to i8
-      linalg.yield %linearized_i8 : i8
-  } -> tensor<?x?xi8>
-  // This blocks the fusion for inputs and testing ops.
-  %0 = util.optimization_barrier %source : tensor<?x?xi8>
-  %1 = flow.tensor.tie_shape %0 : tensor<?x?xi8>{%height, %width}
-  return %1 : tensor<?x?xi8>
-}
-
-func.func private @generate_2D_source_i32(%height : index, %width : index) -> tensor<?x?xi32> {
-  %init_source = tensor.empty(%height, %width) : tensor<?x?xi32>
-  %source = linalg.generic {
-      indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>],
-      iterator_types = ["parallel", "parallel"]}
-      outs(%init_source : tensor<?x?xi32>) {
-    ^bb0(%b0 : i32):
-      %outer = linalg.index 0 : index
-      %inner = linalg.index 1 : index
-      %strided = arith.muli %outer, %width : index
-      %linearized = arith.addi %inner, %strided : index
-      %linearized_i32 = arith.index_cast %linearized : index to i32
-      linalg.yield %linearized_i32 : i32
-  } -> tensor<?x?xi32>
-  // This blocks the fusion for inputs and testing ops.
-  %0 = util.optimization_barrier %source : tensor<?x?xi32>
-  %1 = flow.tensor.tie_shape %0 : tensor<?x?xi32>{%height, %width}
-  return %1 : tensor<?x?xi32>
-}
-
-//===----------------------------------------------------------------------===//
-// f32.f32.f32 variants
-//===----------------------------------------------------------------------===//
-
-#map = affine_map<(d0, d1, d2) -> (d0, d2)>
-#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
-#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
-#encoding_f32f32f32_lhs = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2]>
-#encoding_f32f32f32_rhs = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2]>
-#encoding_f32f32f32_acc = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2]>
-
-func.func @set_encoding_f32f32f32_lhs() {
-  %height = arith.constant 129 : index
-  %width = arith.constant 255 : index
-  %0 = call @generate_2D_source_f32(%height, %width) : (index, index) -> tensor<?x?xf32>
-  %source = tensor.cast %0 : tensor<?x?xf32> to tensor<129x255xf32>
-
-  %1 = iree_encoding.set_encoding %source : tensor<129x255xf32> -> tensor<129x255xf32, #encoding_f32f32f32_lhs>
-  %barrier = util.optimization_barrier %1 : tensor<129x255xf32, #encoding_f32f32f32_lhs>
-  %2 = iree_encoding.unset_encoding %1 : tensor<129x255xf32, #encoding_f32f32f32_lhs> -> tensor<129x255xf32>
-  check.expect_almost_eq(%2, %source) : tensor<129x255xf32>
-  return
-}
-
-func.func @set_encoding_f32f32f32_rhs() {
-  %height = arith.constant 129 : index
-  %width = arith.constant 255 : index
-  %0 = call @generate_2D_source_f32(%height, %width) : (index, index) -> tensor<?x?xf32>
-  %source = tensor.cast %0 : tensor<?x?xf32> to tensor<129x255xf32>
-
-  %1 = iree_encoding.set_encoding %source : tensor<129x255xf32> -> tensor<129x255xf32, #encoding_f32f32f32_rhs>
-  %barrier = util.optimization_barrier %1 : tensor<129x255xf32, #encoding_f32f32f32_rhs>
-  %2 = iree_encoding.unset_encoding %1 : tensor<129x255xf32, #encoding_f32f32f32_rhs> -> tensor<129x255xf32>
-  check.expect_almost_eq(%2, %source) : tensor<129x255xf32>
-  return
-}
-
-func.func @set_encoding_f32f32f32_acc() {
-  %height = arith.constant 129 : index
-  %width = arith.constant 255 : index
-  %0 = call @generate_2D_source_f32(%height, %width) : (index, index) -> tensor<?x?xf32>
-  %source = tensor.cast %0 : tensor<?x?xf32> to tensor<129x255xf32>
-
-  %1 = iree_encoding.set_encoding %source : tensor<129x255xf32> -> tensor<129x255xf32, #encoding_f32f32f32_acc>
-  %barrier = util.optimization_barrier %1 : tensor<129x255xf32, #encoding_f32f32f32_acc>
-  %2 = iree_encoding.unset_encoding %1 : tensor<129x255xf32, #encoding_f32f32f32_acc> -> tensor<129x255xf32>
-  check.expect_almost_eq(%2, %source) : tensor<129x255xf32>
-  return
-}
-
-//===----------------------------------------------------------------------===//
-// i8.i8.i32 variants
-//===----------------------------------------------------------------------===//
-
-#encoding_i8i8i32_lhs = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [i8, i8, i32], user_indexing_maps = [#map, #map1, #map2]>
-#encoding_i8i8i32_rhs = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [i8, i8, i32], user_indexing_maps = [#map, #map1, #map2]>
-#encoding_i8i8i32_acc = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [i8, i8, i32], user_indexing_maps = [#map, #map1, #map2]>
-
-func.func @set_encoding_i8i8i32_lhs() {
-  %height = arith.constant 129 : index
-  %width = arith.constant 255 : index
-  %0 = call @generate_2D_source_i8(%height, %width) : (index, index) -> tensor<?x?xi8>
-  %source = tensor.cast %0 : tensor<?x?xi8> to tensor<129x255xi8>
-
-  %1 = iree_encoding.set_encoding %source : tensor<129x255xi8> -> tensor<129x255xi8, #encoding_i8i8i32_lhs>
-  %barrier = util.optimization_barrier %1 : tensor<129x255xi8, #encoding_i8i8i32_lhs>
-  %2 = iree_encoding.unset_encoding %1 : tensor<129x255xi8, #encoding_i8i8i32_lhs> -> tensor<129x255xi8>
-  check.expect_eq(%2, %source) : tensor<129x255xi8>
-  return
-}
-
-func.func @set_encoding_i8i8i32_rhs() {
-  %height = arith.constant 129 : index
-  %width = arith.constant 255 : index
-  %0 = call @generate_2D_source_i8(%height, %width) : (index, index) -> tensor<?x?xi8>
-  %source = tensor.cast %0 : tensor<?x?xi8> to tensor<129x255xi8>
-
-  %1 = iree_encoding.set_encoding %source : tensor<129x255xi8> -> tensor<129x255xi8, #encoding_i8i8i32_rhs>
-  %barrier = util.optimization_barrier %1 : tensor<129x255xi8, #encoding_i8i8i32_rhs>
-  %2 = iree_encoding.unset_encoding %1 : tensor<129x255xi8, #encoding_i8i8i32_rhs> -> tensor<129x255xi8>
-  check.expect_eq(%2, %source) : tensor<129x255xi8>
-  return
-}
-
-func.func @set_encoding_i8i8i32_acc() {
-  %height = arith.constant 129 : index
-  %width = arith.constant 255 : index
-  %0 = call @generate_2D_source_i32(%height, %width) : (index, index) -> tensor<?x?xi32>
-  %source = tensor.cast %0 : tensor<?x?xi32> to tensor<129x255xi32>
-
-  %1 = iree_encoding.set_encoding %source : tensor<129x255xi32> -> tensor<129x255xi32, #encoding_i8i8i32_acc>
-  %barrier = util.optimization_barrier %1 : tensor<129x255xi32, #encoding_i8i8i32_acc>
-  %2 = iree_encoding.unset_encoding %1 : tensor<129x255xi32, #encoding_i8i8i32_acc> -> tensor<129x255xi32>
-  check.expect_eq(%2, %source) : tensor<129x255xi32>
-  return
-}
-
-
-//===----------------------------------------------------------------------===//
-// f16.f16.f32 variants
-//===----------------------------------------------------------------------===//
-
-#encoding_f16f16f32_lhs = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f16, f16, f32], user_indexing_maps = [#map, #map1, #map2]>
-#encoding_f16f16f32_rhs = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f16, f16, f32], user_indexing_maps = [#map, #map1, #map2]>
-#encoding_f16f16f32_acc = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f16, f16, f32], user_indexing_maps = [#map, #map1, #map2]>
-
-func.func @set_encoding_f16f16f32_lhs() {
-  %height = arith.constant 129 : index
-  %width = arith.constant 255 : index
-  %0 = call @generate_2D_source_f16(%height, %width) : (index, index) -> tensor<?x?xf16>
-  %source = tensor.cast %0 : tensor<?x?xf16> to tensor<129x255xf16>
-
-  %1 = iree_encoding.set_encoding %source : tensor<129x255xf16> -> tensor<129x255xf16, #encoding_f16f16f32_lhs>
-  %barrier = util.optimization_barrier %1 : tensor<129x255xf16, #encoding_f16f16f32_lhs>
-  %2 = iree_encoding.unset_encoding %1 : tensor<129x255xf16, #encoding_f16f16f32_lhs> -> tensor<129x255xf16>
-  check.expect_eq(%2, %source) : tensor<129x255xf16>
-  return
-}
-
-func.func @set_encoding_f16f16f32_rhs() {
-  %height = arith.constant 129 : index
-  %width = arith.constant 255 : index
-  %0 = call @generate_2D_source_f16(%height, %width) : (index, index) -> tensor<?x?xf16>
-  %source = tensor.cast %0 : tensor<?x?xf16> to tensor<129x255xf16>
-
-  %1 = iree_encoding.set_encoding %source : tensor<129x255xf16> -> tensor<129x255xf16, #encoding_f16f16f32_rhs>
-  %barrier = util.optimization_barrier %1 : tensor<129x255xf16, #encoding_f16f16f32_rhs>
-  %2 = iree_encoding.unset_encoding %1 : tensor<129x255xf16, #encoding_f16f16f32_rhs> -> tensor<129x255xf16>
-  check.expect_eq(%2, %source) : tensor<129x255xf16>
-  return
-}
-
-func.func @set_encoding_f16f16f32_acc() {
-  %height = arith.constant 129 : index
-  %width = arith.constant 255 : index
-  %0 = call @generate_2D_source_f32(%height, %width) : (index, index) -> tensor<?x?xf32>
-  %source = tensor.cast %0 : tensor<?x?xf32> to tensor<129x255xf32>
-
-  %1 = iree_encoding.set_encoding %source : tensor<129x255xf32> -> tensor<129x255xf32, #encoding_f16f16f32_acc>
-  %barrier = util.optimization_barrier %1 : tensor<129x255xf32, #encoding_f16f16f32_acc>
-  %2 = iree_encoding.unset_encoding %1 : tensor<129x255xf32, #encoding_f16f16f32_acc> -> tensor<129x255xf32>
-  check.expect_eq(%2, %source) : tensor<129x255xf32>
-  return
-}