diff --git a/tuner/examples/simple/simple_tuner.py b/tuner/examples/simple/simple_tuner.py
index 9cc962427..43d230369 100644
--- a/tuner/examples/simple/simple_tuner.py
+++ b/tuner/examples/simple/simple_tuner.py
@@ -75,7 +75,7 @@ def main():
     path_config = libtuner.PathConfig()
     path_config.base_dir.mkdir(parents=True, exist_ok=True)
-    path_config._set_tune_output(Path("autotune_output.txt"))
+    # path_config._set_tune_output(Path("autotune_output.txt"))
     # TODO(Max191): Make candidate_trackers internal to TuningClient.
     candidate_trackers: list[libtuner.CandidateTracker] = []
     stop_after_phase: str = args.stop_after
@@ -115,6 +115,8 @@ def main():
         return

     print("Benchmarking compiled dispatch candidates...")
+    with open(path_config.output_dir, "w") as file:
+        file.write(f"Summary of top dispatch candidates:\n")
     simple_tuner.benchmark_flags = ["--input=1", "--benchmark_repetitions=3"]
     top_candidates = libtuner.benchmark(
         args,
@@ -124,6 +126,10 @@ def main():
         simple_tuner,
         args.simple_num_dispatch_candidates,
     )
+    with open(path_config.output_dir, "a") as file:
+        file.write(f"Top dispatch candidates: {top_candidates}\n")
+        for id in top_candidates:
+            file.write(f"{candidate_trackers[id].spec_path.resolve()}\n")

     if stop_after_phase == libtuner.ExecutionPhases.benchmark_dispatches:
         return
@@ -142,6 +148,8 @@ def main():
         return

     print("Benchmarking compiled model candidates...")
+    with open(path_config.output_dir, "a") as file:
+        file.write(f"Summary of top model candidates:\n")
     simple_tuner.benchmark_flags = model_benchmark_flags
     simple_tuner.benchmark_timeout = 60
     top_model_candidates = libtuner.benchmark(
@@ -152,18 +160,14 @@ def main():
         simple_tuner,
         args.simple_num_model_candidates,
     )
-
+    with open(path_config.output_dir, "a") as file:
+        file.write(f"Top model candidates: {top_model_candidates}\n")
+        for id in top_model_candidates:
+            file.write(f"{candidate_trackers[id].spec_path.resolve()}\n")
     print(f"Top model candidates: {top_model_candidates}")
     print("Check the detailed execution logs in:")
     print(path_config.run_log.resolve())
     print("Check the tuning results in:")
-    print(path_config.tune_output.resolve())
-    with open(path_config.tune_output, "w") as file:
-        file.write(f"Top dispatch candidates: {top_candidates}\n")
-        for id in top_candidates:
-            file.write(f"{candidate_trackers[id].spec_path.resolve()}\n")
-        file.write(f"Top model candidates: {top_model_candidates}\n")
-        for id in top_model_candidates:
-            file.write(f"{candidate_trackers[id].spec_path.resolve()}\n")
+    print(path_config.output_dir.resolve())
diff --git a/tuner/tuner/libtuner.py b/tuner/tuner/libtuner.py
index 4aae8b63f..5c7af1086 100644
--- a/tuner/tuner/libtuner.py
+++ b/tuner/tuner/libtuner.py
@@ -76,10 +76,10 @@ class PathConfig:
     candidates_dir: Path = field(init=False)
     compiled_dir: Path = field(init=False)
     specs_dir: Path = field(init=False)
+    output_dir: Path = field(init=False)

     # To be set outside of class
     run_log: Optional[Path] = field(init=False, default=None)
-    tune_output: Optional[Path] = field(init=False, default=None)

     def __post_init__(self):
         object.__setattr__(self, "base_dir", self._name_base_dir())
@@ -87,6 +87,7 @@ def __post_init__(self):
         object.__setattr__(self, "candidates_dir", self.base_dir / "candidates")
         object.__setattr__(self, "compiled_dir", self.candidates_dir / "compiled")
         object.__setattr__(self, "specs_dir", self.candidates_dir / "specs")
+        object.__setattr__(self, "output_dir", self.base_dir / "summary.log")

     def _name_base_dir(self) -> Path:
         timestamp = datetime.now().strftime("%Y_%m_%d_%H_%M")
@@ -96,9 +97,6 @@ def _name_base_dir(self) -> Path:
     def _set_run_log(self, run_log: Path):
         object.__setattr__(self, "run_log", run_log)

-    def _set_tune_output(self, tune_output: Path):
-        object.__setattr__(self, "tune_output", self.base_dir / tune_output)
-
     def get_candidate_spec_filename(self, candidate_id: int) -> str:
         return f"{candidate_id}_spec.mlir"
@@ -725,6 +723,12 @@ def generate_candidate_specs(
     return candidates


+def get_compilation_success_rate(compiled_candidates: list[Any]) -> float:
+    successful_candidates = [c for c in compiled_candidates if c is not None]
+    success_rate = float(len(successful_candidates)) / float(len(compiled_candidates))
+    return success_rate
+
+
 def collision_handler(index_hash_list: list[tuple[int, str]]) -> tuple[bool, list[int]]:
     """If a collision is found, generate a list of new indexes. If no collision, `unique_indexes = []`"""
     # Check if candidate produces tbe same .vmfb
@@ -804,11 +808,11 @@ def compile(
     compiled_candidates = multiprocess_progress_wrapper(
         num_worker=num_worker, task_list=task_list, function=run_iree_compile_command
     )
-    compiled_candidates = [c for c in compiled_candidates if c is not None]
-    success_rate = float(len(compiled_candidates)) / float(len(task_list))
+    success_rate = get_compilation_success_rate(compiled_candidates)
     logging.info(
         f"Successfully compiled [{len(compiled_candidates)}] candidates. Success rate: {success_rate:.2f}"
     )
+    compiled_candidates = [c for c in compiled_candidates if c is not None]

     # Remove duplicate vmfbs from the candidate list.
     compiled_candidate_hashes = []
@@ -830,6 +834,7 @@ def select_best_benchmark_results(
     candidate_results: list[BenchmarkResult],
     baseline_results: list[BenchmarkResult],
     num_candidates: Optional[int],
+    path_config: Optional[PathConfig] = None,
 ) -> list[BenchmarkResult]:
     filtered_candidate_results = [r for r in candidate_results if math.isfinite(r.time)]
     if len(filtered_candidate_results) == 0:
@@ -874,12 +879,19 @@ def get_speedup(result: BenchmarkResult) -> float:
     ]
     logging.info(f"Selected top[{len(best_results)}]:")
+    results = []
     for r in best_results:
         if fallback_baseline_time is not None:
             speedup = f"{round(get_speedup(r) * 100, 2)}% of baseline"
         else:
             speedup = "baseline unavailable"
-        logging.info(f"Candidate {r.candidate_id} time: {r.time:.2f} ({speedup})")
+        result = f"Candidate {r.candidate_id} time: {r.time:.2f} ({speedup})"
+        logging.info(result)
+        results.append(result)
+
+    if path_config is not None:
+        with open(path_config.output_dir, "a") as file:
+            file.writelines(f"{result}\n" for result in results)

     return best_results
@@ -935,6 +947,7 @@ def benchmark(
         candidate_results=candidate_results,
         baseline_results=baseline_results,
         num_candidates=num_candidates,
+        path_config=path_config,
     )
     top_candidates = [result.candidate_id for result in best_results]
diff --git a/tuner/tuner/libtuner_test.py b/tuner/tuner/libtuner_test.py
index cad57a3cd..43e048f0e 100644
--- a/tuner/tuner/libtuner_test.py
+++ b/tuner/tuner/libtuner_test.py
@@ -176,6 +176,17 @@ def test_validate_devices_with_invalid_device() -> None:
     assert expected_call in mock_handle_error.call_args_list


+def test_get_compilation_success_rate():
+    compiled_candidates = [0, None, 2, None, 4]
+    assert libtuner.get_compilation_success_rate(compiled_candidates) == 3.0 / 5.0
+
+    compiled_candidates = [0, 1, 2, 3, 4]
+    assert libtuner.get_compilation_success_rate(compiled_candidates) == 1.0
+
+    compiled_candidates = [None, None, None]
+    assert libtuner.get_compilation_success_rate(compiled_candidates) == 0.0
+
+
 def test_select_best_benchmark_results() -> None:
     candidate_results = [
         libtuner.BenchmarkResult(1, 0.5, "hip://0"),