Commit

address reviewer comments
Signed-off-by: Bangtian Liu <liubangtian@gmail.com>
bangtianliu committed Jan 13, 2025
1 parent 261ad2e commit 083ca1c
Showing 3 changed files with 45 additions and 17 deletions.
24 changes: 14 additions & 10 deletions tuner/examples/simple/simple_tuner.py
@@ -75,7 +75,7 @@ def main():

path_config = libtuner.PathConfig()
path_config.base_dir.mkdir(parents=True, exist_ok=True)
path_config._set_tune_output(Path("autotune_output.txt"))
# path_config._set_tune_output(Path("autotune_output.txt"))
# TODO(Max191): Make candidate_trackers internal to TuningClient.
candidate_trackers: list[libtuner.CandidateTracker] = []
stop_after_phase: str = args.stop_after
@@ -115,6 +115,8 @@ def main():
return

print("Benchmarking compiled dispatch candidates...")
with open(path_config.output_dir, "w") as file:
file.write(f"Summarization about top dispatch candidates:\n")
simple_tuner.benchmark_flags = ["--input=1", "--benchmark_repetitions=3"]
top_candidates = libtuner.benchmark(
args,
@@ -124,6 +126,10 @@
simple_tuner,
args.simple_num_dispatch_candidates,
)
with open(path_config.output_dir, "a") as file:
file.write(f"Top dispatch candidates: {top_candidates}\n")
for id in top_candidates:
file.write(f"{candidate_trackers[id].spec_path.resolve()}\n")
if stop_after_phase == libtuner.ExecutionPhases.benchmark_dispatches:
return

@@ -142,6 +148,8 @@ def main():
return

print("Benchmarking compiled model candidates...")
with open(path_config.output_dir, "a") as file:
file.write(f"Summarization about top model candidates:\n")
simple_tuner.benchmark_flags = model_benchmark_flags
simple_tuner.benchmark_timeout = 60
top_model_candidates = libtuner.benchmark(
@@ -152,18 +160,14 @@ def main():
simple_tuner,
args.simple_num_model_candidates,
)

with open(path_config.output_dir, "a") as file:
file.write(f"Top model candidates: {top_model_candidates}\n")
for id in top_model_candidates:
file.write(f"{candidate_trackers[id].spec_path.resolve()}\n")
print(f"Top model candidates: {top_model_candidates}")

print("Check the detailed execution logs in:")
print(path_config.run_log.resolve())

print("Check the tuning results in:")
print(path_config.tune_output.resolve())
with open(path_config.tune_output, "w") as file:
file.write(f"Top dispatch candidates: {top_candidates}\n")
for id in top_candidates:
file.write(f"{candidate_trackers[id].spec_path.resolve()}\n")
file.write(f"Top model candidates: {top_model_candidates}\n")
for id in top_model_candidates:
file.write(f"{candidate_trackers[id].spec_path.resolve()}\n")
print(path_config.output_dir.resolve())
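
With these changes, simple_tuner.py stops writing a separate autotune_output.txt and instead appends its dispatch and model summaries to path_config.output_dir, the summary.log created under the tuning base directory (see the PathConfig change below). A minimal sketch of that append pattern, with hypothetical candidate ids and spec paths standing in for the real tracker data:

    from pathlib import Path

    # Hypothetical stand-ins for path_config.output_dir and candidate_trackers.
    output_log = Path("summary.log")
    top_candidates = [3, 7]
    spec_paths = {3: Path("specs/3_spec.mlir"), 7: Path("specs/7_spec.mlir")}

    with open(output_log, "a") as file:
        file.write(f"Top dispatch candidates: {top_candidates}\n")
        for candidate_id in top_candidates:
            file.write(f"{spec_paths[candidate_id].resolve()}\n")
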
27 changes: 20 additions & 7 deletions tuner/tuner/libtuner.py
@@ -76,17 +76,18 @@ class PathConfig:
candidates_dir: Path = field(init=False)
compiled_dir: Path = field(init=False)
specs_dir: Path = field(init=False)
output_dir: Path = field(init=False)

# To be set outside of class
run_log: Optional[Path] = field(init=False, default=None)
tune_output: Optional[Path] = field(init=False, default=None)

def __post_init__(self):
object.__setattr__(self, "base_dir", self._name_base_dir())
object.__setattr__(self, "template_mlir", self.base_dir / "template.mlir")
object.__setattr__(self, "candidates_dir", self.base_dir / "candidates")
object.__setattr__(self, "compiled_dir", self.candidates_dir / "compiled")
object.__setattr__(self, "specs_dir", self.candidates_dir / "specs")
object.__setattr__(self, "output_dir", self.base_dir / "summary.log")

def _name_base_dir(self) -> Path:
timestamp = datetime.now().strftime("%Y_%m_%d_%H_%M")
@@ -96,9 +97,6 @@ def _name_base_dir(self) -> Path:
def _set_run_log(self, run_log: Path):
object.__setattr__(self, "run_log", run_log)

def _set_tune_output(self, tune_output: Path):
object.__setattr__(self, "tune_output", self.base_dir / tune_output)

def get_candidate_spec_filename(self, candidate_id: int) -> str:
return f"{candidate_id}_spec.mlir"

@@ -725,6 +723,12 @@ def generate_candidate_specs(
return candidates


def get_compilation_success_rate(compiled_candidates: list[Any]) -> float:
successful_candidates = [c for c in compiled_candidates if c is not None]
success_rate = float(len(successful_candidates)) / float(len(compiled_candidates))
return success_rate
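
The new helper factors the inline success-rate computation out of compile() (see the hunk below), counting None entries as failed compilations, which makes the calculation unit-testable. A quick usage sketch with made-up values (note that an empty input list would divide by zero, which callers are assumed to avoid):

    compiled = ["0.vmfb", None, "2.vmfb"]          # None marks a failed compile
    rate = get_compilation_success_rate(compiled)  # 2 / 3 ≈ 0.67
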


def collision_handler(index_hash_list: list[tuple[int, str]]) -> tuple[bool, list[int]]:
"""If a collision is found, generate a list of new indexes. If no collision, `unique_indexes = []`"""
# Check if any candidates produce the same .vmfb
@@ -804,11 +808,11 @@ def compile(
compiled_candidates = multiprocess_progress_wrapper(
num_worker=num_worker, task_list=task_list, function=run_iree_compile_command
)
compiled_candidates = [c for c in compiled_candidates if c is not None]
success_rate = float(len(compiled_candidates)) / float(len(task_list))
success_rate = get_compilation_success_rate(compiled_candidates)
logging.info(
f"Successfully compiled [{len(compiled_candidates)}] candidates. Success rate: {success_rate:.2f}"
)
compiled_candidates = [c for c in compiled_candidates if c is not None]

# Remove duplicate vmfbs from the candidate list.
compiled_candidate_hashes = []
@@ -830,6 +834,7 @@ def select_best_benchmark_results(
candidate_results: list[BenchmarkResult],
baseline_results: list[BenchmarkResult],
num_candidates: Optional[int],
path_config: Optional[PathConfig] = None,
) -> list[BenchmarkResult]:
filtered_candidate_results = [r for r in candidate_results if math.isfinite(r.time)]
if len(filtered_candidate_results) == 0:
@@ -874,12 +879,19 @@ def get_speedup(result: BenchmarkResult) -> float:
]
logging.info(f"Selected top[{len(best_results)}]:")

results = []
for r in best_results:
if fallback_baseline_time is not None:
speedup = f"{round(get_speedup(r) * 100, 2)}% of baseline"
else:
speedup = "baseline unavailable"
logging.info(f"Candidate {r.candidate_id} time: {r.time:.2f} ({speedup})")
result = f"Candidate {r.candidate_id} time: {r.time:.2f} ({speedup})"
logging.info(result)
results.append(result)

if path_config is not None:
with open(path_config.output_dir, "a") as file:
file.writelines(f"{result}\n" for result in results)
return best_results
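
With the new optional path_config parameter, the selected candidates' timing lines are appended to summary.log in addition to being logged; a hedged call sketch, assuming the result lists come from an earlier benchmark run:

    best = select_best_benchmark_results(
        candidate_results=candidate_results,  # list[BenchmarkResult] from benchmarking
        baseline_results=baseline_results,
        num_candidates=3,
        path_config=path_config,  # pass None (the default) to skip writing summary.log
    )
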


@@ -935,6 +947,7 @@ def benchmark(
candidate_results=candidate_results,
baseline_results=baseline_results,
num_candidates=num_candidates,
path_config=path_config,
)

top_candidates = [result.candidate_id for result in best_results]
11 changes: 11 additions & 0 deletions tuner/tuner/libtuner_test.py
@@ -176,6 +176,17 @@ def test_validate_devices_with_invalid_device() -> None:
assert expected_call in mock_handle_error.call_args_list


def test_get_compilation_success_rate():
compiled_candidates = [0, None, 2, None, 4]
assert libtuner.get_compilation_success_rate(compiled_candidates) == 3.0 / 5.0

compiled_candidates = [0, 1, 2, 3, 4]
assert libtuner.get_compilation_success_rate(compiled_candidates) == 1.0

compiled_candidates = [None, None, None]
assert libtuner.get_compilation_success_rate(compiled_candidates) == 0.0
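
Assuming the project's tests run under pytest, the new case can be exercised on its own with something like:

    pytest tuner/tuner/libtuner_test.py -k test_get_compilation_success_rate
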


def test_select_best_benchmark_results() -> None:
candidate_results = [
libtuner.BenchmarkResult(1, 0.5, "hip://0"),
