Pin ruff tool #72

Merged · 2 commits · Jan 11, 2025

Makefile (7 additions, 5 deletions)

@@ -4,6 +4,8 @@ all:

SOURCES = hwbench csv graph

+RUFF_VERSION = 0.9.0
+
update_deps:
    uv sync -U

@@ -18,20 +20,20 @@ clean:

check:
    @uv lock --locked || echo "Your lock file should change because you probably added a dependency or bump the minimal Python version. Please run `uv lock`"
-    uv tool run ruff format --diff $(SOURCES)
-    uv tool run ruff check $(SOURCES)
+    uv tool run ruff@$(RUFF_VERSION) format --diff $(SOURCES)
+    uv tool run ruff@$(RUFF_VERSION) check $(SOURCES)
    uv run mypy $(SOURCES)
    uv run pytest $(SOURCES)

check_ci:
    @uv lock --locked || echo "Your lock file should change because you probably added a dependency or bump the minimal Python version, but this is not allowed in the CI. Please run `uv lock`"
-    uv tool run ruff format --diff $(SOURCES)
-    uv tool run ruff check --output-format=github $(SOURCES)
+    uv tool run ruff@$(RUFF_VERSION) format --diff $(SOURCES)
+    uv tool run ruff@$(RUFF_VERSION) check --output-format=github $(SOURCES)
    uv run mypy $(SOURCES)
    uv run pytest $(SOURCES)

bundle:
    uv build

format:
-    uv tool run ruff format $(SOURCES)
+    uv tool run ruff@$(RUFF_VERSION) format $(SOURCES)
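
The Makefile now pins ruff through uv's name@version syntax, so make check, make check_ci, and make format all resolve the same formatter release instead of whatever latest build uv would otherwise fetch. As a quick sanity check outside of make, a small sketch along these lines (assuming uv is on PATH and can download the pinned build; this is an illustration, not part of the PR) should confirm the resolved version from Python:

    import subprocess

    # Run the pinned ruff build through uv and print the version it reports.
    # "ruff@0.9.0" mirrors the RUFF_VERSION value added in the Makefile above.
    result = subprocess.run(
        ["uv", "tool", "run", "ruff@0.9.0", "--version"],
        capture_output=True,
        text=True,
        check=True,
    )
    print(result.stdout.strip())  # expected to print something like "ruff 0.9.0"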

csv/convert.py (4 additions, 4 deletions)

@@ -128,8 +128,8 @@ def memrate_key(r):
            r.get("job_name", "")
            + r.get("engine_module", "")
            + r.get("key", "")
-            + f'{r.get("workers", ""):05}'
-            + f'{r.get("job_number", ""):06}'
+            + f"{r.get('workers', ''):05}"
+            + f"{r.get('job_number', ''):06}"
        )

    results_sorted = sorted(result_list, key=memrate_key)
@@ -147,8 +147,8 @@ def result_key(r):
            + r.get("engine_module", "")
            + r.get("engine_module_parameter", "")
            + r.get("job_name", "")
-            + f'{r.get("workers", ""):05}'
-            + f'{r.get("job_number", "")}'
+            + f"{r.get('workers', ''):05}"
+            + f"{r.get('job_number', '')}"
        )
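
The convert.py change is purely cosmetic and lines up with ruff 0.9.0's updated f-string formatting: the formatter now prefers double quotes for the f-string itself and single quotes for string literals nested inside the replacement fields. Both spellings build the same key string. A minimal illustration (the record r below is hypothetical, not taken from the PR):

    # Hypothetical record, for illustration only.
    r = {"workers": 8, "job_number": 42}

    old_style = f'{r.get("workers", ""):05}'   # pre-0.9.0 style: single-quoted f-string
    new_style = f"{r.get('workers', ''):05}"   # ruff 0.9.0 output: double quotes outside
    assert old_style == new_style == "00008"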

graph/graph.py (2 additions, 2 deletions)

@@ -263,7 +263,7 @@ def generic_graph(
    # If the user didn't explictely agreed to be replaced by 0, let's be fatal
    if not args.ignore_missing_datapoint:
        fatal(
-            f"{trace.get_name()}/{bench.get_bench_name()}: {component.get_full_name()} is missing the {sample+1}th data point.\
+            f"{trace.get_name()}/{bench.get_bench_name()}: {component.get_full_name()} is missing the {sample + 1}th data point.\
Use --ignore-missing-datapoint to ignore this case. Generated graphs will be partially incorrect."
        )
    else:
@@ -292,7 +292,7 @@ def generic_graph(
    # If the user didn't explictely agreed to be replaced by 0, let's be fatal
    if not args.ignore_missing_datapoint:
        fatal(
-            f"{trace.get_name()}/{bench.get_bench_name()}: second axis of {sensor}: {measure.get_full_name()} is missing the {sample+1}th data point.\
+            f"{trace.get_name()}/{bench.get_bench_name()}: second axis of {sensor}: {measure.get_full_name()} is missing the {sample + 1}th data point.\
Use --ignore-missing-datapoint to ignore this case. Generated graphs will be partially incorrect."
        )
    else:
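
The graph.py hunks show the other half of the same formatter update: ruff 0.9.0 adds spaces around binary operators inside f-string replacement fields, so {sample+1} becomes {sample + 1} with identical output. A tiny sketch (the sample value is invented):

    sample = 3
    assert f"{sample+1}th" == f"{sample + 1}th" == "4th"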

graph/individual.py (1 addition, 1 deletion)

@@ -86,7 +86,7 @@ def individual_graph(args, output_dir, job: str, traces_name: list) -> int:
    clean_perf = perf.replace(" ", "").replace("/", "")
    y_label = unit
    outdir = temp_outdir.joinpath(graph_type)
-    outfile = f"{bench.get_title_engine_name().replace(' ','_')}"
+    outfile = f"{bench.get_title_engine_name().replace(' ', '_')}"

    # Let's define the tree architecture based on the benchmark profile
    # If the benchmark has multiple performance results, let's put them in a specific directory

graph/scaling.py (4 additions, 4 deletions)

@@ -95,21 +95,21 @@ def scaling_graph(args, output_dir, job: str, traces_name: list) -> int:
            f"Scaling {graph_type}: '{bench.get_title_engine_name()} / {args.traces[0].get_metric_name()}'"
        )
        y_label = f"{unit} per Watt"
-        outfile = f"scaling_watt_{clean_perf}_{bench.get_title_engine_name().replace(' ','_')}"
+        outfile = f"scaling_watt_{clean_perf}_{bench.get_title_engine_name().replace(' ', '_')}"
        y_source = aggregated_perfs_watt
    elif "watts" in graph_type:
        graph_type_title = f"Scaling {graph_type}: {args.traces[0].get_metric_name()}"
-        outfile = f"scaling_watt_{clean_perf}_{bench.get_title_engine_name().replace(' ','_')}"
+        outfile = f"scaling_watt_{clean_perf}_{bench.get_title_engine_name().replace(' ', '_')}"
        y_label = "Watts"
        y_source = aggregated_watt
    elif "cpu_clock" in graph_type:
        graph_type_title = f"Scaling {graph_type}: {args.traces[0].get_metric_name()}"
-        outfile = f"scaling_cpu_clock_{clean_perf}_{bench.get_title_engine_name().replace(' ','_')}"
+        outfile = f"scaling_cpu_clock_{clean_perf}_{bench.get_title_engine_name().replace(' ', '_')}"
        y_label = "Mhz"
        y_source = aggregated_cpu_clock
    else:
        graph_type_title = f"Scaling {graph_type}: {bench.get_title_engine_name()}"
-        outfile = f"scaling_{clean_perf}_{bench.get_title_engine_name().replace(' ','_')}"
+        outfile = f"scaling_{clean_perf}_{bench.get_title_engine_name().replace(' ', '_')}"
        y_source = aggregated_perfs

    title = f'{args.title}\n\n{graph_type_title} via "{job}" benchmark job\n\n Stressor: '

graph/trace.py (1 addition, 1 deletion)

@@ -241,7 +241,7 @@ def add_perf(
        if delta > 1:
            print(
                f"{self.trace.get_name()}/{self.get_bench_name()} didn't completed on time. "
-                f"Effective_runtime={effective_runtime} vs {self.duration()} : delta=[{delta:.2f}s; {delta/self.duration()*100:.2f}%]"
+                f"Effective_runtime={effective_runtime} vs {self.duration()} : delta=[{delta:.2f}s; {delta / self.duration() * 100:.2f}%]"
            )
    except TypeError:
        # We can ignore the delay computation if effective_runtime is not defined
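
For context, this print reports how far a benchmark ran past its planned duration, in seconds and as a percentage of that duration; the spacing change inside the f-string does not alter the output. A rough worked example with invented numbers (assuming delta is the overrun in seconds, as the surrounding code suggests):

    effective_runtime, duration = 63.0, 60.0   # invented values
    delta = effective_runtime - duration
    print(f"delta=[{delta:.2f}s; {delta / duration * 100:.2f}%]")  # delta=[3.00s; 5.00%]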

hwbench/config/config.py (1 addition, 1 deletion)

@@ -153,7 +153,7 @@ def get_physical_cores(core):
    # Let's replace 'all' special keyword if any
    all = re.findall("all", hcc)
    if all:
-        hcc = hcc.replace("all", f"0-{self.hardware.get_cpu().get_logical_cores_count()-1}")
+        hcc = hcc.replace("all", f"0-{self.hardware.get_cpu().get_logical_cores_count() - 1}")

    # Let's replace helpers if any
    helpers = re.findall("simple", hcc)
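
This line expands the "all" keyword in a core-range expression to an explicit zero-based range ending at the last logical core. A small illustration with an invented core count (the same replace call, not the repo's surrounding code):

    logical_cores = 64                                   # invented core count
    hcc = "all"
    hcc = hcc.replace("all", f"0-{logical_cores - 1}")
    assert hcc == "0-63"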

hwbench/environment/turbostat.py (1 addition, 1 deletion)

@@ -150,7 +150,7 @@ def run(self, interval: float = 1, wait=False):
    cmd_line = [
        "taskset",
        "-c",
-        f"{self.hardware.get_cpu().get_logical_cores_count()-1}",
+        f"{self.hardware.get_cpu().get_logical_cores_count() - 1}",
        "turbostat",
        "--cpu",
        "core",

hwbench/environment/vendors/hpe/hpe.py (1 addition, 1 deletion)

@@ -119,7 +119,7 @@ def read_power_supplies(
    # Let's inform the user the PSU is reported as non healthy
    self.__warn_psu(
        psu_position,
-        f'marked as {psu_state} in {psu_status.get("Health")} state',
+        f"marked as {psu_state} in {psu_status.get('Health')} state",
    )
    continue

hwbench/utils/helpers.py (1 addition, 1 deletion)

@@ -53,7 +53,7 @@ def cpu_list_to_range(cpu_list: list[int]) -> str:

    if not is_immediately_next:
        if needs_compression:
-            output.append(f"{previous_entry}-{cpu_list[i-1]}")
+            output.append(f"{previous_entry}-{cpu_list[i - 1]}")
        else:
            output.append(str(previous_entry))
        previous_entry = current_entry
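
The touched line belongs to cpu_list_to_range, which collapses consecutive CPU ids into "start-end" ranges for tools such as taskset. The sketch below is an independent, simplified re-implementation of that idea for illustration only (compress_cpu_list is a made-up name, not the repo's code):

    def compress_cpu_list(cpu_list: list[int]) -> str:
        """Collapse consecutive CPU ids, e.g. [0, 1, 2, 5] -> "0-2,5"."""
        if not cpu_list:
            return ""
        output: list[str] = []
        start = prev = cpu_list[0]
        for current in cpu_list[1:]:
            if current != prev + 1:  # a gap ends the current run
                output.append(f"{start}-{prev}" if start != prev else str(start))
                start = current
            prev = current
        output.append(f"{start}-{prev}" if start != prev else str(start))
        return ",".join(output)

    assert compress_cpu_list([0, 1, 2, 5, 7, 8]) == "0-2,5,7-8"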