Skip to content

Commit ea3c199

Browse files
committed
FIX: pre-commit fixes
1 parent 1b5625c commit ea3c199

6 files changed

Lines changed: 51 additions & 32 deletions

File tree

python/cuml/cuml/benchmark/config.py

Lines changed: 15 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -219,7 +219,9 @@ def validate_config(raw_config: dict[str, Any]) -> None:
219219
defaults = raw_config.get("defaults", {})
220220
if not isinstance(defaults, dict):
221221
raise BenchmarkConfigError("Config field 'defaults' must be a mapping")
222-
_validate_default_or_entry(defaults, context="defaults", require_algorithm=False)
222+
_validate_default_or_entry(
223+
defaults, context="defaults", require_algorithm=False
224+
)
223225

224226
profiles = raw_config.get("profiles", {})
225227
if not isinstance(profiles, dict):
@@ -375,7 +377,9 @@ def _validate_default_or_entry(
375377
)
376378

377379
for numeric_field in ("n_reps", "random_state"):
378-
if numeric_field in entry and not isinstance(entry[numeric_field], int):
380+
if numeric_field in entry and not isinstance(
381+
entry[numeric_field], int
382+
):
379383
raise BenchmarkConfigError(
380384
f"{context} field '{numeric_field}' must be an integer"
381385
)
@@ -389,7 +393,9 @@ def _validate_default_or_entry(
389393
if "rows" in entry:
390394
_normalize_int_list(entry["rows"], field_name=f"{context}.rows")
391395
if "features" in entry:
392-
_normalize_int_list(entry["features"], field_name=f"{context}.features")
396+
_normalize_int_list(
397+
entry["features"], field_name=f"{context}.features"
398+
)
393399
if "shapes" in entry:
394400
_normalize_shapes(entry["shapes"], field_name=f"{context}.shapes")
395401

@@ -454,7 +460,9 @@ def _apply_defaults(
454460

455461
def _validate_post_defaults_entry(entry: dict[str, Any]) -> None:
456462
_validate_default_or_entry(
457-
entry, context=f"benchmark '{entry.get('id', entry['algorithm'])}'", require_algorithm=True
463+
entry,
464+
context=f"benchmark '{entry.get('id', entry['algorithm'])}'",
465+
require_algorithm=True,
458466
)
459467

460468
benchmark_name = entry.get("id", entry["algorithm"])
@@ -600,7 +608,8 @@ def _apply_algorithm_filter(
600608

601609

602610
def _build_override_list(
603-
fixed_values: dict[str, Any] | None, grid_values: dict[str, list[Any]] | None
611+
fixed_values: dict[str, Any] | None,
612+
grid_values: dict[str, list[Any]] | None,
604613
) -> list[dict[str, Any]]:
605614
fixed = deepcopy(fixed_values or {})
606615
grid = deepcopy(grid_values or {})
@@ -645,9 +654,7 @@ def _normalize_int_list(
645654
return value
646655

647656

648-
def _normalize_shapes(
649-
value: Any, *, field_name: str
650-
) -> list[dict[str, int]]:
657+
def _normalize_shapes(value: Any, *, field_name: str) -> list[dict[str, int]]:
651658
if not isinstance(value, list) or not value:
652659
raise BenchmarkConfigError(
653660
f"Field '{field_name}' must be a non-empty list"

python/cuml/cuml/benchmark/configs/single_gpu.yaml

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1557,4 +1557,3 @@ benchmarks:
15571557
id: multinomialnb_fit_wide_nightly
15581558
rows: [8000000]
15591559
tags: [nightly-only, wide, naive-bayes]
1560-

python/cuml/cuml/benchmark/run_benchmarks.py

Lines changed: 10 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -264,8 +264,7 @@ def _validate_benchmark_inputs(test_split, input_type, run_gpu):
264264
"""Validate per-benchmark inputs and normalize CPU-only input types."""
265265
if not 0.0 <= test_split <= 1.0:
266266
raise ValueError(
267-
"test_split: got %f, want a value between 0.0 and 1.0"
268-
% test_split
267+
"test_split: got %f, want a value between 0.0 and 1.0" % test_split
269268
)
270269

271270
if run_gpu:
@@ -343,7 +342,12 @@ def _resolved_entry_dimensions(entry, args, explicit_options):
343342
if "--default-size" in explicit_options:
344343
return None, [0], [0]
345344

346-
row_override_flags = {"--min-rows", "--max-rows", "--num-sizes", "--num-rows"}
345+
row_override_flags = {
346+
"--min-rows",
347+
"--max-rows",
348+
"--num-sizes",
349+
"--num-rows",
350+
}
347351
if "--num-rows" in explicit_options:
348352
base_rows = [args.num_rows]
349353
elif row_override_flags.intersection(explicit_options):
@@ -383,7 +387,9 @@ def _run_config_benchmarks(args, explicit_options):
383387
"profile and filters."
384388
)
385389

386-
allow_gpu_runs = is_gpu_available() and "--skip-gpu" not in explicit_options
390+
allow_gpu_runs = (
391+
is_gpu_available() and "--skip-gpu" not in explicit_options
392+
)
387393
if allow_gpu_runs and any(entry["run_gpu"] for entry in benchmark_entries):
388394
setup_rmm_allocator(args.rmm_allocator)
389395

python/cuml/cuml/benchmark/runners.py

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -15,12 +15,13 @@
1515
# Supports both package and standalone execution
1616
try:
1717
from cuml.benchmark import datagen
18-
from cuml.benchmark.gpu_check import is_cuml_available, is_gpu_available
18+
from cuml.benchmark.gpu_check import is_gpu_available
1919
except ImportError:
2020
if not any("cuml/benchmark" in p for p in sys.path):
2121
raise
2222
import datagen # noqa: E402
23-
from gpu_check import is_cuml_available, is_gpu_available # noqa: E402
23+
from gpu_check import is_gpu_available # noqa: E402
24+
2425

2526
def _metric_array_to_numpy(data):
2627
"""Convert metric inputs to NumPy for sklearn compatibility."""
@@ -350,9 +351,7 @@ def _run_one_size(
350351
y_pred_cpu = cpu_model.transform(X_test)
351352
y_test = _metric_array_to_numpy(y_test)
352353
y_pred_cpu = _metric_array_to_numpy(y_pred_cpu)
353-
cpu_accuracy = algo_pair.accuracy_function(
354-
y_test, y_pred_cpu
355-
)
354+
cpu_accuracy = algo_pair.accuracy_function(y_test, y_pred_cpu)
356355

357356
if n_samples == 0:
358357
# Update n_samples = training samples + testing samples

python/cuml/tests/test_benchmark_config.py

Lines changed: 19 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -52,15 +52,15 @@ def test_load_and_resolve_config_default_profile_filters_single_gpu_manifest():
5252
/ "configs"
5353
/ "single_gpu.yaml"
5454
)
55-
resolved = load_and_resolve_config(
56-
str(config_path)
57-
)
55+
resolved = load_and_resolve_config(str(config_path))
5856

5957
benchmark_ids = {entry["benchmark_id"] for entry in resolved["benchmarks"]}
6058

6159
assert resolved["suite_name"] == "single_gpu"
6260
assert resolved["profile"] == "default"
63-
assert all(entry["input_type"] == "cupy" for entry in resolved["benchmarks"])
61+
assert all(
62+
entry["input_type"] == "cupy" for entry in resolved["benchmarks"]
63+
)
6464
assert "logreg_fit_narrow_default" in benchmark_ids
6565
assert "logreg_fit_medium_default" in benchmark_ids
6666
assert "logreg_fit_wide_default" in benchmark_ids
@@ -79,11 +79,15 @@ def test_load_and_resolve_config_single_gpu_profiles_preserve_algorithm_set():
7979
/ "single_gpu.yaml"
8080
)
8181

82-
default_resolved = load_and_resolve_config(str(config_path), profile="default")
82+
default_resolved = load_and_resolve_config(
83+
str(config_path), profile="default"
84+
)
8385
extended_resolved = load_and_resolve_config(
8486
str(config_path), profile="extended"
8587
)
86-
nightly_resolved = load_and_resolve_config(str(config_path), profile="nightly")
88+
nightly_resolved = load_and_resolve_config(
89+
str(config_path), profile="nightly"
90+
)
8791

8892
default_algorithms = {
8993
entry["algorithm"] for entry in default_resolved["benchmarks"]
@@ -270,12 +274,14 @@ def test_run_config_benchmarks_uses_shape_pairs_without_cartesian_product(
270274
([250], [16]),
271275
]
272276
assert list(results["benchmark_id"]) == ["shape-bench", "shape-bench"]
273-
assert set(["config_path", "suite_name", "suite_tier", "profile"]).issubset(
274-
results.columns
275-
)
277+
assert set(
278+
["config_path", "suite_name", "suite_tier", "profile"]
279+
).issubset(results.columns)
276280

277281

278-
def test_run_config_benchmarks_applies_only_explicit_cli_overrides(monkeypatch):
282+
def test_run_config_benchmarks_applies_only_explicit_cli_overrides(
283+
monkeypatch,
284+
):
279285
calls = []
280286
setup_calls = []
281287

@@ -396,9 +402,9 @@ def test_main_runs_config_smoke_manifest_end_to_end(monkeypatch, tmp_path):
396402
assert [kwargs["bench_dims"] for _, kwargs in calls] == [[8], [8]]
397403

398404
results = pd.read_csv(csv_path)
399-
assert set(["benchmark_id", "config_path", "suite_name", "suite_tier", "profile"]).issubset(
400-
results.columns
401-
)
405+
assert set(
406+
["benchmark_id", "config_path", "suite_name", "suite_tier", "profile"]
407+
).issubset(results.columns)
402408
assert set(results["benchmark_id"]) == {
403409
"test_logreg_fit",
404410
"test_scaler_fittransform",

python/cuml/tests/test_benchmark_runners.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -45,7 +45,9 @@ def test_accuracy_runner_converts_cupy_metric_inputs(monkeypatch):
4545
monkeypatch.setattr(
4646
"cuml.benchmark.runners.datagen.gen_data", lambda *args, **kwargs: data
4747
)
48-
monkeypatch.setattr("cuml.benchmark.runners.is_gpu_available", lambda: True)
48+
monkeypatch.setattr(
49+
"cuml.benchmark.runners.is_gpu_available", lambda: True
50+
)
4951

5052
result = runner._run_one_size(
5153
_MockAlgoPair(),

0 commit comments

Comments (0)