Custom Benchmark fixture for running benchmark tests when the pytest-benchmark plugin is disabled #302

Open · wants to merge 14 commits into base: main
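Summary: this PR adds a fallback "benchmark" fixture so that benchmark-style tests keep running when the pytest-benchmark plugin is disabled. A minimal sketch of the failure mode being addressed (hypothetical test module; with pytest-benchmark blocklisted via "-p no:benchmark", requesting the fixture normally errors with "fixture 'benchmark' not found"):

# test_example.py (hypothetical)
# Without the fallback plugin, `pytest -p no:benchmark` fails this test at
# fixture lookup; with it, the test runs and the call below simply invokes
# sorted() once, without any timing.
def test_sort(benchmark):
    result = benchmark(sorted, list(reversed(range(500))))
    assert result == list(range(500))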
@@ -7,6 +7,13 @@ def test_sort(benchmark):
     result = benchmark(sorter, list(reversed(range(500))))
     assert result == list(range(500))
 
+@pytest.mark.benchmark
+def test_sort_with_marker(benchmark):
+    @benchmark
+    def inner_fn():
+        return sorter(list(reversed(range(5))))
+    assert 1 == 1
+
 # This should not be picked up as a benchmark test
 def test_sort2():
     result = sorter(list(reversed(range(500))))
77 changes: 77 additions & 0 deletions codeflash/benchmarking/plugin/custom_plugin.py
@@ -0,0 +1,77 @@
+from typing import Any, Callable, Optional
+
+import pytest
+
+
+def pytest_configure(config) -> None:  # noqa: ANN001
+    config.addinivalue_line("markers", "benchmark")
+
+
+@pytest.fixture
+def benchmark(request):  # noqa: ANN201, ANN001
+    class CustomBenchmark:
+        def __init__(self) -> None:
+            self.stats = []
+
+        def __call__(self, func, *args, **kwargs):  # noqa: ANN204, ANN001, ANN002, ANN003
+            # Just call the function without measuring anything
+            return func(*args, **kwargs)
+
+        def __getattr__(self, name):  # noqa: ANN204, ANN001
+            # Return a no-op callable for any attribute
+            return lambda *args, **kwargs: None  # noqa: ARG005
+
+        def pedantic(
+            self,
+            target: Callable,
+            args: tuple = (),
+            kwargs: Optional[dict] = None,  # noqa: FA100
+            iterations: int = 1,  # noqa: ARG002
+            rounds: int = 1,  # noqa: ARG002
+            warmup_rounds: int = 0,  # noqa: ARG002
+            setup: Optional[Callable] = None,  # noqa: FA100
+        ) -> Any:  # noqa: ANN401
+            """Mimics the pedantic method of pytest-benchmark."""
+            if setup:
+                setup()
+            if kwargs is None:
+                kwargs = {}
+            return target(*args, **kwargs)
+
+        @property
+        def group(self):  # noqa: ANN202
+            """Return a dummy group object."""
+            return type("Group", (), {"name": "dummy"})()
+
+        @property
+        def name(self) -> str:
+            """Return a dummy name."""
+            return "dummy_benchmark"
+
+        @property
+        def fullname(self) -> str:
+            """Return a dummy fullname."""
+            return "dummy::benchmark"
+
+        @property
+        def params(self):  # noqa: ANN202
+            """Return empty params."""
+            return {}
+
+        @property
+        def extra_info(self):  # noqa: ANN202
+            """Return empty extra info."""
+            return {}
+
+    # Check if benchmark fixture is already available (pytest-benchmark is active)
+    if "benchmark" in request.fixturenames and hasattr(request, "_fixturemanager"):
+        try:
+            # Try to get the real benchmark fixture
+            return request.getfixturevalue("benchmark")
+        except (pytest.FixtureLookupError, AttributeError):
+            pass
+    custom_benchmark = CustomBenchmark()
+    if request.node.get_closest_marker("benchmark"):
+        # Return our custom benchmark for tests marked with @pytest.mark.benchmark
+        return custom_benchmark
+    return custom_benchmark
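For reference, a short sketch (hypothetical test module) of how the fallback fixture behaves from a test's point of view; all three call styles reduce to a single un-timed invocation of the target:

import pytest

def test_call_style(benchmark):
    # __call__ just runs the target once and returns its result
    assert benchmark(sum, [1, 2, 3]) == 6

def test_pedantic_style(benchmark):
    # iterations/rounds/warmup_rounds are accepted but ignored
    assert benchmark.pedantic(sum, args=([1, 2, 3],), rounds=10) == 6

@pytest.mark.benchmark
def test_decorator_style(benchmark):
    # Used as a decorator, the fixture calls the function immediately,
    # so the name is rebound to the function's return value.
    @benchmark
    def result():
        return sum([1, 2, 3])
    assert result == 6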
12 changes: 9 additions & 3 deletions codeflash/verification/test_runner.py
@@ -73,7 +73,9 @@ def run_behavioral_tests(
     result_args = [f"--junitxml={result_file_path.as_posix()}", "-o", "junit_logging=all"]
 
     pytest_test_env = test_env.copy()
-    pytest_test_env["PYTEST_PLUGINS"] = "codeflash.verification.pytest_plugin"
+    pytest_test_env["PYTEST_PLUGINS"] = (
+        "codeflash.verification.pytest_plugin,codeflash.benchmarking.plugin.custom_plugin"
+    )
 
     if enable_coverage:
         coverage_database_file, coverage_config_file = prepare_coverage_files()
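Context for the PYTEST_PLUGINS change (applied identically in the three runners in this file): pytest reads PYTEST_PLUGINS as a comma-separated list of importable modules to register as plugins at startup, so appending the custom plugin module loads the fallback fixture alongside the existing verification plugin. A minimal standalone sketch of the mechanism (the "tests/" path and the explicit "-p no:benchmark" flag are illustrative; test_runner.py builds its blocklist flags from BENCHMARKING_BLOCKLISTED_PLUGINS):

import os
import subprocess

env = os.environ.copy()
# Comma-separated module paths; pytest imports each one as a plugin.
env["PYTEST_PLUGINS"] = (
    "codeflash.verification.pytest_plugin,codeflash.benchmarking.plugin.custom_plugin"
)
# Disable pytest-benchmark so the fallback fixture takes over.
subprocess.run(["pytest", "-p", "no:benchmark", "tests/"], env=env, check=False)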
@@ -191,7 +193,9 @@ def run_line_profile_tests(
     result_file_path = get_run_tmp_file(Path("pytest_results.xml"))
     result_args = [f"--junitxml={result_file_path.as_posix()}", "-o", "junit_logging=all"]
     pytest_test_env = test_env.copy()
-    pytest_test_env["PYTEST_PLUGINS"] = "codeflash.verification.pytest_plugin"
+    pytest_test_env["PYTEST_PLUGINS"] = (
+        "codeflash.verification.pytest_plugin,codeflash.benchmarking.plugin.custom_plugin"
+    )
     blocklist_args = [f"-p no:{plugin}" for plugin in BENCHMARKING_BLOCKLISTED_PLUGINS]
     pytest_test_env["LINE_PROFILE"] = "1"
     results = execute_test_subprocess(
@@ -252,7 +256,9 @@ def run_benchmarking_tests(
     result_file_path = get_run_tmp_file(Path("pytest_results.xml"))
     result_args = [f"--junitxml={result_file_path.as_posix()}", "-o", "junit_logging=all"]
     pytest_test_env = test_env.copy()
-    pytest_test_env["PYTEST_PLUGINS"] = "codeflash.verification.pytest_plugin"
+    pytest_test_env["PYTEST_PLUGINS"] = (
+        "codeflash.verification.pytest_plugin,codeflash.benchmarking.plugin.custom_plugin"
+    )
     blocklist_args = [f"-p no:{plugin}" for plugin in BENCHMARKING_BLOCKLISTED_PLUGINS]
     results = execute_test_subprocess(
         pytest_cmd_list + pytest_args + blocklist_args + result_args + test_files,