diff --git a/code_to_optimize/tests/pytest/benchmarks/test_benchmark_bubble_sort.py b/code_to_optimize/tests/pytest/benchmarks/test_benchmark_bubble_sort.py
index 3d7b24a6c..91cd1777f 100644
--- a/code_to_optimize/tests/pytest/benchmarks/test_benchmark_bubble_sort.py
+++ b/code_to_optimize/tests/pytest/benchmarks/test_benchmark_bubble_sort.py
@@ -7,6 +7,13 @@ def test_sort(benchmark):
     result = benchmark(sorter, list(reversed(range(500))))
     assert result == list(range(500))
 
+@pytest.mark.benchmark
+def test_sort_with_marker(benchmark):
+    @benchmark
+    def inner_fn():
+        return sorter(list(reversed(range(5))))
+    assert 1==1
+
 # This should not be picked up as a benchmark test
 def test_sort2():
     result = sorter(list(reversed(range(500))))
diff --git a/codeflash/benchmarking/plugin/custom_plugin.py b/codeflash/benchmarking/plugin/custom_plugin.py
new file mode 100644
index 000000000..77479435a
--- /dev/null
+++ b/codeflash/benchmarking/plugin/custom_plugin.py
@@ -0,0 +1,77 @@
+from typing import Any, Callable, Optional
+
+import pytest
+
+
+def pytest_configure(config) -> None:  # noqa: ANN001
+    config.addinivalue_line("markers", "benchmark")
+
+
+@pytest.fixture
+def benchmark(request):  # noqa: ANN201, ANN001
+    class CustomBenchmark:
+        def __init__(self) -> None:
+            self.stats = []
+
+        def __call__(self, func, *args, **kwargs):  # noqa: ANN204, ANN001, ANN002, ANN003
+            # Just call the function without measuring anything
+            return func(*args, **kwargs)
+
+        def __getattr__(self, name):  # noqa: ANN204, ANN001
+            # Return a no-op callable for any attribute
+            return lambda *args, **kwargs: None  # noqa: ARG005
+
+        def pedantic(
+            self,
+            target: Callable,
+            args: tuple = (),
+            kwargs: Optional[dict] = None,  # noqa: FA100
+            iterations: int = 1,  # noqa: ARG002
+            rounds: int = 1,  # noqa: ARG002
+            warmup_rounds: int = 0,  # noqa: ARG002
+            setup: Optional[Callable] = None,  # noqa: FA100
+        ) -> Any:  # noqa: ANN401
+            """Mimics the pedantic method of pytest-benchmark."""
+            if setup:
+                setup()
+            if kwargs is None:
+                kwargs = {}
+            return target(*args, **kwargs)
+
+        @property
+        def group(self):  # noqa: ANN202
+            """Return a dummy group object."""
+            return type("Group", (), {"name": "dummy"})()
+
+        @property
+        def name(self) -> str:
+            """Return a dummy name."""
+            return "dummy_benchmark"
+
+        @property
+        def fullname(self) -> str:
+            """Return a dummy fullname."""
+            return "dummy::benchmark"
+
+        @property
+        def params(self):  # noqa: ANN202
+            """Return empty params."""
+            return {}
+
+        @property
+        def extra_info(self):  # noqa: ANN202
+            """Return empty extra info."""
+            return {}
+
+    # Check if benchmark fixture is already available (pytest-benchmark is active)
+    if "benchmark" in request.fixturenames and hasattr(request, "_fixturemanager"):
+        try:
+            # Try to get the real benchmark fixture
+            return request.getfixturevalue("benchmark")
+        except (pytest.FixtureLookupError, AttributeError):
+            pass
+    custom_benchmark = CustomBenchmark()
+    if request.node.get_closest_marker("benchmark"):
+        # Return our custom benchmark for tests marked with @pytest.mark.benchmark
+        return custom_benchmark
+    return custom_benchmark