Skip to content

Commit

Permalink
feat: add CodSpeed to the project
Browse files Browse the repository at this point in the history
  • Loading branch information
adriencaccia committed Apr 17, 2024
1 parent 67bfab4 commit f39e8c3
Show file tree
Hide file tree
Showing 4 changed files with 32 additions and 21 deletions.
30 changes: 30 additions & 0 deletions .github/workflows/benchmark.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
# CodSpeed continuous-benchmarking workflow.
# Runs the pytest benchmark suite under CodSpeed instrumentation on every
# push to main and every pull request.
# NOTE(review): extraction stripped the YAML indentation; restored here to
# the canonical GitHub Actions structure — content is otherwise unchanged.
name: codspeed-benchmarks

on:
  push:
    branches:
      - "main" # or "master"
  pull_request:
  # `workflow_dispatch` allows CodSpeed to trigger backtest
  # performance analysis in order to generate initial data.
  workflow_dispatch:

jobs:
  benchmarks:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v3
        with:
          python-version: "3.12"

      - name: Install dependencies
        run: |
          python -m pip install -U pip setuptools wheel
          pip install -U -r requirements_test_min.txt
      - name: Run benchmarks
        uses: CodSpeedHQ/action@v2
        with:
          # CODSPEED_TOKEN must be configured in the repository secrets.
          token: ${{ secrets.CODSPEED_TOKEN }}
          # The action runs this command under its instrumentation harness.
          run: pytest tests/ --codspeed
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -27,3 +27,4 @@ build-stamp
.pytest_cache/
.mypy_cache/
.benchmarks/
venv
1 change: 1 addition & 0 deletions requirements_test_min.txt
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@ typing-extensions~=4.11
py~=1.11.0
pytest~=7.4
pytest-benchmark~=4.0
pytest-codspeed~=2.2.1
pytest-timeout~=2.3
towncrier~=23.11
requests
Expand Down
21 changes: 0 additions & 21 deletions tests/benchmark/test_baseline_benchmarks.py
Original file line number Diff line number Diff line change
Expand Up @@ -176,9 +176,6 @@ def test_baseline_lots_of_files_j1(self, benchmark: BenchmarkFixture) -> None:
We do not register any checkers except the default 'main', so the cost is just
that of the system with a lot of files registered
"""
if benchmark.disabled:
benchmark(print, "skipping, only benchmark large file counts")
return # _only_ run this test if profiling
linter = PyLinter(reporter=Reporter())
linter.config.jobs = 1
fileinfos = [self.empty_filepath for _ in range(self.lot_of_files)]
Expand All @@ -197,9 +194,6 @@ def test_baseline_lots_of_files_j2(self, benchmark: BenchmarkFixture) -> None:
register any checkers except the default 'main', so the cost is just that of
the check_parallel system across 2 workers, plus the overhead of PyLinter
"""
if benchmark.disabled:
benchmark(print, "skipping, only benchmark large file counts")
return # _only_ run this test if profiling
linter = PyLinter(reporter=Reporter())
linter.config.jobs = 2
fileinfos = [self.empty_filepath for _ in range(self.lot_of_files)]
Expand All @@ -218,9 +212,6 @@ def test_baseline_lots_of_files_j1_empty_checker(
We use a checker that does no work, so the cost is just that of the system at
scale
"""
if benchmark.disabled:
benchmark(print, "skipping, only benchmark large file counts")
return # _only_ run this test if profiling
linter = PyLinter(reporter=Reporter())
linter.config.jobs = 1
linter.register_checker(NoWorkChecker(linter))
Expand All @@ -241,9 +232,6 @@ def test_baseline_lots_of_files_j2_empty_checker(
We use a checker that does no work, so the cost is just that of the system at
scale, across workers
"""
if benchmark.disabled:
benchmark(print, "skipping, only benchmark large file counts")
return # _only_ run this test if profiling
linter = PyLinter(reporter=Reporter())
linter.config.jobs = 2
linter.register_checker(NoWorkChecker(linter))
Expand All @@ -266,9 +254,6 @@ def test_baseline_benchmark_j1_single_working_checker(
We expect this benchmark to take very close to
`numfiles*SleepingChecker.sleep_duration`
"""
if benchmark.disabled:
benchmark(print, "skipping, do not want to sleep in main tests")
return # _only_ run this test if profiling
linter = PyLinter(reporter=Reporter())
linter.register_checker(SleepingChecker(linter))

Expand All @@ -295,9 +280,6 @@ def test_baseline_benchmark_j2_single_working_checker(
Because of the cost of the framework and system the performance difference will
*not* be 1/2 of -j1 versions.
"""
if benchmark.disabled:
benchmark(print, "skipping, do not want to sleep in main tests")
return # _only_ run this test if profiling
linter = PyLinter(reporter=Reporter())
linter.config.jobs = 2
linter.register_checker(SleepingChecker(linter))
Expand Down Expand Up @@ -334,9 +316,6 @@ def test_baseline_benchmark_j1_all_checks_lots_of_files(
... that's the intent at least.
"""
if benchmark.disabled:
benchmark(print, "skipping, only benchmark large file counts")
return # _only_ run this test if profiling
linter = PyLinter()

# Register all checkers/extensions and enable them
Expand Down

0 comments on commit f39e8c3

Please sign in to comment.