diff --git a/.github/workflows/benchmark_pr.yml b/.github/workflows/benchmark_pr.yml
index 1af037fd6..b76bccf9c 100644
--- a/.github/workflows/benchmark_pr.yml
+++ b/.github/workflows/benchmark_pr.yml
@@ -1,7 +1,14 @@
 name: Benchmark a pull request
 on:
+  push:
+    branches:
+      - "main"
+  # Run on pull requests
   pull_request:
+  # `workflow_dispatch` allows CodSpeed to trigger backtest
+  # performance analysis in order to generate initial data.
+  workflow_dispatch:
 
 permissions:
   pull-requests: write
 
@@ -16,61 +23,26 @@ jobs:
         with:
           version: "1"
       - uses: julia-actions/cache@v1
-      - name: Extract Package Name from Project.toml
-        id: extract-package-name
-        run: |
-          PACKAGE_NAME=$(grep "^name" Project.toml | sed 's/^name = "\(.*\)"$/\1/')
-          echo "::set-output name=package_name::$PACKAGE_NAME"
-      - name: Build AirspeedVelocity
+      - name: Install ValgrindBenchmarkTools
         env:
-          JULIA_NUM_THREADS: 2
+          JULIA_PKG_PRECOMPILE_AUTO: false
+        run: |
+          julia -e 'import Pkg; Pkg.add(url="https://github.com/JuliaCI/BenchmarkTools.jl", rev="vc/extendable")'
+          julia -e 'import Pkg; Pkg.add(url="https://github.com/JuliaPerf/Valgrind.jl", rev="vc/bt")'
+          julia -e 'import Pkg; Pkg.add(url="https://github.com/JuliaPerf/Valgrind.jl", rev="vc/bt", subdir="lib/ValgrindBenchmarkTools")'
+      - name: Precompile
+        env:
+          JULIA_CPU_TARGET: "haswell" # CPU emulated by Valgrind
         run: |
-          # Lightweight build step, as sometimes the runner runs out of memory:
-          julia -e 'ENV["JULIA_PKG_PRECOMPILE_AUTO"]=0; import Pkg; Pkg.add("AirspeedVelocity")'
-          julia -e 'ENV["JULIA_PKG_PRECOMPILE_AUTO"]=0; import Pkg; Pkg.build("AirspeedVelocity")'
+          julia -e 'import Pkg; Pkg.precompile()'
+          julia --project=. -e 'import Pkg; Pkg.precompile()'
       - name: Add ~/.julia/bin to PATH
         run: |
           echo "$HOME/.julia/bin" >> $GITHUB_PATH
       - name: Run benchmarks
-        run: |
-          echo $PATH
-          ls -l ~/.julia/bin
-          mkdir results
-          benchpkg ${{ steps.extract-package-name.outputs.package_name }} --rev="${{github.event.repository.default_branch}},${{github.event.pull_request.head.sha}}" --url=${{ github.event.repository.clone_url }} --bench-on="${{github.event.repository.default_branch}}" --output-dir=results/ --tune
-      - name: Create plots from benchmarks
-        run: |
-          mkdir -p plots
-          benchpkgplot ${{ steps.extract-package-name.outputs.package_name }} --rev="${{github.event.repository.default_branch}},${{github.event.pull_request.head.sha}}" --npart=10 --format=png --input-dir=results/ --output-dir=plots/
-      - name: Upload plot as artifact
-        uses: actions/upload-artifact@v2
-        with:
-          name: plots
-          path: plots
-      - name: Create markdown table from benchmarks
-        run: |
-          benchpkgtable ${{ steps.extract-package-name.outputs.package_name }} --rev="${{github.event.repository.default_branch}},${{github.event.pull_request.head.sha}}" --input-dir=results/ --ratio > table.md
-          echo '### Benchmark Results' > body.md
-          echo '' >> body.md
-          echo '' >> body.md
-          cat table.md >> body.md
-          echo '' >> body.md
-          echo '' >> body.md
-          echo '### Benchmark Plots' >> body.md
-          echo 'A plot of the benchmark results have been uploaded as an artifact to the workflow run for this PR.' >> body.md
-          echo 'Go to "Actions"->"Benchmark a pull request"->[the most recent run]->"Artifacts" (at the bottom).' >> body.md
-
-      - name: Find Comment
-        uses: peter-evans/find-comment@v2
-        id: fcbenchmark
-        with:
-          issue-number: ${{ github.event.pull_request.number }}
-          comment-author: 'github-actions[bot]'
-          body-includes: Benchmark Results
-
-      - name: Comment on PR
-        uses: peter-evans/create-or-update-comment@v3
+        uses: CodSpeedHQ/action@v3
+        env:
+          ENABLE_JITPROFILING: 1
         with:
-          comment-id: ${{ steps.fcbenchmark.outputs.comment-id }}
-          issue-number: ${{ github.event.pull_request.number }}
-          body-path: body.md
-          edit-mode: replace
+          token: ${{ secrets.CODSPEED_TOKEN }}
+          run: 'julia --project=. -L benchmark/benchmarks.jl -e "run(SUITE, \"\")"'
diff --git a/benchmark/benchmarks.jl b/benchmark/benchmarks.jl
index b153f60ee..361ec7014 100644
--- a/benchmark/benchmarks.jl
+++ b/benchmark/benchmarks.jl
@@ -3,7 +3,7 @@
 # result = benchmarkpkg(KernelAbstractions, BenchmarkConfig(env=Dict("KA_BACKEND"=>"CPU", "JULIA_NUM_THREADS"=>"auto")))
 # export_markdown("perf.md", result)
 
-using BenchmarkTools
+using ValgrindBenchmarkTools
 using KernelAbstractions
 using Random
 