Refactor job triggers
samuelburnham committed Oct 31, 2023
1 parent 3bc2cdf commit 8aa4568
Showing 8 changed files with 85 additions and 85 deletions.
27 changes: 16 additions & 11 deletions .github/workflows/bench-deploy.yml
@@ -1,13 +1,16 @@
name: GPU benchmarks
name: GPU benchmark on `master`
on:
workflow_dispatch:
release:
types: [published]
push:
branches:
- master

jobs:
# TODO: Account for different `justfile` and `bench.env` files
# One option is to upload them to gh-pages for qualitative comparison
# TODO: Fall back to a default if `justfile`/`bench.env` not present
benchmark:
name: Bench and deploy
runs-on: [self-hosted, gpu-bench-t4]
runs-on: [self-hosted, gpu-bench, gh-pages]
steps:
# Install deps
- uses: actions/checkout@v4
@@ -16,14 +19,12 @@ jobs:
- uses: taiki-e/install-action@v2
with:
tool: just@1.15.0

# Set up GPU
# Check we have access to the machine's Nvidia drivers
- run: nvidia-smi
# Check that CUDA is installed with a driver-compatible version
# This must also be compatible with the GPU architecture, see above link
- run: nvcc --version

# Run benchmarks and deploy
- name: Get old benchmarks
uses: actions/checkout@v4
@@ -34,21 +35,25 @@ jobs:
- name: Install criterion
run: cargo install cargo-criterion
- name: Run benchmarks
run: just --dotenv-filename bench.env gpu-bench fibonacci
run: just --dotenv-filename bench.env gpu-bench fibonacci_lem
# TODO: Prettify labels for easier viewing
# Compress the benchmark file and metadata for later analysis
- name: Compress artifacts
run: tar -cvzf ${{ github.sha }}.tar.gz Cargo.lock ${{ github.sha }}.json
run: |
echo $LABELS > labels.md
tar -cvzf ${{ github.sha }}.tar.gz Cargo.lock ${{ github.sha }}.json labels.md
- name: Deploy latest benchmark report
uses: peaceiris/actions-gh-pages@v3
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
publish_dir: ./target/criterion
destination_dir: benchmarks/criterion
- name: Move benchmark json to history
- name: Copy benchmark json to history
run: mkdir history; cp ${{ github.sha }}.tar.gz history/
- name: Deploy benchmark history
uses: peaceiris/actions-gh-pages@v3
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
publish_dir: history/
destination_dir: benchmarks/history
keep_files: true
keep_files: true
8 changes: 4 additions & 4 deletions .github/workflows/bench-pr-comment.yml
@@ -35,19 +35,19 @@ jobs:
- uses: boa-dev/criterion-compare-action@v3
with:
# Optional. Compare only this benchmark target
benchName: "end2end"
benchName: "fibonacci_lem"
# Needed. The name of the branch to compare with
branchName: ${{ github.ref_name }}

# TODO: Check it works with forked PRs when running
# `gh pr checkout {{ github.event.issue.number}}` with `env: GH_TOKEN`
gpu-benchmark:
name: run fibonacci benchmark on GPU
runs-on: [self-hosted, gpu-bench-t4]
runs-on: [self-hosted, gpu-bench]
if:
github.event.issue.pull_request
&& github.event.issue.state == 'open'
&& contains(github.event.comment.body, '!benchmark')
&& contains(github.event.comment.body, '!gpu-benchmark')
&& (github.event.comment.author_association == 'MEMBER' || github.event.comment.author_association == 'OWNER')
steps:
# Set up GPU
@@ -83,7 +83,7 @@ jobs:
- uses: boa-dev/criterion-compare-action@v3
with:
# Optional. Compare only this benchmark target
benchName: "fibonacci"
benchName: "fibonacci_lem"
# Optional. Features activated in the benchmark
features: "cuda"
# Needed. The name of the branch to compare with
9 changes: 6 additions & 3 deletions .github/workflows/gpu.yml → .github/workflows/gpu-ci.yml
@@ -2,9 +2,10 @@
name: GPU tests

on:
push:
branches:
- master
pull_request:
types: [opened, synchronize, reopened, ready_for_review]
branches: [master]
merge_group:

env:
CARGO_TERM_COLOR: always
@@ -36,6 +37,7 @@ concurrency:
jobs:
cuda:
name: Rust tests on CUDA
if: github.event_name != 'pull_request' || github.event.action == 'enqueued'
runs-on: [self-hosted, gpu-ci]
env:
NVIDIA_VISIBLE_DEVICES: all
@@ -68,6 +70,7 @@ jobs:
opencl:
name: Rust tests on OpenCL
if: github.event_name != 'pull_request' || github.event.action == 'enqueued'
runs-on: [self-hosted, gpu-ci]
env:
NVIDIA_VISIBLE_DEVICES: all
@@ -6,8 +6,6 @@ on:
types: [opened, synchronize, reopened, ready_for_review]
branches: [master]
merge_group:
# Manual trigger for early signal on local branches
workflow_dispatch:

concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
@@ -67,56 +65,51 @@ jobs:
cargo nextest run --profile ci --workspace --cargo-profile dev-no-assertions -E 'test(circuit::gadgets)'
# TODO: Make this a required status check
# TODO: Cache successful bench run from PR branch on master, keyed on commit hash
# Run comparative benchmark against master
# Run comparative benchmark against master, reject on regression
gpu-benchmark:
# [TEMPORARY] Test one run before attempting merge
#if: github.event_name != 'pull_request' || github.event.action == 'enqueued'
if: github.event_name != 'pull_request' || github.event.action == 'enqueued'
name: Run fibonacci bench on GPU
runs-on: [self-hosted, gpu-bench-t4]
runs-on: [self-hosted, gpu-bench]
steps:
# TODO: Factor this out into an action or into justfile, it's used in 4 places
# TODO: Factor out GPU setup into an action or into justfile, it's used in 4 places
# Set up GPU
# Check we have access to the machine's Nvidia drivers
- run: nvidia-smi
# Check that CUDA is installed with a driver-compatible version
# This must also be compatible with the GPU architecture, see above link
- run: nvcc --version
- uses: actions/checkout@v4
# Checkout base branch for comparative bench
- uses: actions/checkout@v4
with:
ref: master
- run: ls -a
- name: Set base ref variable
run: echo "BASE_REF=$(git rev-parse HEAD)" >> $GITHUB_ENV
# Checkout the justfile and env of the source branch so the base can bench
- run: git restore --source ${{ github.sha }} justfile bench.env
- run: ls -a
# Install dependencies
- uses: actions-rs/toolchain@v1
- uses: Swatinem/rust-cache@v2
- uses: taiki-e/install-action@v2
with:
tool: just@1
# Run benchmark on base branch
tool: just@1.15
- name: Install criterion
run: |
cargo install cargo-criterion
cargo install criterion-table
- name: Run GPU bench
run: just --dotenv-filename bench.env gpu-bench fibonacci
# Switch to triggering branch and run benchmark
- run: rm justfile bench.env
# Checkout base branch for comparative bench
- uses: actions/checkout@v4
with:
ref: ${{ github.sha }}
- name: Run GPU bench on source branch
run: just --dotenv-filename bench.env gpu-bench fibonacci
# Create a comparative `criterion-table` and write in commit comment
ref: master
path: master
# Copy the script so the base can bench with the same parameters
- name: Copy source script to base branch
run: cd benches && cp justfile bench.env ../master/benches
- name: Set base ref variable
run: cd master && echo "BASE_REF=$(git rev-parse HEAD)" >> $GITHUB_ENV
- run: echo ${{ env.BASE_REF }}
- name: Run GPU bench on base branch
run: cd master/benches && just --dotenv-filename bench.env gpu-bench fibonacci_lem
- name: Copy bench output to PR branch
run: cp master/${{ env.BASE_REF }}.json .
- name: Run GPU bench on PR branch
run: cd benches && just --dotenv-filename bench.env gpu-bench fibonacci_lem
# Create a `criterion-table` and write in commit comment
- name: Run `criterion-table`
run: cat ${{ env.BASE_REF }}.json ${{ github.sha }}.json | criterion-table > BENCHMARKS.md
- name: Write comparative bench on commit comment
run: cat ${{ github.sha }}.json | criterion-table > BENCHMARKS.md
- name: Write bench on commit comment
uses: peter-evans/commit-comment@v3
with:
body-path: BENCHMARKS.md
@@ -132,3 +125,4 @@ jobs:
with:
script: |
core.setFailed('Fibonacci bench regression detected')
4 changes: 2 additions & 2 deletions bench.env → benches/bench.env
@@ -1,9 +1,9 @@
# Lurk config
LURK_PERF=max-parallel-simple
LURK_RC=100,600
LURK_BENCH_NOISE_THRESHOLD=0.10
LURK_BENCH_NOISE_THRESHOLD=0.05

# CUDA config
NVIDIA_VISIBLE_DEVICES=all
NVIDIA_DRIVER_CAPABILITITES=compute,utility
EC_GPU_FRAMEWORK=cuda
EC_GPU_FRAMEWORK=cuda
33 changes: 1 addition & 32 deletions benches/fibonacci.rs
@@ -1,6 +1,5 @@
use std::{cell::RefCell, rc::Rc, sync::Arc, time::Duration};

use anyhow::anyhow;
use criterion::{
black_box, criterion_group, criterion_main, measurement, BatchSize, BenchmarkGroup,
BenchmarkId, Criterion, SamplingMode,
@@ -119,44 +118,14 @@ fn fibo_prove<M: measurement::Measurement>(
);
}

fn rc_env() -> anyhow::Result<Vec<usize>> {
std::env::var("LURK_RC")
.map_err(|e| anyhow!("Reduction count env var isn't set: {e}"))
.and_then(|rc| {
let vec: anyhow::Result<Vec<usize>> = rc
.split(',')
.map(|rc| {
rc.parse::<usize>()
.map_err(|e| anyhow!("Failed to parse RC: {e}"))
})
.collect();
vec
})
}

fn noise_threshold_env() -> anyhow::Result<f64> {
std::env::var("LURK_BENCH_NOISE_THRESHOLD")
.map_err(|e| anyhow!("Noise threshold env var isn't set: {e}"))
.and_then(|nt| {
nt.parse::<f64>()
.map_err(|e| anyhow!("Failed to parse noise threshold: {e}"))
})
}

fn fibonacci_prove(c: &mut Criterion) {
tracing_subscriber::fmt::init();
set_bench_config();
tracing::debug!("{:?}", lurk::config::LURK_CONFIG);

let reduction_counts = rc_env().unwrap_or_else(|_| vec![100]);
tracing::debug!("Fibonacci bench RCs: {:?}", &reduction_counts);
let reduction_counts = [100, 600, 700, 800, 900];
let batch_sizes = [100, 200];

let mut group: BenchmarkGroup<'_, _> = c.benchmark_group("Prove");
group.sampling_mode(SamplingMode::Flat); // This can take a *while*
group.sample_size(10);
group.noise_threshold(noise_threshold_env().unwrap_or(0.05));

let state = State::init_lurk_state().rccell();

for fib_n in batch_sizes.iter() {
31 changes: 30 additions & 1 deletion benches/fibonacci_lem.rs
@@ -1,5 +1,6 @@
use std::{cell::RefCell, rc::Rc, sync::Arc, time::Duration};

use anyhow::anyhow;
use criterion::{
black_box, criterion_group, criterion_main, measurement, BatchSize, BenchmarkGroup,
BenchmarkId, Criterion, SamplingMode,
@@ -113,14 +114,42 @@ fn fibo_prove<M: measurement::Measurement>(
);
}

fn rc_env() -> anyhow::Result<Vec<usize>> {
std::env::var("LURK_RC")
.map_err(|e| anyhow!("Reduction count env var isn't set: {e}"))
.and_then(|rc| {
let vec: anyhow::Result<Vec<usize>> = rc
.split(',')
.map(|rc| {
rc.parse::<usize>()
.map_err(|e| anyhow!("Failed to parse RC: {e}"))
})
.collect();
vec
})
}

fn noise_threshold_env() -> anyhow::Result<f64> {
std::env::var("LURK_BENCH_NOISE_THRESHOLD")
.map_err(|e| anyhow!("Noise threshold env var isn't set: {e}"))
.and_then(|nt| {
nt.parse::<f64>()
.map_err(|e| anyhow!("Failed to parse noise threshold: {e}"))
})
}

fn fibonacci_prove(c: &mut Criterion) {
tracing_subscriber::fmt::init();
set_bench_config();
tracing::debug!("{:?}", lurk::config::LURK_CONFIG);
let reduction_counts = [100, 600, 700, 800, 900];

let reduction_counts = rc_env().unwrap_or_else(|_| vec![100]);
let batch_sizes = [100, 200];
let mut group: BenchmarkGroup<'_, _> = c.benchmark_group("Prove");
group.sampling_mode(SamplingMode::Flat); // This can take a *while*
group.sample_size(10);
group.noise_threshold(noise_threshold_env().unwrap_or(0.05));

let state = State::init_lurk_state().rccell();

for fib_n in batch_sizes.iter() {
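For reference, here is a minimal standalone sketch (not part of this commit) of the env-var handling that the `rc_env()` and `noise_threshold_env()` helpers added above perform: with benches/bench.env loaded, LURK_RC=100,600 yields reduction counts [100, 600] and LURK_BENCH_NOISE_THRESHOLD=0.05 sets the Criterion noise threshold to 0.05, while unset or unparsable values fall back to the same defaults the bench uses (vec![100] and 0.05). The sketch uses only the standard library, so errors are collapsed into the fallbacks rather than reported via anyhow.

// Illustrative sketch only; mirrors rc_env()/noise_threshold_env() from
// benches/fibonacci_lem.rs, but std-only (parse failures become defaults).
use std::env;

fn main() {
    // LURK_RC="100,600" -> vec![100, 600]; unset/unparsable -> vec![100]
    let reduction_counts: Vec<usize> = env::var("LURK_RC")
        .ok()
        .and_then(|rc| rc.split(',').map(|n| n.trim().parse().ok()).collect())
        .unwrap_or_else(|| vec![100]);

    // LURK_BENCH_NOISE_THRESHOLD="0.05" -> 0.05; unset/unparsable -> 0.05
    let noise_threshold: f64 = env::var("LURK_BENCH_NOISE_THRESHOLD")
        .ok()
        .and_then(|nt| nt.trim().parse().ok())
        .unwrap_or(0.05);

    println!("RCs: {reduction_counts:?}, noise threshold: {noise_threshold}");
}

In the workflows above, these variables are supplied by `just --dotenv-filename bench.env ...`, which loads benches/bench.env (via `set dotenv-load` in the justfile) before invoking cargo criterion.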
4 changes: 2 additions & 2 deletions justfile → benches/justfile
@@ -1,5 +1,5 @@
# Install with `cargo install just`
# Usage: `just --dotenv-filename /path/to/file.env <bench|gpu-bench>`
# Usage: `just --dotenv-filename /path/to/file.env <bench|gpu-bench> <args>`
# TODO: Move dotenv-filename into justfile once the feature is available
set dotenv-load

@@ -28,7 +28,7 @@ gpu-bench +benches:
env | grep -E "LURK|EC_GPU|CUDA"
if [ '{{benches}}' != '' ]; then
for bench in {{benches}}; do
cargo criterion --bench $bench --features "cuda" --message-format=json 2>&1 > {{commit}}.json
cargo criterion --bench $bench --features "cuda" --message-format=json 2>&1 > ../{{commit}}.json
done
else
echo "Invalid input, enter at least one non-empty string"