Files
valkey/.github/workflows/benchmark-release.yml
T
Roshan Khatri 9000e26ecf Pin workflow pip/go/npm dependencies for OpenSSF compliance (#3276)
Pin package manager dependencies in CI workflows to improve the Pinned-Dependencies
score in OpenSSF Scorecard.

Changes:
- benchmark-on-label.yml, benchmark-release.yml: add `--require-hashes`
  to `pip install`, building on the corresponding change in the
  valkey-perf-benchmark repo:
  https://github.com/valkey-io/valkey-perf-benchmark/pull/44
- ci.yml: pin `yamlfmt` to `v0.21.0` instead of `@latest`
- reply-schemas-linter.yml: use npm ci with `package-lock.json` instead
  of unpinned npm install, package files in `utils/reply-schema-linter/`

Signed-off-by: Roshan Khatri <rvkhatri@amazon.com>
Signed-off-by: Roshan Khatri <rvkhatri@amazon.com>
2026-03-20 15:11:00 +01:00

289 lines
11 KiB
YAML

# Manually triggered workflow that benchmarks two Valkey versions (commit
# SHA, branch, or tag) against each other on self-hosted x86 and arm64
# runners, then posts a combined comparison report to a GitHub issue.
name: Compare Valkey Versions
on:
  workflow_dispatch:
    inputs:
      version1:
        description: "First version to compare (commit SHA, branch, or tag)"
        required: true
        type: string
      version2:
        description: "Second version to compare (commit SHA, branch, or tag)"
        required: true
        type: string
      issue_id:
        description: "Issue ID to comment results on"
        required: true
        type: string
      runs:
        description: "Number of benchmark runs per configuration"
        required: false
        type: number
        default: 1
# Fail fast in all run: steps; -x traces commands for easier debugging.
defaults:
  run:
    shell: "bash -Eeuo pipefail -x {0}"
permissions:
  contents: read
  pull-requests: write
  issues: write
jobs:
  benchmark:
    # Only run in the upstream repo — the self-hosted runners live there.
    if: github.repository == 'valkey-io/valkey'
    strategy:
      matrix:
        include:
          - arch: x86
            machine: ec2-al-2023-pr-benchmarking-x86
            config: benchmark-config-x86.json
          - arch: arm64
            machine: ec2-al-2023-pr-benchmarking-arm64
            config: benchmark-config-arm.json
    # Serialize runs per benchmark machine so concurrent workflows cannot
    # skew each other's measurements.
    concurrency:
      group: ${{ matrix.machine }}
      cancel-in-progress: false
    runs-on: ["self-hosted", "${{ matrix.machine }}"]
    # NOTE(review): 7200 minutes is 5 days — confirm this was not meant to
    # be 7200 seconds (i.e. 120 minutes).
    timeout-minutes: 7200
    # Route user-controlled workflow inputs through environment variables
    # instead of inlining ${{ ... }} into run: scripts, so shell
    # metacharacters in a branch/tag name cannot inject commands.
    env:
      VERSION1: ${{ github.event.inputs.version1 }}
      VERSION2: ${{ github.event.inputs.version2 }}
      ISSUE_ID: ${{ github.event.inputs.issue_id }}
      RUNS: ${{ github.event.inputs.runs }}
    steps:
      - name: Validate inputs
        run: |
          echo "Version 1: $VERSION1"
          echo "Version 2: $VERSION2"
          echo "Issue ID: $ISSUE_ID"
          echo "Architecture: ${{ matrix.arch }}"
          echo "Config: ${{ matrix.config }}"
          echo "Runs: $RUNS"
          # Validate issue ID is numeric
          if ! [[ "$ISSUE_ID" =~ ^[0-9]+$ ]]; then
            echo "Error: Issue ID must be a number"
            exit 1
          fi
          # Validate runs is a positive number
          if ! [[ "$RUNS" =~ ^[1-9][0-9]*$ ]]; then
            echo "Error: Runs must be a positive number"
            exit 1
          fi
      # Checkout unstable to build the *latest* valkey-benchmark binary,
      # so both compared versions are driven by the same benchmark tool.
      - name: Checkout valkey for latest benchmark.
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        with:
          repository: valkey-io/valkey
          ref: "unstable"
          path: valkey_latest
          fetch-depth: 0
          persist-credentials: false
      # Workflow's own ref — provides .github/benchmark_configs/.
      - name: Checkout valkey
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        with:
          path: valkey
          # Consistent with the other checkout steps: no credentials
          # persisted on the self-hosted runner.
          persist-credentials: false
      - name: Checkout valkey-perf-benchmark
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        with:
          repository: ${{ github.repository_owner }}/valkey-perf-benchmark
          path: valkey-perf-benchmark
          fetch-depth: 1
          persist-credentials: false
      - name: Set up Python
        uses: kishaningithub/setup-python-amazon-linux@a326cdc792983fe0fbd04c81d3d62b59b6123a6c # v1.1.0
        with:
          python-version: "3.10"
          cache: "pip"
      - name: Install dependencies
        working-directory: valkey-perf-benchmark
        run: |
          sudo dnf groupinstall "Development Tools" -y
          sudo dnf install -y gcc gcc-c++ make \
            python3-devel \
            openssl-devel \
            bzip2-devel \
            libffi-devel
          # --require-hashes pins every transitive dependency (OpenSSF
          # Pinned-Dependencies compliance).
          pip install --require-hashes -r requirements.txt
      - name: Build latest valkey_latest
        working-directory: valkey_latest
        run: |
          echo "Building latest valkey-benchmark for latest benchmark executable..."
          # Clean any previous builds
          make distclean || true
          # Build valkey-benchmark with latest code
          make -j$(nproc)
          # Verify the binary was created
          if [[ -f "src/valkey-benchmark" ]]; then
            echo "✓ Successfully built latest valkey-benchmark"
            ls -la src/valkey-benchmark
            # Test the binary
            echo "Testing valkey-benchmark binary..."
            ./src/valkey-benchmark --version || echo "Version check completed"
          else
            echo "Failed to build valkey-benchmark"
            exit 1
          fi
          # Store the absolute path for later use
          VALKEY_BENCHMARK_PATH="$(pwd)/src/valkey-benchmark"
          echo "VALKEY_BENCHMARK_PATH=$VALKEY_BENCHMARK_PATH" >> $GITHUB_ENV
          echo "Latest valkey-benchmark path: $VALKEY_BENCHMARK_PATH"
      - name: Run benchmarks
        working-directory: valkey-perf-benchmark
        env:
          EC2_X86_IP: ${{ secrets.EC2_X86_IP }}
          EC2_ARM64_IP: ${{ secrets.EC2_ARM64_IP }}
        run: |
          # Set the target IP based on the matrix architecture
          if [[ "${{ matrix.arch }}" == "x86" ]]; then
            TARGET_IP=$EC2_X86_IP
            echo "Using x86 machine IP"
          elif [[ "${{ matrix.arch }}" == "arm64" ]]; then
            TARGET_IP=$EC2_ARM64_IP
            echo "Using ARM64 machine IP"
          else
            echo "Error: Unknown architecture: ${{ matrix.arch }}"
            exit 1
          fi
          CONFIG_FILE="../valkey/.github/benchmark_configs/${{ matrix.config }}"
          # Verify our custom valkey-benchmark exists
          if [[ ! -f "$VALKEY_BENCHMARK_PATH" ]]; then
            echo "Custom valkey-benchmark not found at: $VALKEY_BENCHMARK_PATH"
            exit 1
          fi
          echo "Using custom valkey-benchmark from: $VALKEY_BENCHMARK_PATH"
          # Base benchmark arguments with custom valkey-benchmark path;
          # inputs are quoted env vars so arbitrary ref names stay single
          # arguments and cannot inject shell syntax.
          BENCHMARK_ARGS=(
            --config "$CONFIG_FILE"
            --commits "$VERSION1" "$VERSION2"
            --valkey-benchmark-path "$VALKEY_BENCHMARK_PATH"
            --target-ip "$TARGET_IP"
            --results-dir "results"
            --runs "$RUNS"
          )
          echo "Running benchmarks with the following setup:"
          echo "- Version 1: $VERSION1"
          echo "- Version 2: $VERSION2"
          echo "- Architecture: ${{ matrix.arch }}"
          echo "- Config: ${{ matrix.config }}"
          echo "- Using latest valkey-benchmark: $VALKEY_BENCHMARK_PATH"
          echo "- This ensures both versions use the same (latest) benchmark tool for consistent results"
          # Run benchmark with custom valkey-benchmark executable
          python ./benchmark.py "${BENCHMARK_ARGS[@]}"
      - name: Compare results
        working-directory: valkey-perf-benchmark
        run: |
          # Find the actual result directories (they might have different names than input)
          VERSION1_DIR=$(find ./results -maxdepth 1 -type d -name "*${VERSION1}*" | head -1)
          VERSION2_DIR=$(find ./results -maxdepth 1 -type d -name "*${VERSION2}*" | head -1)
          if [[ -z "$VERSION1_DIR" ]]; then
            echo "Could not find results for version1: $VERSION1"
            echo "Available result directories:"
            ls -la ./results/
            exit 1
          fi
          if [[ -z "$VERSION2_DIR" ]]; then
            echo "Could not find results for version2: $VERSION2"
            echo "Available result directories:"
            ls -la ./results/
            exit 1
          fi
          echo "Comparing results:"
          echo "Version 1 ($VERSION1): $VERSION1_DIR"
          echo "Version 2 ($VERSION2): $VERSION2_DIR"
          # Generate RPS-focused comparison and graphs for GitHub comments
          python utils/compare_benchmark_results.py \
            --baseline "$VERSION1_DIR/metrics.json" \
            --new "$VERSION2_DIR/metrics.json" \
            --output ../comparison.md \
            --metrics rps
      - name: Upload artifacts
        if: always()
        continue-on-error: true
        uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
        with:
          # issue_id was validated as numeric above, so it is safe in the
          # artifact name (a with: field, not a shell script).
          name: benchmark-results-${{ matrix.arch }}-${{ github.event.inputs.issue_id }}
          path: |
            ./valkey-perf-benchmark/results/*
            comparison.md
      - name: Cleanup any running valkey processes and files
        if: always()
        continue-on-error: true
        run: |
          pkill -f valkey || echo "No valkey processes found to kill"
          # Deliberately wipes the job workspace on the persistent
          # self-hosted runner so the next run starts clean.
          rm -rf *
combine-results:
needs: benchmark
runs-on: ubuntu-latest
steps:
- name: Download all artifacts
uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
with:
path: artifacts
- name: Combine results and create comprehensive report
run: |
echo "# Multi-Architecture Benchmark Comparison: ${{ github.event.inputs.version1 }} vs ${{ github.event.inputs.version2 }}" > combined_report.md
echo "" >> combined_report.md
echo "**Versions Compared:**" >> combined_report.md
echo "- Version 1: \`${{ github.event.inputs.version1 }}\`" >> combined_report.md
echo "- Version 2: \`${{ github.event.inputs.version2 }}\`" >> combined_report.md
echo "" >> combined_report.md
echo "**Runs:** ${{ github.event.inputs.runs }} per configuration" >> combined_report.md
echo "**Workflow Run:** [View Details](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})" >> combined_report.md
echo "" >> combined_report.md
echo "---" >> combined_report.md
echo "" >> combined_report.md
# Process each architecture's results
for arch in x86 arm64; do
artifact_dir="artifacts/benchmark-results-${arch}-${{ github.event.inputs.issue_id }}"
if [[ -d "$artifact_dir" && -f "$artifact_dir/comparison.md" ]]; then
echo "## ${arch^^} Architecture Results" >> combined_report.md
echo "" >> combined_report.md
cat "$artifact_dir/comparison.md" >> combined_report.md
echo "" >> combined_report.md
echo "---" >> combined_report.md
echo "" >> combined_report.md
fi
done
- name: Comment on issue with combined results
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
const fs = require('fs');
// Read the combined report
const body = fs.readFileSync('combined_report.md', 'utf8');
await github.rest.issues.createComment({
issue_number: ${{ github.event.inputs.issue_id }},
owner: context.repo.owner,
repo: context.repo.repo,
body: body
});