Add a custom runner #1

name: Build and test reusable workflow
run-name: ${{ inputs.run_name }} - ${{ inputs.python_version }} - ${{ inputs.runner_label || 'default' }}
on:
  workflow_call:
    inputs:
      device:
        description: Device
        type: string
        default: max1100
      driver_version:
        description: Driver version
        type: string
        default: rolling
      runner_label:
        description: Runner label, keep empty for default
        type: string
        default: ""
      pytorch_ref:
        description: PyTorch ref, keep empty for default
        type: string
        default: ""
      pytorch_mode:
        description: PyTorch mode, source or wheels
        type: string
        default: "source"
      python_version:
        description: Python version
        type: string
        required: true
      upload_test_reports:
        description: Upload test reports
        type: boolean
        default: false
      ignore_errors:
        description: Ignore test errors
        type: boolean
        default: false
      skip_list:
        description: Skip list
        type: string
        default: ""
      run_name:
        description: Custom run name
        type: string
        default: Build and test
      build_llvm:
        description: Build LLVM
        type: boolean
        default: false
      enable_unskip:
        description: Ignore pytest.skip
        type: boolean
        default: false
      runner_version:
        description: Runner label for version
        type: string
        default: runner-0.0.19
      env_manager:
        # workflow_call inputs only support boolean, number and string types
        # (choice is workflow_dispatch-only), so the allowed values are listed
        # in the description instead.
        description: Environment manager, one of default, conda or nobasekit
        type: string
        default: default
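
# A minimal caller sketch (hypothetical, for illustration only; the job name
# and input values below are assumptions, not part of this repository):
#
#   jobs:
#     build-test:
#       uses: ./.github/workflows/build-test-reusable.yml
#       with:
#         python_version: "3.10"
#         runner_label: my-custom-runner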
permissions: read-all
env:
  TRITON_DISABLE_LINE_INFO: 1
  TEST_UNSKIP: ${{ inputs.enable_unskip }}
jobs:
  integration-tests:
    name: Integration tests
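    # Runner selection: an explicit runner_label takes precedence; otherwise
    # the job targets a runner carrying all three labels (device, driver
    # version, runner version).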
    runs-on: ${{ fromJson(inputs.runner_label && format('["{0}"]', inputs.runner_label) || format('["{0}", "{1}", "{2}"]', inputs.device, inputs.driver_version, inputs.runner_version)) }}
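    # Each run step executes through a bash wrapper that prepares the
    # environment (oneAPI setvars.sh plus the per-env-manager runner script)
    # before sourcing the step body; {0} is the step's generated script file.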
    defaults:
      run:
        shell: bash --noprofile --norc -eo pipefail -c "source /opt/intel/oneapi/setvars.sh > /dev/null; source scripts/runner_${{ inputs.env_manager }}.sh; source {0}"

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Print inputs
        run: |
          cat <<EOF
          ${{ toJSON(inputs) }}
          EOF
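      # The cache key combines the Python version, a hash of the packaging
      # files and a manual CACHE_NUMBER; bumping CACHE_NUMBER forces a fresh
      # cache.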
      - name: Load pip cache
        id: pip-cache
        uses: ./.github/actions/load
        env:
          # Increase this value to reset cache
          CACHE_NUMBER: 1
        with:
          path: $HOME/.cache/pip
          key: pip-${{ inputs.python_version }}-${{ hashFiles('python/pyproject.toml', 'python/setup.py') }}-${{ env.CACHE_NUMBER }}
      - name: Install Python ${{ inputs.python_version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ inputs.python_version }}
      - name: Setup PyTorch
        uses: ./.github/actions/setup-pytorch
        with:
          repository: pytorch/pytorch
          ref: ${{ inputs.pytorch_ref }}
          mode: ${{ inputs.pytorch_mode }}
      - name: Install pass_rate dependencies
        run: |
          pip install defusedxml
      - name: Setup Triton
        uses: ./.github/actions/setup-triton
        with:
          build_llvm: ${{ inputs.build_llvm }}
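      # Skiplist resolution order: the explicit skip_list input, then a
      # directory named after the driver version, then scripts/skiplist/default;
      # if the resolved directory does not exist, no --skip-list flag is passed.
      # The resulting command line is exported via GITHUB_ENV for reuse below.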
      - name: Create test-triton command line
        run: |
          if [[ -n "${{ inputs.skip_list }}" ]]; then
            skiplist="$GITHUB_WORKSPACE/scripts/skiplist/${{ inputs.skip_list }}"
          elif [[ -n "${{ inputs.driver_version }}" ]]; then
            skiplist="$GITHUB_WORKSPACE/scripts/skiplist/${{ inputs.driver_version }}"
          else
            skiplist="$GITHUB_WORKSPACE/scripts/skiplist/default"
          fi
          if [ -d "$skiplist" ]; then
            skiplist="--skip-list $skiplist"
          else
            skiplist=
          fi
          {
            echo SKIPLIST="$skiplist"
            echo TRITON_TEST_CMD="bash -v -x scripts/test-triton.sh --warning-reports --skip-pytorch-install --reports-dir $GITHUB_WORKSPACE/reports ${{ inputs.ignore_errors && '--ignore-errors' || '' }} $skiplist"
          } | tee -a $GITHUB_ENV
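      # All suites reuse the command line built above; only the first run
      # installs Triton into the environment, the rest pass --skip-pip-install.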
      - name: Run unit tests
        run: |
          ${{ env.TRITON_TEST_CMD }} --unit
      - name: Run core tests
        run: |
          ${{ env.TRITON_TEST_CMD }} --core --skip-pip-install
      - name: Run interpreter tests
        run: |
          ${{ env.TRITON_TEST_CMD }} --interpreter --skip-pip-install
      - name: Run Tutorials
        run: |
          ${{ env.TRITON_TEST_CMD }} --tutorial --skip-pip-install
      - name: Run instrumentation tests
        run: |
          ${{ env.TRITON_TEST_CMD }} --instrumentation --skip-pip-install
      - name: Get transformers version
        run: |
          cd pytorch
          TRANSFORMERS_VERSION="$(<.ci/docker/ci_commit_pins/huggingface.txt)"
          echo "TRANSFORMERS_VERSION=$TRANSFORMERS_VERSION" | tee -a $GITHUB_ENV
      - name: Install transformers
        if: ${{ inputs.python_version != '3.12' }}
        uses: ./.github/actions/install-dependency
        with:
          package: transformers
          repository: huggingface/transformers
          ref: ${{ env.TRANSFORMERS_VERSION }}
          try-tag-prefix: v
      - name: Run E2E test
        if: ${{ inputs.python_version != '3.12' }}
        run: |
          # Set WORKSPACE for inductor_xpu_test.sh to make sure it creates "inductor_log" outside of the cloned pytorch directory
          export WORKSPACE=$GITHUB_WORKSPACE
          cd pytorch
          pip install pyyaml pandas scipy numpy psutil pyre_extensions torchrec
          # TODO: Find the fastest Hugging Face model
          $GITHUB_WORKSPACE/scripts/inductor_xpu_test.sh huggingface float32 inference accuracy xpu 0 static 1 0 AlbertForMaskedLM
          # The script above always returns 0, so an additional check is needed to verify that the accuracy test passed
          cat $WORKSPACE/inductor_log/*/*/*.csv
          grep AlbertForMaskedLM $WORKSPACE/inductor_log/*/*/*.csv | grep -q ,pass,
      - name: Save pip cache
        if: ${{ steps.pip-cache.outputs.status == 'miss' }}
        uses: ./.github/actions/save
        with:
          path: ${{ steps.pip-cache.outputs.path }}
          dest: ${{ steps.pip-cache.outputs.dest }}
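      # pass_rate.py aggregates the reports produced by test-triton.sh into
      # overall and per-suite pass rates, as plain text and as JSON.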
      - name: Pass rate
        run: |
          source ./scripts/capture-hw-details.sh
          python3 scripts/pass_rate.py --reports reports ${{ env.SKIPLIST }}
          python3 scripts/pass_rate.py --reports reports --json ${{ env.SKIPLIST }} > pass_rate.json
          python3 scripts/pass_rate.py --reports reports --suite tutorials --json ${{ env.SKIPLIST }} > pass_rate_tutorials.json
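      # capture-hw-details.sh exports the driver/GPU variables used below; the
      # resulting .env file is uploaded alongside the tutorials performance
      # report.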
      - name: Report environment details
        run: |
          source ./scripts/capture-hw-details.sh --quiet
          cat <<EOF | tee .env
          TIMESTAMP=$(date '+%Y%m%d%H%M%S')
          GITHUB_RUN_ID=$GITHUB_RUN_ID
          GITHUB_RUN_NUMBER=$GITHUB_RUN_NUMBER
          GITHUB_RUN_ATTEMPT=$GITHUB_RUN_ATTEMPT
          PYTHON_VERSION=${{ inputs.python_version }}
          PYTORCH_REPO=$PYTORCH_REPO
          PYTORCH_COMMIT_ID=$PYTORCH_COMMIT_ID
          PYTORCH_VERSION=$PYTORCH_VERSION
          TRITON_REPO=$GITHUB_REPOSITORY
          LIBIGC1_VERSION=$LIBIGC1_VERSION
          LEVEL_ZERO_VERSION=$LEVEL_ZERO_VERSION
          GPU_DEVICE=$GPU_DEVICE
          AGAMA_VERSION=$AGAMA_VERSION
          EOF
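      # Artifact names embed the Python version and the runner label (or driver
      # version) so artifacts from parallel jobs in the same run stay unique.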
      - name: Upload pass rate report
        # upload reports only for the default branch
        if: github.ref_name == 'main'
        uses: actions/upload-artifact@v4
        with:
          name: pass_rate-${{ inputs.python_version }}-${{ inputs.runner_label || inputs.driver_version }}
          path: pass_rate*.json
      - name: Upload tutorials performance report
        uses: actions/upload-artifact@v4
        with:
          name: tutorials-${{ inputs.python_version }}-${{ inputs.runner_label || inputs.driver_version }}
          if-no-files-found: warn
          include-hidden-files: true
          path: |
            reports/*/*.csv
            .env
      - name: Upload test reports
        if: inputs.upload_test_reports
        uses: actions/upload-artifact@v4
        with:
          name: test-reports-${{ inputs.python_version }}-${{ inputs.runner_label || inputs.driver_version }}
          path: reports