# GitHub Actions workflow: Olive Recipes CI
# (Recovered from the GitHub "Workflow file for this run" view, run #5.)

---
name: Olive Recipes CI

permissions:
  contents: read

on:
  schedule:
    # Weekly run: Fridays at 15:00 UTC.
    - cron: '0 15 * * 5'
  push:
    branches:
      - main
  workflow_dispatch:
    inputs:
      test_scope:
        type: choice
        description: 'Which tests to run'
        options:
          - all
          - linux-cpu-only
          - linux-gpu-only
          - windows-cpu-only
        default: all

env:
  # Quoted so YAML does not read 3.10 as the float 3.1.
  PYTHON_VERSION: "3.10"
jobs:
  linux-cpu:
    name: Linux CPU - ${{ matrix.test.name }}
    # Run for manual dispatch when the scope matches; the '' comparison keeps
    # the job enabled for schedule/push events, where inputs are empty.
    if: |
      github.event.inputs.test_scope == 'all' ||
      github.event.inputs.test_scope == 'linux-cpu-only' ||
      github.event.inputs.test_scope == ''
    runs-on: ubuntu-latest
    timeout-minutes: 120
    strategy:
      # Let every matrix entry finish even if a sibling fails.
      fail-fast: false
      matrix:
        test:
          - name: "PTQ"
            path: "intel-bert-base-uncased-mrpc/oci/cpu"
            config: "ptq.json"
          - name: "INC Smooth Quant"
            path: "intel-bert-base-uncased-mrpc/oci/cpu"
            config: "inc_smooth_quant.json"
          - name: "ResNet-50 Session Param Tuning"
            path: "microsoft-resnet-50/olive"
            config: "resnet_cpu.json"
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: ${{ env.PYTHON_VERSION }}
      - name: Install Olive
        run: pip install git+https://github.com/microsoft/Olive.git@main
      - name: Install optimum-onnx for HuggingFace export
        run: pip install optimum-onnx
      - name: Install requirements
        working-directory: ${{ matrix.test.path }}
        run: |
          if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
      - name: Run Olive workflow and verify output
        working-directory: ${{ matrix.test.path }}
        env:
          HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
        run: python ${{ github.workspace }}/.github/scripts/verify_model.py ${{ matrix.test.config }}
  # ========================================
  # GPU TESTS TEMPORARILY DISABLED and Will re-enable once pool hardware is upgraded
  # ========================================
  # linux-gpu:
  #   name: Linux GPU - ${{ matrix.name }}
  #   if: |
  #     github.event.inputs.test_scope == 'all' ||
  #     github.event.inputs.test_scope == 'linux-gpu-only' ||
  #     github.event.inputs.test_scope == ''
  #   runs-on: ["self-hosted", "1ES.Pool=olive-github-ubuntu2204-cuda-A10"]
  #   timeout-minutes: 180
  #   strategy:
  #     fail-fast: false
  #     max-parallel: 1
  #     matrix:
  #       include:
  #         - name: "Dora"
  #           path: "meta-llama-Llama-3.2-1B-Instruct/olive"
  #           config: "dora.json"
  #         - name: "HQQ"
  #           path: "meta-llama-Llama-3.2-1B-Instruct/olive"
  #           config: "hqq.json"
  #         - name: "Lmeval"
  #           path: "meta-llama-Llama-3.2-1B-Instruct/olive"
  #           config: "lmeval.json"
  #         - name: "Lmeval-ONNX"
  #           path: "meta-llama-Llama-3.2-1B-Instruct/olive"
  #           config: "lmeval_onnx.json"
  #         - name: "Loha"
  #           path: "meta-llama-Llama-3.2-1B-Instruct/olive"
  #           config: "loha.json"
  #         - name: "Lokr"
  #           path: "meta-llama-Llama-3.2-1B-Instruct/olive"
  #           config: "lokr.json"
  #         - name: "QLoRA"
  #           path: "meta-llama-Llama-3.2-1B-Instruct/olive"
  #           config: "qlora.json"
  #         - name: "RTN"
  #           path: "meta-llama-Llama-3.2-1B-Instruct/olive"
  #           config: "rtn.json"
  #
  #   steps:
  #     - name: DEBUG - Print system and GPU information
  #       run: |
  #         echo "=========================================="
  #         echo "DEBUG: This step is running!"
  #         echo "=========================================="
  #         uname -a || true
  #         echo "--- Attempting to get Azure VM metadata ---"
  #         curl -H Metadata:true --noproxy "*" "http://169.254.169.254/metadata/instance?api-version=2021-02-01" 2>/dev/null | jq '.' || echo "Metadata not available"
  #         echo "--- GPU Information ---"
  #         nvidia-smi --query-gpu=name,compute_cap,driver_version,memory.total --format=csv || echo "nvidia-smi failed"
  #         nvidia-smi || echo "nvidia-smi not found"
  #         echo "=========================================="
  #         echo "DEBUG: Step completed!"
  #         echo "=========================================="
  #
  #     - uses: actions/checkout@v4
  #
  #     - uses: actions/setup-python@v5
  #       with:
  #         python-version: ${{ env.PYTHON_VERSION }}
  #
  #     - name: Install Olive
  #       run: pip install git+https://github.com/microsoft/Olive.git@main
  #
  #     - name: Install ONNX Runtime GPU
  #       run: pip install onnxruntime-gpu
  #
  #     - name: Install optimum-onnx for HuggingFace export
  #       run: pip install optimum-onnx
  #
  #     - name: Install requirements
  #       working-directory: ${{ matrix.path }}
  #       run: |
  #         if [ -f requirements.txt ]; then
  #           pip install -r requirements.txt
  #         fi
  #
  #     - name: Run Olive workflow and verify output
  #       working-directory: ${{ matrix.path }}
  #       env:
  #         HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
  #       run: python ${{ github.workspace }}/.github/scripts/verify_model.py ${{ matrix.config }}
  #
  #     - name: Cleanup GPU
  #       if: always()
  #       run: |
  #         nvidia-smi
  #         python -c "import torch; torch.cuda.empty_cache()" || true
  #     - name: Upload artifacts
  #       if: always()
  #       uses: actions/upload-artifact@v4
  #       with:
  #         name: linux-gpu-${{ matrix.name }}-output
  #         path: ${{ matrix.path }}/models/
  #         retention-days: 7
windows-cpu:
name: Windows CPU - ${{ matrix.test.name }}
if: |
github.event.inputs.test_scope == 'all' ||
github.event.inputs.test_scope == 'windows-cpu-only' ||
github.event.inputs.test_scope == ''
runs-on: windows-latest
timeout-minutes: 120
strategy:
fail-fast: false
matrix:
test:
- name: "PTQ"
path: "intel-bert-base-uncased-mrpc/oci/cpu"
config: "ptq.json"
# Temporarily disabled - has dtype issues
# - name: "INC Smooth Quant"
# path: "intel-bert-base-uncased-mrpc/oci/cpu"
# config: "inc_smooth_quant.json"
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
- name: Install Olive
run: pip install git+https://github.com/microsoft/Olive.git@main
- name: Install optimum-onnx for HuggingFace export
run: pip install optimum-onnx
- name: Install requirements
working-directory: ${{ matrix.test.path }}
shell: pwsh
run: |
if (Test-Path requirements.txt) { pip install -r requirements.txt }
- name: Run Olive workflow and verify output
working-directory: ${{ matrix.test.path }}
shell: pwsh
env:
HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
PYTHONIOENCODING: utf-8
run: python ${{ github.workspace }}/.github/scripts/verify_model.py ${{ matrix.test.config }}