name: Benchmarks

on:
  push:
    branches:
      - master
      - update_benchmark

jobs:
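  # Every job runs only on tinygrad's self-hosted runners, pipes its output
  # through tee into a log file, and uploads those logs as artifacts. The env
  # vars on the run lines (JIT, BEAM, HALF, DEBUG, ...) are tinygrad runtime
  # flags; the inline notes on them below are our reading of the flags, not
  # authoritative documentation.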
  testmacbenchmark:
    name: Mac Benchmark
    runs-on: [self-hosted, macOS]
    defaults:
      run:
        shell: bash -o pipefail {0}
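        # pipefail makes a `cmd | tee log` step fail when cmd fails,
        # not only when tee does; most steps below depend on this.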
    if: github.repository_owner == 'tinygrad'
    env:
      PYTHONPATH: .
    steps:
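    # Models and datasets are pre-downloaded on the runner (under ~/tinygrad);
    # the symlink step just wires them into the fresh checkout.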
    - name: Checkout Code
      uses: actions/checkout@v4
    - name: Symlink models and datasets
      run: |
        mkdir -p weights
        ln -s ~/tinygrad/disassemblers/applegpu disassemblers/applegpu
        ln -s ~/tinygrad/weights/sd-v1-4.ckpt weights/sd-v1-4.ckpt
        ln -s ~/tinygrad/weights/bpe_simple_vocab_16e6.txt.gz weights/bpe_simple_vocab_16e6.txt.gz
        ln -s ~/tinygrad/weights/LLaMA weights/LLaMA
        ln -s ~/tinygrad/extra/datasets/cifar-10-python.tar.gz extra/datasets/cifar-10-python.tar.gz
    # TODO: why is this test not reliable?
    #- name: Run Stable Diffusion
    #  run: python3 examples/stable_diffusion.py --seed 0 --noshow --timing | tee sd.txt
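    # METAL=1 selects tinygrad's Metal backend; external_model_benchmark.py
    # times a set of ONNX models and, judging by the artifact list below,
    # writes onnx_inference_speed.csv.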
    - name: Run model inference benchmark
      run: METAL=1 python3 test/external/external_model_benchmark.py
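    # BIG=2 runs the larger test sizes; MPS=1 puts the torch baseline on
    # Apple's MPS backend (our reading of test_speed_v_torch.py).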
    - name: Test speed vs torch
      run: BIG=2 MPS=1 python3 test/test_speed_v_torch.py | tee torch_speed.txt
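    # DEBUG=2 prints per-kernel names and timings; HALF=1 switches the matmul
    # to float16, which is what actually exercises the tensor cores.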
    - name: Run Tensor Core GEMM
      run: |
        DEBUG=2 python3 extra/gemm/simple_matmul.py | tee matmul.txt
        DEBUG=2 HALF=1 python3 extra/gemm/simple_matmul.py | tee matmul_half.txt
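    # JIT toggles tinygrad's TinyJit kernel capture; running the same prompt
    # with JIT=0 and JIT=1 gives an unjitted/jitted comparison.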
    - name: Run LLaMA
      run: |
        JIT=0 python3 examples/llama.py --gen 1 --prompt "Hello." --count 10 --temperature 0 --timing | tee llama_unjitted.txt
        JIT=1 python3 examples/llama.py --gen 1 --prompt "Hello." --count 10 --temperature 0 --timing | tee llama_jitted.txt
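    # BEAM=2 beam-searches kernel optimizations at width 2; CACHELEVEL=0 keeps
    # the search from reusing cached results, so its cost is measured on every
    # run (our understanding of these flags).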
    - name: Run LLaMA with BEAM
      run: JIT=1 BEAM=2 CACHELEVEL=0 python3 examples/llama.py --gen 1 --prompt "Hello." --count 10 --temperature 0 --timing | tee llama_beam.txt
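    # --shard 4 splits the model across 4 devices; on this Mac those are 4
    # virtual instances of the one GPU, hence "(virtual)" in the step name.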
    - name: Run LLaMA 7B on 4 (virtual) GPUs
      run: JIT=1 python3 examples/llama.py --gen 1 --size 7B --shard 4 --prompt "Hello." --count 10 --temperature 0 --timing | tee llama_four_gpu.txt
    - name: Run GPT2
      run: |
        JIT=0 python3 examples/gpt2.py --prompt "Hello." --count 10 --temperature 0 --timing | tee gpt2_unjitted.txt
        JIT=1 python3 examples/gpt2.py --prompt "Hello." --count 10 --temperature 0 --timing | tee gpt2_jitted.txt
    - name: Run GPT2 w HALF
      run: JIT=1 HALF=1 python3 examples/gpt2.py --count 10 --temperature 0 --timing | tee gpt2_half.txt
    - name: Run GPT2 w HALF/BEAM
      run: JIT=1 HALF=1 BEAM=2 CACHELEVEL=0 python3 examples/gpt2.py --count 10 --temperature 0 --timing | tee gpt2_half_beam.txt
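    # TARGET_EVAL_ACC_PCT makes beautiful_mnist.py fail unless eval accuracy
    # reaches the target, so this step checks correctness as well as speed
    # (our reading of the flag).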
    - name: Train MNIST
      run: time PYTHONPATH=. TARGET_EVAL_ACC_PCT=97.3 python3 examples/beautiful_mnist.py | tee beautiful_mnist.txt
    - name: Run 10 CIFAR training steps
      run: STEPS=10 python3 examples/hlb_cifar10.py | tee train_cifar.txt
    - name: Run 10 CIFAR training steps w HALF
      run: STEPS=10 DEFAULT_FLOAT=HALF python3 examples/hlb_cifar10.py | tee train_cifar_half.txt
    #- name: Run 10 CIFAR training steps w BF16
    #  run: STEPS=10 DEFAULT_FLOAT=BFLOAT16 python3 examples/hlb_cifar10.py | tee train_cifar_bf16.txt
    # TODO: this is flaky too
    # - name: Run 10 CIFAR training steps w winograd
    #   run: WINO=1 STEPS=10 python3 examples/hlb_cifar10.py | tee train_cifar_wino.txt
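    # sd.txt, train_cifar_bf16.txt, and train_cifar_wino.txt come from steps
    # that are currently commented out; to the best of our knowledge
    # upload-artifact only warns about the missing files.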
    - uses: actions/upload-artifact@v4
      with:
        name: Speed (Mac)
        path: |
          onnx_inference_speed.csv
          torch_speed.txt
          llama_unjitted.txt
          llama_jitted.txt
          llama_beam.txt
          llama_four_gpu.txt
          gpt2_unjitted.txt
          gpt2_jitted.txt
          gpt2_half.txt
          gpt2_half_beam.txt
          matmul.txt
          matmul_half.txt
          sd.txt
          beautiful_mnist.txt
          train_cifar.txt
          train_cifar_half.txt
          train_cifar_bf16.txt
          train_cifar_wino.txt

  testnvidiabenchmark:
    name: NVIDIA Benchmark
    runs-on: [self-hosted, Linux, CUDA]
    defaults:
      run:
        shell: bash -o pipefail {0}
    if: github.repository_owner == 'tinygrad'
    env:
      PYTHONPATH: .
    steps:
    - name: Checkout Code
      uses: actions/checkout@v4
    - name: Print nvidia-smi
      run: nvidia-smi
    - name: Symlink models and datasets
      run: |
        mkdir -p weights
        ln -s ~/tinygrad/weights/LLaMA weights/LLaMA
    - name: Run model inference benchmark
      run: CUDA=1 python3 test/external/external_model_benchmark.py
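    # TORCHCUDA=1 points the torch baseline at CUDA so both frameworks run on
    # the same GPU (our reading of test_speed_v_torch.py).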
    - name: Test speed vs torch
      run: CUDA=1 BIG=2 TORCHCUDA=1 python3 test/test_speed_v_torch.py | tee torch_speed.txt
    - name: Run Tensor Core GEMM (CUDA)
      run: |
        CUDA=1 HALF=1 DEBUG=2 python3 extra/gemm/simple_matmul.py | tee matmul.txt
        CUDA=1 BFLOAT16=1 DEBUG=2 python3 extra/gemm/simple_matmul.py | tee matmul_bfloat16.txt
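    # PTX=1 has tinygrad emit PTX directly instead of compiling CUDA C, as we
    # understand the flag; otherwise this is the same GEMM as above.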
    - name: Run Tensor Core GEMM (PTX)
      run: CUDA=1 PTX=1 HALF=1 DEBUG=2 python3 extra/gemm/simple_matmul.py | tee matmul_ptx.txt
    - name: Run LLaMA
      run: |
        CUDA=1 JIT=0 python3 examples/llama.py --gen 1 --prompt "Hello." --count 10 --temperature 0 --timing | tee llama_unjitted.txt
        CUDA=1 JIT=1 python3 examples/llama.py --gen 1 --prompt "Hello." --count 10 --temperature 0 --timing | tee llama_jitted.txt
    - name: Run LLaMA with BEAM
      run: CUDA=1 JIT=1 BEAM=2 CACHELEVEL=0 python3 examples/llama.py --gen 1 --prompt "Hello." --count 10 --temperature 0 --timing | tee llama_beam.txt
    - name: Run GPT2
      run: |
        CUDA=1 JIT=0 python3 examples/gpt2.py --prompt "Hello." --count 10 --temperature 0 --timing | tee gpt2_unjitted.txt
        CUDA=1 JIT=1 python3 examples/gpt2.py --prompt "Hello." --count 10 --temperature 0 --timing | tee gpt2_jitted.txt
    - name: Run GPT2 w HALF
      run: CUDA=1 JIT=1 HALF=1 python3 examples/gpt2.py --count 10 --temperature 0 --timing | tee gpt2_half.txt
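    # JIT_BATCH_SIZE=4 sets how many kernels the JIT batches into one graph
    # launch (our understanding of the flag).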
    - name: Run GPT2 w HALF/BEAM
      run: CUDA=1 JIT=1 HALF=1 BEAM=2 CACHELEVEL=0 JIT_BATCH_SIZE=4 python3 examples/gpt2.py --count 10 --temperature 0 --timing | tee gpt2_half_beam.txt
    - name: Train MNIST
      run: time PYTHONPATH=. CUDA=1 TARGET_EVAL_ACC_PCT=97.3 python3 examples/beautiful_mnist.py | tee beautiful_mnist.txt
    - name: Run 10 CIFAR training steps
      run: CUDA=1 STEPS=10 python3 examples/hlb_cifar10.py | tee train_cifar.txt
    - name: Run 10 CIFAR training steps w HALF
      run: CUDA=1 STEPS=10 DEFAULT_FLOAT=HALF python3 examples/hlb_cifar10.py | tee train_cifar_half.txt
    - name: Run 10 CIFAR training steps w BF16
      run: CUDA=1 STEPS=10 DEFAULT_FLOAT=BFLOAT16 python3 examples/hlb_cifar10.py | tee train_cifar_bf16.txt
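    # LATEWINO=1 turns winograd convolutions on only late in the run, and
    # TARGET_EVAL_ACC_PCT=93.3 gates on final accuracy (our reading of
    # hlb_cifar10.py).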
    - name: Run full CIFAR training
      run: time CUDA=1 DEFAULT_FLOAT=HALF LATEWINO=1 STEPS=1000 TARGET_EVAL_ACC_PCT=93.3 python3 examples/hlb_cifar10.py | tee train_cifar_one_gpu.txt
    - uses: actions/upload-artifact@v4
      with:
        name: Speed (NVIDIA)
        path: |
          onnx_inference_speed.csv
          torch_speed.txt
          matmul.txt
          matmul_bfloat16.txt
          matmul_ptx.txt
          llama_unjitted.txt
          llama_jitted.txt
          llama_beam.txt
          gpt2_unjitted.txt
          gpt2_jitted.txt
          gpt2_half.txt
          gpt2_half_beam.txt
          beautiful_mnist.txt
          train_cifar.txt
          train_cifar_half.txt
          train_cifar_bf16.txt
          train_cifar_one_gpu.txt

  testamdbenchmark:
    name: tinybox Benchmark
    runs-on: [self-hosted, Linux, tinybox]
    defaults:
      run:
        shell: bash -o pipefail {0}
    if: github.repository_owner == 'tinygrad'
    env:
      PYTHONPATH: .
    steps:
    - name: Checkout Code
      uses: actions/checkout@v4
    - name: Symlink models and datasets
      run: |
        mkdir -p weights
        ln -s ~/tinygrad/weights/bpe_simple_vocab_16e6.txt.gz weights/bpe_simple_vocab_16e6.txt.gz
        ln -s ~/tinygrad/weights/LLaMA weights/LLaMA
        ln -s ~/tinygrad/extra/datasets/cifar-10-python.tar.gz extra/datasets/cifar-10-python.tar.gz
        ln -s /raid/weights/mixtral-8x7b-32kseqlen weights/mixtral-8x7b-32kseqlen
        ln -s /raid/weights/LLaMA-2 weights/LLaMA-2
        mkdir -p extra/datasets
        ln -s /raid/datasets/imagenet extra/datasets/imagenet
    - name: Show off tinybox
      run: /opt/rocm/bin/rocm-bandwidth-test
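    # HSA=1 selects tinygrad's HSA backend; LD_PRELOAD presumably pins the
    # ROCm HSA runtime, and NOCLANG=1 skips the slow CPU (clang) baseline in
    # the model benchmark (our reading of the flags).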
    - name: Run model inference benchmark
      run: LD_PRELOAD="/opt/rocm/lib/libhsa-runtime64.so" HSA=1 NOCLANG=1 python3 test/external/external_model_benchmark.py
    # TODO: unstable on AMD
    #- name: Test speed vs torch
    #  run: |
    #    python3 -c "import torch; print(torch.__version__)"
    #    LD_PRELOAD="/opt/rocm/lib/libhsa-runtime64.so" HSA=1 BIG=2 TORCHCUDA=1 python3 test/test_speed_v_torch.py | tee torch_speed.txt
    - name: Run Tensor Core GEMM (HSA)
      run: HSA=1 HALF=1 DEBUG=2 python3 extra/gemm/simple_matmul.py | tee matmul.txt
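    # KFD=1 is the experimental backend that talks to the AMD kernel driver
    # directly rather than through the HSA runtime (our understanding).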
    - name: Run Tensor Core GEMM (KFD)
      run: KFD=1 HALF=1 DEBUG=2 python3 extra/gemm/simple_matmul.py | tee matmul_kfd.txt
    - name: Run Stable Diffusion
      run: HSA=1 python3 examples/stable_diffusion.py --seed 0 --noshow --timing | tee sd.txt
    - name: Run LLaMA 7B
      run: |
        HSA=1 JIT=0 python3 examples/llama.py --gen 1 --prompt "Hello." --count 10 --temperature 0 --timing | tee llama_unjitted.txt
        HSA=1 JIT=1 python3 examples/llama.py --gen 1 --prompt "Hello." --count 10 --temperature 0 --timing | tee llama_jitted.txt
    - name: Run LLaMA 7B with BEAM
      run: HSA=1 JIT=1 BEAM=2 CACHELEVEL=0 python3 examples/llama.py --gen 1 --prompt "Hello." --count 10 --temperature 0 --timing | tee llama_beam.txt
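    # Real multi-GPU sharding on the tinybox: the same 7B model split across
    # 4 and then 6 GPUs, then LLaMA-2 70B sharded 6 ways, plus Mixtral 8x7B.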
    - name: Run LLaMA 7B on 4 GPUs
      run: HSA=1 python3 examples/llama.py --gen 1 --size 7B --shard 4 --prompt "Hello." --count 10 --temperature 0 --timing | tee llama_four_gpu.txt
    - name: Run LLaMA 7B on 6 GPUs
      run: HSA=1 python3 examples/llama.py --gen 1 --size 7B --shard 6 --prompt "Hello." --count 10 --temperature 0 --timing | tee llama_six_gpu.txt
    - name: Run LLaMA-2 70B
      run: HSA=1 python3 examples/llama.py --gen 2 --size 70B --shard 6 --prompt "Hello." --count 10 --temperature 0 --timing | tee llama_2_70B.txt
    - name: Run Mixtral 8x7B
      run: time HSA=1 python3 examples/mixtral.py --temperature 0 --count 10 --timing | tee mixtral.txt
    - name: Run GPT2
      run: |
        HSA=1 JIT=0 python3 examples/gpt2.py --prompt "Hello." --count 10 --temperature 0 --timing | tee gpt2_unjitted.txt
        HSA=1 JIT=1 python3 examples/gpt2.py --prompt "Hello." --count 10 --temperature 0 --timing | tee gpt2_jitted.txt
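    # torch_speed.txt comes from the commented-out torch speed step above and
    # will be absent until that step is re-enabled.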
    - uses: actions/upload-artifact@v4
      with:
        name: Speed (AMD)
        path: |
          onnx_inference_speed.csv
          torch_speed.txt
          llama_unjitted.txt
          llama_jitted.txt
          llama_beam.txt
          llama_four_gpu.txt
          llama_six_gpu.txt
          llama_2_70B.txt
          gpt2_unjitted.txt
          gpt2_jitted.txt
          matmul.txt
          matmul_kfd.txt
          sd.txt
          mixtral.txt

  testmoreamdbenchmark:
    name: tinybox Training
    runs-on: [self-hosted, Linux, tinybox]
    defaults:
      run:
        shell: bash -o pipefail {0}
    if: github.repository_owner == 'tinygrad'
    env:
      PYTHONPATH: .
    steps:
    - name: Checkout Code
      uses: actions/checkout@v4
    - name: Symlink models and datasets
      run: |
        mkdir -p weights
        ln -s ~/tinygrad/weights/bpe_simple_vocab_16e6.txt.gz weights/bpe_simple_vocab_16e6.txt.gz
        ln -s ~/tinygrad/weights/LLaMA weights/LLaMA
        ln -s ~/tinygrad/extra/datasets/cifar-10-python.tar.gz extra/datasets/cifar-10-python.tar.gz
        ln -s /raid/weights/mixtral-8x7b-32kseqlen weights/mixtral-8x7b-32kseqlen
        ln -s /raid/weights/LLaMA-2 weights/LLaMA-2
        mkdir -p extra/datasets
        ln -s /raid/datasets/imagenet extra/datasets/imagenet
    - name: Train MNIST
      run: time PYTHONPATH=. HSA=1 TARGET_EVAL_ACC_PCT=97.3 python3 examples/beautiful_mnist.py | tee beautiful_mnist.txt
    - name: Run 10 CIFAR training steps
      run: HSA=1 STEPS=10 python3 examples/hlb_cifar10.py | tee train_cifar.txt
    - name: Run 10 CIFAR training steps w HALF
      run: HSA=1 STEPS=10 DEFAULT_FLOAT=HALF python3 examples/hlb_cifar10.py | tee train_cifar_half.txt
    - name: Run 10 CIFAR training steps w BF16
      run: HSA=1 STEPS=10 DEFAULT_FLOAT=BFLOAT16 python3 examples/hlb_cifar10.py | tee train_cifar_bf16.txt
    - name: Run full CIFAR training w 1 GPU
      run: time HSA=1 DEFAULT_FLOAT=HALF LATEWINO=1 STEPS=1000 TARGET_EVAL_ACC_PCT=93.3 python3 examples/hlb_cifar10.py | tee train_cifar_one_gpu.txt
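    # The 6-GPU run scales the batch to 1536 (256 per GPU) and cuts the step
    # count from 1000 to 350 while targeting the same 93.3% accuracy.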
    - name: Run full CIFAR training w 6 GPUs
      run: time HSA=1 DEFAULT_FLOAT=HALF STEPS=350 BS=1536 GPUS=6 TARGET_EVAL_ACC_PCT=93.3 python3 examples/hlb_cifar10.py | tee train_cifar_six_gpu.txt
    - name: Run MLPerf resnet eval on training data
      run: time HSA=1 MODEL=resnet python3 examples/mlperf/model_eval.py
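    # BENCHMARK=10 runs just 10 measured MLPerf training steps; batch size
    # scales with GPU count at 128 per GPU (our reading of model_train.py).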
    - name: Run 10 MLPerf ResNet50 training steps (1 gpu)
      run: HSA=1 BENCHMARK=10 BS=128 GPUS=1 MODEL=resnet python3 examples/mlperf/model_train.py | tee train_resnet_one_gpu.txt
    - name: Run 10 MLPerf ResNet50 training steps (6 gpu)
      run: HSA=1 BENCHMARK=10 BS=768 GPUS=6 MODEL=resnet python3 examples/mlperf/model_train.py | tee train_resnet.txt
    - uses: actions/upload-artifact@v4
      with:
        name: Speed (AMD Training)
        path: |
          beautiful_mnist.txt
          train_cifar.txt
          train_cifar_half.txt
          train_cifar_bf16.txt
          train_cifar_one_gpu.txt
          train_resnet.txt
          train_resnet_one_gpu.txt
          train_cifar_six_gpu.txt