From d92fd5264ec6691e4bb040351aa29c649d7f4ac4 Mon Sep 17 00:00:00 2001
From: Logan Adams
Date: Wed, 22 Jan 2025 10:37:42 -0800
Subject: [PATCH] Update A6000 workflows to use newer docker container - 24.09
 vs 24.03

---
 .github/workflows/nv-a6000.yml      | 2 +-
 .github/workflows/nv-flash-attn.yml | 2 +-
 .github/workflows/nv-human-eval.yml | 2 +-
 .github/workflows/nv-sd.yml         | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/.github/workflows/nv-a6000.yml b/.github/workflows/nv-a6000.yml
index 0547431e3099..e6368db80df0 100644
--- a/.github/workflows/nv-a6000.yml
+++ b/.github/workflows/nv-a6000.yml
@@ -23,7 +23,7 @@ jobs:
   unit-tests:
     runs-on: [self-hosted, nvidia, a6000]
     container:
-      image: nvcr.io/nvidia/pytorch:24.03-py3
+      image: nvcr.io/nvidia/pytorch:24.09-py3
       ports:
         - 80
       options: --gpus all --shm-size "8G"
diff --git a/.github/workflows/nv-flash-attn.yml b/.github/workflows/nv-flash-attn.yml
index 310972323043..4cc4f1919cde 100644
--- a/.github/workflows/nv-flash-attn.yml
+++ b/.github/workflows/nv-flash-attn.yml
@@ -18,7 +18,7 @@ jobs:
   unit-tests:
     runs-on: [self-hosted, nvidia, a6000]
     container:
-      image: nvcr.io/nvidia/pytorch:24.03-py3
+      image: nvcr.io/nvidia/pytorch:24.09-py3
       ports:
         - 80
       options: --gpus all --shm-size "8G"
diff --git a/.github/workflows/nv-human-eval.yml b/.github/workflows/nv-human-eval.yml
index 2ecdf218b96a..63e906dfc955 100644
--- a/.github/workflows/nv-human-eval.yml
+++ b/.github/workflows/nv-human-eval.yml
@@ -11,7 +11,7 @@ jobs:
   unit-tests:
     runs-on: [self-hosted, nvidia, a6000]
     container:
-      image: nvcr.io/nvidia/pytorch:24.03-py3
+      image: nvcr.io/nvidia/pytorch:24.09-py3
       ports:
         - 80
       options: --gpus all --shm-size "8G"
diff --git a/.github/workflows/nv-sd.yml b/.github/workflows/nv-sd.yml
index af406075b868..85b7e94bc084 100644
--- a/.github/workflows/nv-sd.yml
+++ b/.github/workflows/nv-sd.yml
@@ -27,7 +27,7 @@ jobs:
   sd-tests:
     runs-on: [self-hosted, nvidia, a6000]
     container:
-      image: nvcr.io/nvidia/pytorch:24.03-py3
+      image: nvcr.io/nvidia/pytorch:24.09-py3
       ports:
         - 80
       options: --gpus all --shm-size "8G"