|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
name: Nightly

# Default token is read-only; individual jobs must request broader scopes.
permissions:
  contents: read

on:
  # Allow manual runs from the Actions tab.
  workflow_dispatch:
  schedule:
    # Every day at 02:00 UTC.
    - cron: "0 2 * * *"

env:
  UV_VERSION: "0.8.0"
  PYTHON_VERSION: "3.10"
  # Nightly images are pushed to (and pulled by test jobs from) these tags.
  DOCKER_IMAGE_NAME_CPU: huggingface/lerobot-cpu:latest
  DOCKER_IMAGE_NAME_GPU: huggingface/lerobot-gpu:latest

# Scheduled runs have no head_ref, so the group falls back to the unique
# run_id — cancel-in-progress therefore only affects re-dispatched manual runs.
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true
|
jobs:
  # Builds and pushes the CPU nightly image, then exposes its tag to test jobs.
  build-docker-cpu-nightly:
    name: Build CPU Docker for Nightly
    runs-on:
      group: aws-general-8-plus
    outputs:
      # The `env` context is NOT available in `jobs.<job_id>.outputs` — it
      # silently evaluates to an empty string, which would make the downstream
      # `container.image` empty. Route the tag through a step output instead.
      image_tag: ${{ steps.image-tag.outputs.tag }}
    steps:
      - name: Install Git LFS
        run: |
          sudo apt-get update
          # -y: without it apt-get prompts for confirmation and aborts on EOF
          # when no TTY is attached (the CI case).
          sudo apt-get install -y git-lfs
          git lfs install
      - uses: actions/checkout@v4
        with:
          lfs: true
          persist-credentials: false
      - name: Export image tag
        id: image-tag
        # Workflow-level env vars ARE available to steps, so re-export the tag
        # where the outputs map can legally read it.
        run: echo "tag=${DOCKER_IMAGE_NAME_CPU}" >> "$GITHUB_OUTPUT"
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        with:
          cache-binary: false
      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_LEROBOT_USERNAME }}
          password: ${{ secrets.DOCKERHUB_LEROBOT_PASSWORD }}
      - name: Build and push Docker image CPU
        uses: docker/build-push-action@v6
        with:
          context: .
          file: ./docker/Dockerfile.user
          push: true
          tags: ${{ env.DOCKER_IMAGE_NAME_CPU }}
|
| build-docker-gpu-nightly:
|
| name: Build GPU Docker for Nightly
|
| runs-on:
|
| group: aws-general-8-plus
|
| outputs:
|
| image_tag: ${{ env.DOCKER_IMAGE_NAME_GPU }}
|
| steps:
|
| - name: Install Git LFS
|
| run: |
|
| sudo apt-get update
|
| sudo apt-get install git-lfs
|
| git lfs install
|
| - uses: actions/checkout@v4
|
| with:
|
| lfs: true
|
| persist-credentials: false
|
| - name: Set up Docker Buildx
|
| uses: docker/setup-buildx-action@v3
|
| with:
|
| cache-binary: false
|
| - name: Login to Docker Hub
|
| uses: docker/login-action@v3
|
| with:
|
| username: ${{ secrets.DOCKERHUB_LEROBOT_USERNAME }}
|
| password: ${{ secrets.DOCKERHUB_LEROBOT_PASSWORD }}
|
| - name: Build and push Docker image GPU
|
| uses: docker/build-push-action@v6
|
| with:
|
| context: .
|
| file: ./docker/Dockerfile.internal
|
| push: true
|
| tags: ${{ env.DOCKER_IMAGE_NAME_GPU }}
|
|
|
|
|
  # Runs the full test suite inside the freshly built CPU nightly image.
  nightly-cpu-tests:
    name: Nightly CPU Tests
    needs: [build-docker-cpu-nightly]
    runs-on:
      # NOTE(review): g6 instances carry GPUs; presumably reused here for
      # capacity — confirm a CPU-only runner group isn't preferable.
      group: aws-g6-4xlarge-plus
    env:
      # Cache locations under the container's non-root user home.
      HF_HOME: /home/user_lerobot/.cache/huggingface
      HF_LEROBOT_HOME: /home/user_lerobot/.cache/huggingface/lerobot
      TORCH_HOME: /home/user_lerobot/.cache/torch
      TRITON_CACHE_DIR: /home/user_lerobot/.cache/triton
    container:
      # Tag published by the upstream build job.
      image: ${{ needs.build-docker-cpu-nightly.outputs.image_tag }}
      options: --shm-size "16gb"
      credentials:
        username: ${{ secrets.DOCKERHUB_LEROBOT_USERNAME }}
        password: ${{ secrets.DOCKERHUB_LEROBOT_PASSWORD }}
    defaults:
      run:
        shell: bash
        # Source tree is baked into the image at /lerobot; no checkout step.
        working-directory: /lerobot
    steps:
      - name: Run pytest on CPU
        run: pytest tests -vv --maxfail=10
      - name: Run end-to-end tests
        run: make test-end-to-end
|
  # Runs the full test suite inside the freshly built GPU nightly image.
  nightly-gpu-tests:
    name: Nightly GPU Tests
    needs: [build-docker-gpu-nightly]
    runs-on:
      group: aws-g6-4xlarge-plus
    env:
      # Cache locations under the container's non-root user home.
      HF_HOME: /home/user_lerobot/.cache/huggingface
      HF_LEROBOT_HOME: /home/user_lerobot/.cache/huggingface/lerobot
      TORCH_HOME: /home/user_lerobot/.cache/torch
      TRITON_CACHE_DIR: /home/user_lerobot/.cache/triton
    container:
      # Tag published by the upstream build job.
      image: ${{ needs.build-docker-gpu-nightly.outputs.image_tag }}
      # --gpus all exposes the host GPUs to the container.
      options: --gpus all --shm-size "16gb"
      credentials:
        username: ${{ secrets.DOCKERHUB_LEROBOT_USERNAME }}
        password: ${{ secrets.DOCKERHUB_LEROBOT_PASSWORD }}
    defaults:
      run:
        shell: bash
        # Source tree is baked into the image at /lerobot; no checkout step.
        working-directory: /lerobot
    steps:
      - name: Run pytest on GPU
        run: pytest tests -vv --maxfail=10
      - name: Run end-to-end tests
        run: make test-end-to-end
|
  # Verifies multi-GPU availability and runs the suite on a 4-GPU runner.
  nightly-multi-gpu-tests:
    name: Nightly Multi-GPU Tests
    needs: [build-docker-gpu-nightly]
    runs-on:
      # g4dn.12xlarge provides 4 GPUs, matching CUDA_VISIBLE_DEVICES below.
      group: aws-g4dn-12xlarge
    env:
      # Cache locations under the container's non-root user home.
      HF_HOME: /home/user_lerobot/.cache/huggingface
      HF_LEROBOT_HOME: /home/user_lerobot/.cache/huggingface/lerobot
      TORCH_HOME: /home/user_lerobot/.cache/torch
      TRITON_CACHE_DIR: /home/user_lerobot/.cache/triton
      # Expose all four devices to the test process.
      CUDA_VISIBLE_DEVICES: "0,1,2,3"
    container:
      # Tag published by the upstream build job.
      image: ${{ needs.build-docker-gpu-nightly.outputs.image_tag }}
      # --gpus all exposes the host GPUs to the container.
      options: --gpus all --shm-size "16gb"
      credentials:
        username: ${{ secrets.DOCKERHUB_LEROBOT_USERNAME }}
        password: ${{ secrets.DOCKERHUB_LEROBOT_PASSWORD }}
    defaults:
      run:
        shell: bash
        # Source tree is baked into the image at /lerobot; no checkout step.
        working-directory: /lerobot
    steps:
      - name: Verify GPU availability
        # Fail fast (and loudly) if the runner/container GPU wiring is broken.
        run: |
          nvidia-smi
          python -c "import torch; print(f'PyTorch CUDA available: {torch.cuda.is_available()}'); print(f'Number of GPUs: {torch.cuda.device_count()}')"

      - name: Run multi-GPU training tests
        # Motors tests need physical hardware not present on cloud runners.
        run: pytest tests -vv --maxfail=10 --ignore=tests/motors/
        # Hard cap so a hung distributed test cannot stall the nightly run.
        timeout-minutes: 10
|