Workflow file for PR #12: "use micromamba so that PyTorch doesn't install CUDA"

name: deploy-book

on:
  # Trigger the workflow on push to main branch
  push:
    branches:
      - main

env:
  # Jupyter Book serves from https://<user>.github.io/<repo>/, not the
  # domain root, so the site's base URL must include the repository name.
  BASE_URL: /${{ github.event.repository.name }}

# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued.
# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete.
concurrency:
  group: "pages"
  cancel-in-progress: false

jobs:
  deploy-book:
    runs-on: ubuntu-latest
    # Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages.
    # Overriding permissions drops all defaults, so contents: read must be
    # restated for actions/checkout to clone the repository.
    permissions:
      contents: read
      pages: write
      id-token: write
    # Required for micromamba to activate: a login shell (-l) sources the
    # micromamba hook so the environment's packages are on PATH in run steps.
    defaults:
      run:
        shell: "bash -l {0}"
    steps:
      - uses: actions/checkout@v4

      # Install dependencies with micromamba from environment.yml
      # (per PR #12: avoids PyTorch pulling in the CUDA build).
      - name: Set up Python via Micromamba
        uses: mamba-org/setup-micromamba@v2
        with:
          environment-file: environment.yml
          init-shell: bash
          cache-environment: true
          post-cleanup: "all"

      # Build the book (-W turns Sphinx warnings into errors so CI fails loudly)
      - name: Build the book
        run: |
          jupyter-book build -W deep-learning-intro-for-hep

      # Upload the book's HTML as an artifact for the Pages deployment.
      # v3 is required: v2 used the deprecated artifact backend and is
      # incompatible with deploy-pages@v4.
      - name: Upload artifact
        uses: actions/upload-pages-artifact@v3
        with:
          path: "deep-learning-intro-for-hep/_build/html"

      # Deploy the uploaded artifact to GitHub Pages
      - name: Deploy to GitHub Pages
        id: deployment
        uses: actions/deploy-pages@v4