diff --git a/.github/workflows/forks-build-on-cloudflare.yml b/.github/workflows/forks-build-on-cloudflare.yml
new file mode 100644
index 0000000000..63bafa1093
--- /dev/null
+++ b/.github/workflows/forks-build-on-cloudflare.yml
@@ -0,0 +1,228 @@
+name: PR Deploy to Cloudflare Pages
+
+on:
+  pull_request_target: # Use pull_request_target instead of pull_request for fork security
+    types: [opened, synchronize, reopened]
+
+jobs:
+  deploy-pr:
+    name: Deploy PR to Cloudflare Pages
+    runs-on: ubuntu-latest
+
+    # Only run on PRs from forks
+    # GitHub context: https://docs.github.com/en/actions/learn-github-actions/contexts#github-context
+    if: github.event.pull_request.head.repo.full_name != github.repository
+
+    steps:
+      # Note: When using pull_request_target, this step is important for security
+      - name: Checkout repository
+        uses: actions/checkout@v4
+        with:
+          ref: ${{ github.event.pull_request.head.sha }} # Checkout the PR's head
+          fetch-depth: 0 # Required for Cloudflare Pages to have complete Git history
+
+      # Setup Node.js for PostCSS
+      - name: Setup Node.js
+        uses: actions/setup-node@v4
+        with:
+          node-version: '20'
+
+      # Check if this is a Hugo site
+      - name: Check for Hugo
+        id: check-hugo
+        run: |
+          if [ -f "config.toml" ] || [ -f "config.yaml" ] || [ -f "config.json" ] || [ -f "hugo.yaml" ] || [ -f "hugo.toml" ] || [ -f "hugo.json" ] || [ -d "themes" ]; then
+            echo "is_hugo=true" >> $GITHUB_OUTPUT
+            echo "✅ Detected Hugo site"
+          else
+            echo "is_hugo=false" >> $GITHUB_OUTPUT
+            echo "⚠️ Not a Hugo site"
+          fi
+
+      # Install PostCSS and other dependencies for Hugo
+      - name: Install PostCSS and dependencies
+        if: steps.check-hugo.outputs.is_hugo == 'true'
+        run: |
+          # Create package.json if it doesn't exist
+          if [ ! -f "package.json" ]; then
+            echo '{"name":"hugo-build","version":"1.0.0","private":true}' > package.json
+          fi
+
+          # Install both globally and locally to ensure modules are found
+          npm install -g postcss postcss-cli
+          npm install --save-dev autoprefixer postcss postcss-cli
+
+          # Create postcss.config.js if it doesn't exist
+          if [ ! -f "postcss.config.js" ]; then
+            echo 'module.exports = {plugins: {autoprefixer: {}}}' > postcss.config.js
+          fi
+
+          echo "Installed PostCSS and dependencies:"
+          postcss --version
+          echo "Node modules directory:"
+          ls -la node_modules
+
+      # Install Hugo if needed
+      - name: Setup Hugo
+        if: steps.check-hugo.outputs.is_hugo == 'true'
+        uses: peaceiris/actions-hugo@v2
+        with:
+          hugo-version: 'latest'
+          extended: true
+
+      # Build Hugo site
+      - name: Build with Hugo
+        if: steps.check-hugo.outputs.is_hugo == 'true'
+        run: hugo --minify
+
+      # Check build output
+      - name: Verify Hugo output
+        if: steps.check-hugo.outputs.is_hugo == 'true'
+        run: |
+          if [ -d "public" ]; then
+            echo "✅ Hugo generated 'public' directory"
+            echo "Contents of public directory:"
+            ls -la public
+          else
+            echo "❌ Hugo did not generate 'public' directory"
+          fi
+
+      # Debug directory structure
+      - name: Debug directory structure
+        run: |
+          echo "Current directory: $(pwd)"
+          echo "Directory contents:"
+          ls -la
+          echo "--------------------"
+
+          if [ -d "public" ]; then
+            echo "✅ Found 'public' directory (Hugo output)"
+            echo "deploy_dir=public" >> $GITHUB_OUTPUT
+          elif [ -d "dist" ]; then
+            echo "✅ Found 'dist' directory"
+            echo "deploy_dir=dist" >> $GITHUB_OUTPUT
+          elif [ -d "build" ]; then
+            echo "✅ Found 'build' directory"
+            echo "deploy_dir=build" >> $GITHUB_OUTPUT
+          elif [ -d "_site" ]; then
+            echo "✅ Found '_site' directory"
+            echo "deploy_dir=_site" >> $GITHUB_OUTPUT
+          else
+            # Create a directory with a simple index.html if none exists
+            mkdir -p _preview
+            echo "This is a preview of PR #${{ github.event.pull_request.number }} from ${{ github.event.pull_request.head.label }}" > _preview/index.html
+            echo "deploy_dir=_preview" >> $GITHUB_OUTPUT
+            echo "Created fallback _preview directory with placeholder content"
+          fi
+
+      # Set directory for deployment
+      - name: Set deployment directory
+        id: deploy-dir
+        run: |
+          if [ -d "public" ]; then
+            echo "dir=public" >> $GITHUB_OUTPUT
+          elif [ -d "dist" ]; then
+            echo "dir=dist" >> $GITHUB_OUTPUT
+          elif [ -d "build" ]; then
+            echo "dir=build" >> $GITHUB_OUTPUT
+          elif [ -d "_site" ]; then
+            echo "dir=_site" >> $GITHUB_OUTPUT
+          else
+            # Create a directory with a simple index.html if none exists
+            mkdir -p _preview
+            echo "This is a preview of PR #${{ github.event.pull_request.number }} from ${{ github.event.pull_request.head.label }}" > _preview/index.html
+            echo "dir=_preview" >> $GITHUB_OUTPUT
+            echo "Created fallback _preview directory with placeholder content"
+          fi
+
+          echo "Will deploy from directory: $(cat $GITHUB_OUTPUT | grep dir | cut -d= -f2)"
+
+      # Get timestamp for build
+      - name: Get current timestamp
+        id: timestamp
+        run: echo "timestamp=$(date -u '+%Y-%m-%d %H:%M:%S UTC')" >> $GITHUB_OUTPUT
+
+      # Deploy to Cloudflare Pages using wrangler directly
+      - name: Deploy to Cloudflare Pages
+        id: cloudflare-deployment
+        continue-on-error: true # Continue workflow even if deployment fails
+        env:
+          CLOUDFLARE_API_TOKEN: ${{ secrets.CLOUDFLARE_API_TOKEN }}
+          CLOUDFLARE_ACCOUNT_ID: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
+        run: |
+          # Install Wrangler
+          npm install -g wrangler
+
+          # Deploy to Cloudflare Pages using the newer 'deploy' command
+          OUTPUT=$(wrangler pages deploy "${{ steps.deploy-dir.outputs.dir || 'public' }}" \
+            --project-name="${{ secrets.CLOUDFLARE_PROJECT_NAME }}" \
+            --branch="${{ github.head_ref || github.ref_name }}" \
+            --commit-hash="${{ github.event.pull_request.head.sha }}" \
+            --commit-message="${{ github.event.pull_request.title }}" \
+            --commit-dirty=true)
+
+          # Extract the deployment URL from the output
+          DEPLOYMENT_URL=$(echo "$OUTPUT" | grep -o 'https://[^ ]*' | head -1)
+
+          # Print the output and URL for debugging
+          echo "$OUTPUT"
+          echo "Extracted URL: $DEPLOYMENT_URL"
+
+          # Set the URL as an output
+          echo "url=$DEPLOYMENT_URL" >> $GITHUB_OUTPUT
+
+      # Create or update PR comment with deployment info using GitHub CLI
+      - name: Create or update comment with gh CLI
+        if: always()
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          PR_NUMBER: ${{ github.event.pull_request.number }}
+          REPO: ${{ github.repository }}
+          DEPLOYMENT_URL: ${{ steps.cloudflare-deployment.outputs.url }}
+          TIMESTAMP: ${{ steps.timestamp.outputs.timestamp }}
+          SHORT_SHA: ${{ github.event.pull_request.head.sha }}
+          RUN_URL: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}
+          PR_LABEL: ${{ github.event.pull_request.head.label }}
+        run: |
+          # Determine comment content based on deployment success
+          if [ -n "$DEPLOYMENT_URL" ]; then
+            TITLE="🚀 PR Preview deployed successfully!"
+            URL_LINE="📝 Preview URL: $DEPLOYMENT_URL"
+          else
+            TITLE="❌ PR Preview deployment failed!"
+            URL_LINE="⚠️ The build for Cloudflare Pages failed to complete."
+          fi
+
+          # Create a temporary file for the comment body
+          COMMENT_FILE=$(mktemp)
+          cat > "$COMMENT_FILE" << EOF
+          $TITLE
+
+          $URL_LINE
+
+          ✅ Last built: $TIMESTAMP
+          📌 Commit: ${SHORT_SHA:0:7}
+          🔍 [View build details]($RUN_URL)
+
+          This preview was automatically generated from fork PR \`$PR_LABEL\`.
+          EOF
+
+          # List comments to find any existing deployment comment
+          COMMENTS_FILE=$(mktemp)
+          gh api repos/$REPO/issues/$PR_NUMBER/comments > "$COMMENTS_FILE"
+
+          # Search for an existing bot comment that mentions PR Preview
+          COMMENT_ID=$(jq -r '.[] | select(.user.login=="github-actions[bot]" and (.body | contains("PR Preview"))) | .id' "$COMMENTS_FILE")
+
+          if [ -n "$COMMENT_ID" ]; then
+            # Update existing comment
+            echo "Updating existing comment ID: $COMMENT_ID"
+            gh api --method PATCH repos/$REPO/issues/comments/$COMMENT_ID -f body="$(cat $COMMENT_FILE)"
+          else
+            # Create new comment
+            echo "Creating new comment"
+            gh api repos/$REPO/issues/$PR_NUMBER/comments -f body="$(cat $COMMENT_FILE)"
+          fi
+
+          # Clean up temp files
+          rm -f "$COMMENT_FILE" "$COMMENTS_FILE"
diff --git a/content/guides/quickstart.md b/content/guides/quickstart.md
index e974e6f0c7..ab4908cf4f 100644
--- a/content/guides/quickstart.md
+++ b/content/guides/quickstart.md
@@ -16,8 +16,6 @@ To authenticate your machine with W&B, generate an API key from your user profil
 
 ## Install the `wandb` library and log in
 
-Install the `wandb` library and log in by following these steps:
-
 {{< tabpane text=true >}}
 {{% tab header="Command Line" value="cli" %}}
 
@@ -61,7 +59,7 @@ wandb.login()
 
 ## Start a run and track hyperparameters
 
-In your Python script or notebook, initialize a W&B run object with [`wandb.init()`]({{< relref "/ref/python/run.md" >}}). Use a dictionary for the `config` parameter to specify hyperparameter names and values:
+In your Python script or notebook, initialize a W&B run object with [`wandb.init()`]({{< relref "/ref/python/run.md" >}}). Use a dictionary for the `config` parameter to specify hyperparameter names and values.
 
 ```python
 run = wandb.init(
@@ -73,7 +71,7 @@ run = wandb.init(
 )
 ```
 
-A [run]({{< relref "/guides/models/track/runs/" >}}) serves as the core element of W&B, used to [track metrics]({{< relref "/guides/models/track/" >}}), [create logs]({{< relref "/guides/core/artifacts/" >}}), and more.
+A [run]({{< relref "/guides/models/track/runs/" >}}) serves as the core element of W&B, used to [track metrics]({{< relref "/guides/models/track/" >}}), [create logs]({{< relref "/guides/models/track/log/" >}}), and more.
 
 ## Assemble the components
 
@@ -110,19 +108,17 @@ for epoch in range(2, epochs):
     # run.log_code()
 ```
 
-Visit the W&B App at [wandb.ai/home](https://wandb.ai/home) to view recorded metrics such as accuracy and loss during each training step.
+Visit [wandb.ai/home](https://wandb.ai/home) to view recorded metrics such as accuracy and loss and how they changed during each training step. The following image shows the loss and accuracy tracked from each run. Each run object appears in the **Runs** column with generated names.
 
 {{< img src="/images/quickstart/quickstart_image.png" alt="Shows loss and accuracy tracked from each run." >}}
 
-The preceding image shows the loss and accuracy tracked from each run. Each run object appears in the **Runs** column with generated names.
-
 ## Next steps
 
 Explore more features of the W&B ecosystem:
 
-1. Review [W&B Integrations]({{< relref "guides/integrations/" >}}) to combine W&B with ML frameworks like PyTorch, ML libraries like Hugging Face, or services like SageMaker.
+1. Read the [W&B Integration tutorials]({{< relref "guides/integrations/" >}}) that combine W&B with frameworks like PyTorch, libraries like Hugging Face, and services like SageMaker.
 2. Organize runs, automate visualizations, summarize findings, and share updates with collaborators using [W&B Reports]({{< relref "/guides/core/reports/" >}}).
 3. Create [W&B Artifacts]({{< relref "/guides/core/artifacts/" >}}) to track datasets, models, dependencies, and results throughout your machine learning pipeline.
-4. Automate hyperparameter searches and explore models with [W&B Sweeps]({{< relref "/guides/models/sweeps/" >}}).
-5. Analyze datasets, visualize model predictions, and share insights on a [central dashboard]({{< relref "/guides/models/tables/" >}}).
-6. Access W&B AI Academy to learn about LLMs, MLOps, and W&B Models through hands-on [courses](https://wandb.me/courses).
\ No newline at end of file
+4. Automate hyperparameter searches and optimize models with [W&B Sweeps]({{< relref "/guides/models/sweeps/" >}}).
+5. Analyze runs, visualize model predictions, and share insights on a [central dashboard]({{< relref "/guides/models/tables/" >}}).
+6. Visit [W&B AI Academy](https://wandb.ai/site/courses/) to learn about LLMs, MLOps, and W&B Models through hands-on courses.
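
For readers following the updated quickstart, here is a minimal end-to-end sketch of the flow the page describes: create a run with `wandb.init()` and a `config` dictionary, log metrics with `run.log()`, and finish the run. The project name and simulated metric values are illustrative, not part of the docs change:

```python
import random

import wandb

# Start a run; the config dictionary records hyperparameters alongside the run.
run = wandb.init(
    project="quickstart-example",  # placeholder project name
    config={"learning_rate": 0.01, "epochs": 5},
)

# Simulate a short training loop and log metrics for each epoch.
for epoch in range(run.config["epochs"]):
    accuracy = 1 - 2**-epoch - random.random() / (epoch + 1)
    loss = 2**-epoch + random.random() / (epoch + 1)
    run.log({"accuracy": accuracy, "loss": loss})

# Mark the run as finished so metrics are flushed to the W&B App.
run.finish()
```

Each logged metric appears as a chart for the run at [wandb.ai/home](https://wandb.ai/home), matching the loss and accuracy screenshot referenced in the guide.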