diff --git a/.github/workflows/all-documents.yml b/.github/workflows/all-documents.yml
index ff7568c91c..58ce83b578 100644
--- a/.github/workflows/all-documents.yml
+++ b/.github/workflows/all-documents.yml
@@ -20,7 +20,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out repo
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup
diff --git a/.github/workflows/article-api-docs.yml b/.github/workflows/article-api-docs.yml
index 4f87a815b9..e6552f4fa3 100644
--- a/.github/workflows/article-api-docs.yml
+++ b/.github/workflows/article-api-docs.yml
@@ -22,7 +22,7 @@ jobs:
if: github.repository == 'github/docs-internal' || github.repository == 'github/docs'
steps:
- name: Checkout
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup
diff --git a/.github/workflows/auto-add-ready-for-doc-review.yml b/.github/workflows/auto-add-ready-for-doc-review.yml
index 7b58c76c72..4a78f5a76d 100644
--- a/.github/workflows/auto-add-ready-for-doc-review.yml
+++ b/.github/workflows/auto-add-ready-for-doc-review.yml
@@ -25,7 +25,7 @@ jobs:
steps:
- name: Check out repo
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Check team membership
id: membership_check
diff --git a/.github/workflows/check-broken-links-github-github.yml b/.github/workflows/check-broken-links-github-github.yml
index fc7ea6ff06..9ed65c2867 100644
--- a/.github/workflows/check-broken-links-github-github.yml
+++ b/.github/workflows/check-broken-links-github-github.yml
@@ -24,7 +24,7 @@ jobs:
REPORT_REPOSITORY: github/docs-content
steps:
- name: Checkout
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
with:
# To prevent issues with cloning early access content later
persist-credentials: 'false'
diff --git a/.github/workflows/close-on-invalid-label.yaml b/.github/workflows/close-on-invalid-label.yaml
index 715ff94260..7c7e12b3d4 100644
--- a/.github/workflows/close-on-invalid-label.yaml
+++ b/.github/workflows/close-on-invalid-label.yaml
@@ -37,7 +37,7 @@ jobs:
- name: Check out repo
if: ${{ failure() && github.event_name != 'pull_request_target' }}
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/slack-alert
if: ${{ failure() && github.event_name != 'pull_request_target' }}
diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
index b5484c2f8c..39a961119a 100644
--- a/.github/workflows/codeql.yml
+++ b/.github/workflows/codeql.yml
@@ -33,7 +33,7 @@ jobs:
if: github.repository == 'github/docs-internal' || github.repository == 'github/docs'
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: github/codeql-action/init@e296a935590eb16afc0c0108289f68c87e2a89a5 # v4.30.7
with:
languages: javascript # comma separated list of values from {go, python, javascript, java, cpp, csharp, ruby}
diff --git a/.github/workflows/confirm-internal-staff-work-in-docs.yml b/.github/workflows/confirm-internal-staff-work-in-docs.yml
index f2b4cb4b21..776157ce8a 100644
--- a/.github/workflows/confirm-internal-staff-work-in-docs.yml
+++ b/.github/workflows/confirm-internal-staff-work-in-docs.yml
@@ -83,7 +83,7 @@ jobs:
- name: Check out repo
if: ${{ failure() && github.event_name != 'pull_request_target' }}
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/slack-alert
if: ${{ failure() && github.event_name != 'pull_request_target' }}
with:
diff --git a/.github/workflows/content-lint-markdown.yml b/.github/workflows/content-lint-markdown.yml
index f097e34e36..f00366bc9a 100644
--- a/.github/workflows/content-lint-markdown.yml
+++ b/.github/workflows/content-lint-markdown.yml
@@ -23,7 +23,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out repo
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Set up Node and dependencies
uses: ./.github/actions/node-npm-setup
diff --git a/.github/workflows/content-linter-rules-docs.yml b/.github/workflows/content-linter-rules-docs.yml
index fe1dc03ca7..4d17f0cc2a 100644
--- a/.github/workflows/content-linter-rules-docs.yml
+++ b/.github/workflows/content-linter-rules-docs.yml
@@ -25,7 +25,7 @@ jobs:
if: github.repository == 'github/docs-internal' || github.repository == 'github/docs'
steps:
- name: Checkout
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup
diff --git a/.github/workflows/copy-api-issue-to-internal.yml b/.github/workflows/copy-api-issue-to-internal.yml
index c630786ddf..4f52dd60d3 100644
--- a/.github/workflows/copy-api-issue-to-internal.yml
+++ b/.github/workflows/copy-api-issue-to-internal.yml
@@ -73,7 +73,7 @@ jobs:
- name: Check out repo
if: ${{ failure() && github.event_name != 'workflow_dispatch' && github.repository == 'github/docs-internal' }}
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/slack-alert
if: ${{ failure() && github.event_name != 'workflow_dispatch' && github.repository == 'github/docs-internal' }}
with:
diff --git a/.github/workflows/count-translation-corruptions.yml b/.github/workflows/count-translation-corruptions.yml
index 8549c30fe2..0c8b901f0e 100644
--- a/.github/workflows/count-translation-corruptions.yml
+++ b/.github/workflows/count-translation-corruptions.yml
@@ -24,7 +24,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout English repo
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
with:
# Using a PAT is necessary so that the new commit will trigger the
# CI in the PR. (Events from GITHUB_TOKEN don't trigger new workflows.)
diff --git a/.github/workflows/create-changelog-pr.yml b/.github/workflows/create-changelog-pr.yml
index 3de39fcb2d..626af25477 100644
--- a/.github/workflows/create-changelog-pr.yml
+++ b/.github/workflows/create-changelog-pr.yml
@@ -23,7 +23,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v5
+ - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: 'Ensure ${{ env.CHANGELOG_FILE }} exists'
run: |
diff --git a/.github/workflows/delete-orphan-translation-files.yml b/.github/workflows/delete-orphan-translation-files.yml
index a6907b97ef..d97fa24b30 100644
--- a/.github/workflows/delete-orphan-translation-files.yml
+++ b/.github/workflows/delete-orphan-translation-files.yml
@@ -60,10 +60,10 @@ jobs:
language_repo: github/docs-internal.ko-kr
steps:
- - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Checkout the language-specific repo
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
with:
repository: ${{ matrix.language_repo }}
token: ${{ secrets.DOCS_BOT_PAT_BASE }}
diff --git a/.github/workflows/docs-review-collect.yml b/.github/workflows/docs-review-collect.yml
index e388d1eeab..f97bfbca4a 100644
--- a/.github/workflows/docs-review-collect.yml
+++ b/.github/workflows/docs-review-collect.yml
@@ -20,7 +20,7 @@ jobs:
steps:
- name: Check out repo content
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Setup Node.js
uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
diff --git a/.github/workflows/dont-delete-assets.yml b/.github/workflows/dont-delete-assets.yml
index e2ed22fe2d..17ff82230c 100644
--- a/.github/workflows/dont-delete-assets.yml
+++ b/.github/workflows/dont-delete-assets.yml
@@ -30,7 +30,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out repo
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup
diff --git a/.github/workflows/dont-delete-features.yml b/.github/workflows/dont-delete-features.yml
index 950c3377a3..16a0cea8f5 100644
--- a/.github/workflows/dont-delete-features.yml
+++ b/.github/workflows/dont-delete-features.yml
@@ -30,7 +30,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out repo
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup
diff --git a/.github/workflows/enterprise-dates.yml b/.github/workflows/enterprise-dates.yml
index 280f516ddd..d638bf8ba3 100644
--- a/.github/workflows/enterprise-dates.yml
+++ b/.github/workflows/enterprise-dates.yml
@@ -24,7 +24,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository code
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup
diff --git a/.github/workflows/enterprise-release-issue.yml b/.github/workflows/enterprise-release-issue.yml
index f642024155..eabaf5bf0f 100644
--- a/.github/workflows/enterprise-release-issue.yml
+++ b/.github/workflows/enterprise-release-issue.yml
@@ -19,7 +19,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository code
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup
diff --git a/.github/workflows/first-responder-v2-prs-collect.yml b/.github/workflows/first-responder-v2-prs-collect.yml
index cdad2ed0e0..4ebc9dcbac 100644
--- a/.github/workflows/first-responder-v2-prs-collect.yml
+++ b/.github/workflows/first-responder-v2-prs-collect.yml
@@ -24,7 +24,7 @@ jobs:
steps:
- name: Checkout repository
- uses: actions/checkout@v5
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
# Add to the FR project
# and set type to "Maintenance"
diff --git a/.github/workflows/generate-code-scanning-query-lists.yml b/.github/workflows/generate-code-scanning-query-lists.yml
index a3e261803b..bf4aa979fc 100644
--- a/.github/workflows/generate-code-scanning-query-lists.yml
+++ b/.github/workflows/generate-code-scanning-query-lists.yml
@@ -32,12 +32,12 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository code
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup
- name: Checkout codeql repo
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
with:
repository: github/codeql
path: codeql
diff --git a/.github/workflows/headless-tests.yml b/.github/workflows/headless-tests.yml
index 6b1f78b76a..535d6c036e 100644
--- a/.github/workflows/headless-tests.yml
+++ b/.github/workflows/headless-tests.yml
@@ -37,7 +37,7 @@ jobs:
timeout-minutes: 60
steps:
- name: Check out repo
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/setup-elasticsearch
diff --git a/.github/workflows/hubber-contribution-help.yml b/.github/workflows/hubber-contribution-help.yml
index b80ff46690..a105b8d304 100644
--- a/.github/workflows/hubber-contribution-help.yml
+++ b/.github/workflows/hubber-contribution-help.yml
@@ -23,7 +23,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out repo
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- id: membership_check
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd
diff --git a/.github/workflows/index-autocomplete-search.yml b/.github/workflows/index-autocomplete-search.yml
index 79582d796a..408fecf430 100644
--- a/.github/workflows/index-autocomplete-search.yml
+++ b/.github/workflows/index-autocomplete-search.yml
@@ -23,14 +23,14 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup
- uses: ./.github/actions/setup-elasticsearch
if: ${{ github.event_name == 'pull_request' }}
- - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
with:
token: ${{ secrets.DOCS_BOT_PAT_BASE }}
repository: github/docs-internal-data
diff --git a/.github/workflows/index-general-search-pr.yml b/.github/workflows/index-general-search-pr.yml
index 96127559d3..dd1ebf2788 100644
--- a/.github/workflows/index-general-search-pr.yml
+++ b/.github/workflows/index-general-search-pr.yml
@@ -37,10 +37,10 @@ jobs:
if: github.repository == 'github/docs-internal'
steps:
- name: Check out repo
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Clone docs-internal-data
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
with:
repository: github/docs-internal-data
# This works because user `docs-bot` has read access to that private repo.
diff --git a/.github/workflows/index-general-search.yml b/.github/workflows/index-general-search.yml
index c3930e1412..683833c816 100644
--- a/.github/workflows/index-general-search.yml
+++ b/.github/workflows/index-general-search.yml
@@ -87,7 +87,7 @@ jobs:
- name: Check out repo
if: ${{ failure() && github.event_name != 'workflow_dispatch' }}
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/slack-alert
if: ${{ failure() && github.event_name != 'workflow_dispatch' }}
@@ -115,10 +115,10 @@ jobs:
language: ${{ fromJSON(needs.figureOutMatrix.outputs.matrix) }}
steps:
- name: Check out repo
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Clone docs-internal-data
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
with:
repository: github/docs-internal-data
# This works because user `docs-bot` has read access to that private repo.
diff --git a/.github/workflows/keep-caches-warm.yml b/.github/workflows/keep-caches-warm.yml
index 04daa6d9c0..e63de3ecd6 100644
--- a/.github/workflows/keep-caches-warm.yml
+++ b/.github/workflows/keep-caches-warm.yml
@@ -29,7 +29,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out repo
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup
diff --git a/.github/workflows/link-check-daily.yml b/.github/workflows/link-check-daily.yml
index 1f0876acbd..63e8933ea4 100644
--- a/.github/workflows/link-check-daily.yml
+++ b/.github/workflows/link-check-daily.yml
@@ -23,7 +23,7 @@ jobs:
run: gh --version
- name: Check out repo's default branch
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup
@@ -37,7 +37,7 @@ jobs:
- name: Check out docs-early-access too, if internal repo
if: ${{ github.repository == 'github/docs-internal' }}
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
with:
repository: github/docs-early-access
token: ${{ secrets.DOCS_BOT_PAT_BASE }}
diff --git a/.github/workflows/link-check-on-pr.yml b/.github/workflows/link-check-on-pr.yml
index c91fe77e93..811c47752f 100644
--- a/.github/workflows/link-check-on-pr.yml
+++ b/.github/workflows/link-check-on-pr.yml
@@ -26,7 +26,7 @@ jobs:
if: github.repository == 'github/docs-internal' || github.repository == 'github/docs'
steps:
- name: Checkout
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup
diff --git a/.github/workflows/lint-code.yml b/.github/workflows/lint-code.yml
index 7506716f6d..da2094ec5a 100644
--- a/.github/workflows/lint-code.yml
+++ b/.github/workflows/lint-code.yml
@@ -23,7 +23,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out repo
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup
diff --git a/.github/workflows/lint-entire-content-data-markdown.yml b/.github/workflows/lint-entire-content-data-markdown.yml
index 42699314f9..d59a2b89a7 100644
--- a/.github/workflows/lint-entire-content-data-markdown.yml
+++ b/.github/workflows/lint-entire-content-data-markdown.yml
@@ -23,7 +23,7 @@ jobs:
run: gh --version
- name: Check out repo's default branch
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Set up Node and dependencies
uses: ./.github/actions/node-npm-setup
diff --git a/.github/workflows/local-dev.yml b/.github/workflows/local-dev.yml
index 6653b5f41b..8e69474ed2 100644
--- a/.github/workflows/local-dev.yml
+++ b/.github/workflows/local-dev.yml
@@ -17,7 +17,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out repo
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup
diff --git a/.github/workflows/moda-allowed-ips.yml b/.github/workflows/moda-allowed-ips.yml
index b4b9cd6f4a..c188dbd569 100644
--- a/.github/workflows/moda-allowed-ips.yml
+++ b/.github/workflows/moda-allowed-ips.yml
@@ -19,7 +19,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out the repository
- uses: actions/checkout@v5
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Update list of allowed IPs
run: |
diff --git a/.github/workflows/move-content.yml b/.github/workflows/move-content.yml
index 2737da134d..9df8f84e9a 100644
--- a/.github/workflows/move-content.yml
+++ b/.github/workflows/move-content.yml
@@ -25,7 +25,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out repo
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup
diff --git a/.github/workflows/move-ready-to-merge-pr.yaml b/.github/workflows/move-ready-to-merge-pr.yaml
index af3e6e0a9d..8c9b4df873 100644
--- a/.github/workflows/move-ready-to-merge-pr.yaml
+++ b/.github/workflows/move-ready-to-merge-pr.yaml
@@ -31,7 +31,7 @@ jobs:
repo-token: ${{ secrets.DOCS_BOT_PAT_BASE }}
- name: Check out repo
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup
diff --git a/.github/workflows/move-reopened-issues-to-triage.yaml b/.github/workflows/move-reopened-issues-to-triage.yaml
index 0e6ce5ea1b..80726a5c10 100644
--- a/.github/workflows/move-reopened-issues-to-triage.yaml
+++ b/.github/workflows/move-reopened-issues-to-triage.yaml
@@ -45,7 +45,7 @@ jobs:
- name: Check out repo
if: ${{ failure() }}
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/slack-alert
if: ${{ failure() }}
with:
diff --git a/.github/workflows/needs-sme-stale-check.yaml b/.github/workflows/needs-sme-stale-check.yaml
index 186f613d8b..a847c4997d 100644
--- a/.github/workflows/needs-sme-stale-check.yaml
+++ b/.github/workflows/needs-sme-stale-check.yaml
@@ -35,7 +35,7 @@ jobs:
- name: Check out repo
if: ${{ failure() }}
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/slack-alert
if: ${{ failure() }}
with:
diff --git a/.github/workflows/needs-sme-workflow.yml b/.github/workflows/needs-sme-workflow.yml
index 9e930545c1..1d3b6bef88 100644
--- a/.github/workflows/needs-sme-workflow.yml
+++ b/.github/workflows/needs-sme-workflow.yml
@@ -22,7 +22,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out repo
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: peter-evans/create-or-update-comment@e8674b075228eee787fea43ef493e45ece1004c9
with:
@@ -41,7 +41,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out repo
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: peter-evans/create-or-update-comment@e8674b075228eee787fea43ef493e45ece1004c9
with:
diff --git a/.github/workflows/no-response.yaml b/.github/workflows/no-response.yaml
index 71478cffec..11c7bccf36 100644
--- a/.github/workflows/no-response.yaml
+++ b/.github/workflows/no-response.yaml
@@ -57,7 +57,7 @@ jobs:
- name: Check out repo
if: ${{ failure() }}
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/slack-alert
if: ${{ failure() }}
with:
diff --git a/.github/workflows/notify-about-deployment.yml b/.github/workflows/notify-about-deployment.yml
index c16fecc57b..dfefe3de87 100644
--- a/.github/workflows/notify-about-deployment.yml
+++ b/.github/workflows/notify-about-deployment.yml
@@ -26,7 +26,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out repo
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup
diff --git a/.github/workflows/orphaned-features-check.yml b/.github/workflows/orphaned-features-check.yml
index f1fba43136..c499e1e38f 100644
--- a/.github/workflows/orphaned-features-check.yml
+++ b/.github/workflows/orphaned-features-check.yml
@@ -26,7 +26,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout English repo
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
with:
# Using a PAT is necessary so that the new commit will trigger the
# CI in the PR. (Events from GITHUB_TOKEN don't trigger new workflows.)
diff --git a/.github/workflows/orphaned-files-check.yml b/.github/workflows/orphaned-files-check.yml
index ba2e398c28..db410c1b5b 100644
--- a/.github/workflows/orphaned-files-check.yml
+++ b/.github/workflows/orphaned-files-check.yml
@@ -30,7 +30,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout English repo
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
with:
# Using a PAT is necessary so that the new commit will trigger the
# CI in the PR. (Events from GITHUB_TOKEN don't trigger new workflows.)
diff --git a/.github/workflows/os-ready-for-review.yml b/.github/workflows/os-ready-for-review.yml
index 630a443c8e..7aef9268f4 100644
--- a/.github/workflows/os-ready-for-review.yml
+++ b/.github/workflows/os-ready-for-review.yml
@@ -20,7 +20,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out repo content
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Check if this run was triggered by a member of the docs team
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd
diff --git a/.github/workflows/package-lock-lint.yml b/.github/workflows/package-lock-lint.yml
index 1b9581f945..105d074170 100644
--- a/.github/workflows/package-lock-lint.yml
+++ b/.github/workflows/package-lock-lint.yml
@@ -25,7 +25,7 @@ jobs:
if: github.repository == 'github/docs-internal' || github.repository == 'github/docs'
steps:
- name: Check out repo
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Setup Node.js
uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
diff --git a/.github/workflows/purge-fastly.yml b/.github/workflows/purge-fastly.yml
index cc3c74ca88..ad0eae2a4b 100644
--- a/.github/workflows/purge-fastly.yml
+++ b/.github/workflows/purge-fastly.yml
@@ -32,7 +32,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out repo
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup
diff --git a/.github/workflows/readability.yml b/.github/workflows/readability.yml
index ef68d9c798..d2dddce677 100644
--- a/.github/workflows/readability.yml
+++ b/.github/workflows/readability.yml
@@ -28,7 +28,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out repo with full history
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
with:
fetch-depth: 0
diff --git a/.github/workflows/ready-for-doc-review.yml b/.github/workflows/ready-for-doc-review.yml
index 15d07669a1..b34ef6a5aa 100644
--- a/.github/workflows/ready-for-doc-review.yml
+++ b/.github/workflows/ready-for-doc-review.yml
@@ -21,7 +21,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out repo content
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
with:
repository: github/docs-internal
token: ${{ secrets.DOCS_BOT_PAT_BASE }}
diff --git a/.github/workflows/repo-sync.yml b/.github/workflows/repo-sync.yml
index bb915d1b85..d0e172e28b 100644
--- a/.github/workflows/repo-sync.yml
+++ b/.github/workflows/repo-sync.yml
@@ -23,7 +23,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out repo
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Sync repo to branch
uses: repo-sync/github-sync@3832fe8e2be32372e1b3970bbae8e7079edeec88
diff --git a/.github/workflows/review-comment.yml b/.github/workflows/review-comment.yml
index 78aff73f53..e048d59b9e 100644
--- a/.github/workflows/review-comment.yml
+++ b/.github/workflows/review-comment.yml
@@ -38,7 +38,7 @@ jobs:
PR_NUMBER: ${{ github.event.pull_request.number }}
steps:
- name: check out repo content
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup
- name: Set APP_URL
diff --git a/.github/workflows/reviewers-content-systems.yml b/.github/workflows/reviewers-content-systems.yml
index d66638401a..654707564b 100644
--- a/.github/workflows/reviewers-content-systems.yml
+++ b/.github/workflows/reviewers-content-systems.yml
@@ -36,7 +36,7 @@ jobs:
steps:
- name: Checkout repository
- uses: actions/checkout@v5
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Add content systems as a reviewer
uses: ./.github/actions/retry-command
diff --git a/.github/workflows/reviewers-dependabot.yml b/.github/workflows/reviewers-dependabot.yml
index 2ff85c6feb..d02144daff 100644
--- a/.github/workflows/reviewers-dependabot.yml
+++ b/.github/workflows/reviewers-dependabot.yml
@@ -37,7 +37,7 @@ jobs:
steps:
- name: Checkout repository
- uses: actions/checkout@v5
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Add dependabot as a reviewer
uses: ./.github/actions/retry-command
diff --git a/.github/workflows/reviewers-docs-engineering.yml b/.github/workflows/reviewers-docs-engineering.yml
index 8746c50cdb..f6bdbb269d 100644
--- a/.github/workflows/reviewers-docs-engineering.yml
+++ b/.github/workflows/reviewers-docs-engineering.yml
@@ -47,7 +47,7 @@ jobs:
steps:
- name: Checkout repository
- uses: actions/checkout@v5
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Add docs engineering as a reviewer
uses: ./.github/actions/retry-command
diff --git a/.github/workflows/reviewers-legal.yml b/.github/workflows/reviewers-legal.yml
index 4079dc2ff6..59ad01a5cd 100644
--- a/.github/workflows/reviewers-legal.yml
+++ b/.github/workflows/reviewers-legal.yml
@@ -32,7 +32,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
- uses: actions/checkout@v5
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Get changed files
id: changed_files
diff --git a/.github/workflows/site-policy-sync.yml b/.github/workflows/site-policy-sync.yml
index 57dbccfbf7..cc2e24c2f7 100644
--- a/.github/workflows/site-policy-sync.yml
+++ b/.github/workflows/site-policy-sync.yml
@@ -27,10 +27,10 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: checkout docs-internal
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: checkout public site-policy
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
with:
repository: github/site-policy
token: ${{ secrets.API_TOKEN_SITEPOLICY }}
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
index 1048f56e5f..f3ac4f2fec 100644
--- a/.github/workflows/stale.yml
+++ b/.github/workflows/stale.yml
@@ -38,7 +38,7 @@ jobs:
- name: Check out repo
if: ${{ failure() }}
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/slack-alert
if: ${{ failure() }}
with:
diff --git a/.github/workflows/sync-audit-logs.yml b/.github/workflows/sync-audit-logs.yml
index d29530591a..d49b5d649a 100644
--- a/.github/workflows/sync-audit-logs.yml
+++ b/.github/workflows/sync-audit-logs.yml
@@ -24,7 +24,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup
diff --git a/.github/workflows/sync-codeql-cli.yml b/.github/workflows/sync-codeql-cli.yml
index a8e067ec0e..b68c6617f5 100644
--- a/.github/workflows/sync-codeql-cli.yml
+++ b/.github/workflows/sync-codeql-cli.yml
@@ -30,11 +30,11 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository code
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
# Check out a nested repository inside of previous checkout
- name: Checkout semmle-code repo
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
with:
# By default, only the most recent commit of the `main` branch
# will be checked out
diff --git a/.github/workflows/sync-graphql.yml b/.github/workflows/sync-graphql.yml
index d3358a237d..4831581977 100644
--- a/.github/workflows/sync-graphql.yml
+++ b/.github/workflows/sync-graphql.yml
@@ -23,7 +23,7 @@ jobs:
ignored-types: ${{ steps.sync.outputs.ignored-types }}
steps:
- name: Checkout
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup
- name: Run updater scripts
id: sync
@@ -82,7 +82,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/slack-alert
with:
slack_channel_id: ${{ secrets.DOCS_ALERTS_SLACK_CHANNEL_ID }}
diff --git a/.github/workflows/sync-openapi.yml b/.github/workflows/sync-openapi.yml
index 941df7895c..e2474d481d 100644
--- a/.github/workflows/sync-openapi.yml
+++ b/.github/workflows/sync-openapi.yml
@@ -30,11 +30,11 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository code
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
# Check out a nested repository inside of previous checkout
- name: Checkout rest-api-description repo
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
with:
# By default, only the most recent commit of the `main` branch
# will be checked out
@@ -42,7 +42,7 @@ jobs:
path: rest-api-description
ref: ${{ inputs.SOURCE_BRANCH }}
- - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
with:
# By default, only the most recent commit of the `main` branch
# will be checked out
diff --git a/.github/workflows/sync-secret-scanning.yml b/.github/workflows/sync-secret-scanning.yml
index 1700dc6859..b8f62db784 100644
--- a/.github/workflows/sync-secret-scanning.yml
+++ b/.github/workflows/sync-secret-scanning.yml
@@ -24,7 +24,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup
diff --git a/.github/workflows/test-changed-content.yml b/.github/workflows/test-changed-content.yml
index a1f2e45894..9581106ca7 100644
--- a/.github/workflows/test-changed-content.yml
+++ b/.github/workflows/test-changed-content.yml
@@ -27,7 +27,7 @@ jobs:
# Each of these ifs needs to be repeated at each step to make sure the required check still runs
# Even if if doesn't do anything
- name: Check out repo
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index e9892fffce..238b7dd1fa 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -87,7 +87,7 @@ jobs:
# Each of these ifs needs to be repeated at each step to make sure the required check still runs
# Even if if doesn't do anything
- name: Check out repo
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/setup-elasticsearch
if: ${{ matrix.name == 'search' || matrix.name == 'languages' }}
diff --git a/.github/workflows/triage-issue-comments.yml b/.github/workflows/triage-issue-comments.yml
index 740249159c..6b96d0f48f 100644
--- a/.github/workflows/triage-issue-comments.yml
+++ b/.github/workflows/triage-issue-comments.yml
@@ -43,7 +43,7 @@ jobs:
}
- name: Check out repo
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup
diff --git a/.github/workflows/triage-issues.yml b/.github/workflows/triage-issues.yml
index dd2082efc1..4b5d1080c8 100644
--- a/.github/workflows/triage-issues.yml
+++ b/.github/workflows/triage-issues.yml
@@ -21,7 +21,7 @@ jobs:
steps:
- name: Check out repo
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup
diff --git a/.github/workflows/triage-pull-requests.yml b/.github/workflows/triage-pull-requests.yml
index 551aa65b4b..463d08a4e3 100644
--- a/.github/workflows/triage-pull-requests.yml
+++ b/.github/workflows/triage-pull-requests.yml
@@ -23,7 +23,7 @@ jobs:
steps:
- name: Check out repo
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup
diff --git a/.github/workflows/triage-stale-check.yml b/.github/workflows/triage-stale-check.yml
index e04e0b2387..c88030bc17 100644
--- a/.github/workflows/triage-stale-check.yml
+++ b/.github/workflows/triage-stale-check.yml
@@ -44,7 +44,7 @@ jobs:
- name: Check out repo
if: ${{ failure() }}
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/slack-alert
if: ${{ failure() }}
with:
@@ -72,7 +72,7 @@ jobs:
- name: Check out repo
if: ${{ failure() }}
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/slack-alert
if: ${{ failure() }}
with:
diff --git a/.github/workflows/triage-unallowed-contributions.yml b/.github/workflows/triage-unallowed-contributions.yml
index 96ccd5a686..c3fea077d2 100644
--- a/.github/workflows/triage-unallowed-contributions.yml
+++ b/.github/workflows/triage-unallowed-contributions.yml
@@ -23,7 +23,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out repo
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Get files changed
uses: dorny/paths-filter@0bc4621a3135347011ad047f9ecf449bf72ce2bd
diff --git a/.github/workflows/validate-asset-images.yml b/.github/workflows/validate-asset-images.yml
index 1c9ff04f49..b3eef046db 100644
--- a/.github/workflows/validate-asset-images.yml
+++ b/.github/workflows/validate-asset-images.yml
@@ -20,7 +20,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out repo
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup
diff --git a/.github/workflows/validate-github-github-docs-urls.yml b/.github/workflows/validate-github-github-docs-urls.yml
index d53df2636b..488f795dbf 100644
--- a/.github/workflows/validate-github-github-docs-urls.yml
+++ b/.github/workflows/validate-github-github-docs-urls.yml
@@ -34,10 +34,10 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out repo's default branch
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup
- - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
with:
token: ${{ secrets.DOCS_BOT_PAT_BASE }}
repository: github/github
diff --git a/.github/workflows/validate-openapi-check.yml b/.github/workflows/validate-openapi-check.yml
index 510f29752e..93e3460acd 100644
--- a/.github/workflows/validate-openapi-check.yml
+++ b/.github/workflows/validate-openapi-check.yml
@@ -28,7 +28,7 @@ jobs:
if: github.repository == 'github/docs-internal'
steps:
- name: Checkout
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@c47758b77c9736f4b2ef4073d4d51994fabfe349 # v3.7.1
diff --git a/content/actions/how-tos/manage-workflow-runs/manually-run-a-workflow.md b/content/actions/how-tos/manage-workflow-runs/manually-run-a-workflow.md
index a84ae5173b..ef0dc91a3f 100644
--- a/content/actions/how-tos/manage-workflow-runs/manually-run-a-workflow.md
+++ b/content/actions/how-tos/manage-workflow-runs/manually-run-a-workflow.md
@@ -87,6 +87,6 @@ gh run watch
When using the REST API, you configure the `inputs` and `ref` as request body parameters. If the inputs are omitted, the default values defined in the workflow file are used.
> [!NOTE]
-> You can define up to 10 `inputs` for a `workflow_dispatch` event.
+> You can define up to {% ifversion fpt or ghec %}25 {% else %}10 {% endif %} `inputs` for a `workflow_dispatch` event.
For more information about using the REST API, see [AUTOTITLE](/rest/actions/workflows#create-a-workflow-dispatch-event).
diff --git a/content/billing/concepts/billing-cycles.md b/content/billing/concepts/billing-cycles.md
index ecf4d13543..ae3ffc19be 100644
--- a/content/billing/concepts/billing-cycles.md
+++ b/content/billing/concepts/billing-cycles.md
@@ -26,13 +26,17 @@ Your billing experience depends on whether your products are metered, volume-bas
## Billing cycles for metered products
-Metered products have a fixed **billing period** that starts at 00:00:00 UTC on the first day of each month and ends at 23:59:59 UTC on the last day of the month.
+Metered products, and all payments made using an Azure subscription ID, have a fixed **billing period** that starts at 00:00:00 UTC on the first day of each month and ends at 23:59:59 UTC on the last day of the month.
-At the end of each month, your metered usage is calculated and scheduled to be billed on your **bill cycle day**.
+At the end of each month, your metered usage is calculated and scheduled to be billed on your **billing date**. Accounts using an Azure subscription ID can access their specific billing date in the Azure commerce portal. For users with other payment methods:
-{% ifversion fpt %}For personal accounts and organizations, your bill cycle day is typically the day you started a paid plan (not necessarily when the account was created).{% elsif ghec %}Your bill cycle day is typically determined by when you converted from a trial to a paid enterprise account.{% endif %} For example, if you {% ifversion fpt %}started a paid plan{% elsif ghec %}converted from a trial{% endif %} on the 15th of a month, you will be billed on the 15th of each subsequent month.
+* **Personal and organization accounts:** This is typically the day you started a paid plan (not necessarily when the account was created).
+* **Enterprise accounts:** This is typically determined by when you converted from a trial to a paid enterprise account.
-> [!NOTE] If you are paying via an Azure subscription ID, your **billing period** will run from the first day of each month to the last day of the month. To access your specific **bill cycle day**, please visit the Azure commerce portal.
+For example, if you started a paid plan or converted from a trial on the 15th of a month, you will be billed on the 15th of each subsequent month.
+
+> [!NOTE]
+> From **December 1, 2025**, all self-serve, metered {% data variables.product.prodname_ghe_cloud %} accounts that pay by credit card will migrate to a **billing date** of 1st of the month. See [Billing date standardized to the first of the month for self-serve credit card metered Enterprise customers](https://github.blog/changelog/2025-11-17-billing-date-standardized-to-the-first-of-the-month-for-self-serve-credit-card-metered-enterprise-customers-now-generally-available/) in the changelog.
## Billing cycles for volume-based products
diff --git a/content/billing/get-started/how-billing-works.md b/content/billing/get-started/how-billing-works.md
index 371f53b9c1..c7a3721317 100644
--- a/content/billing/get-started/how-billing-works.md
+++ b/content/billing/get-started/how-billing-works.md
@@ -61,9 +61,11 @@ If required, {% data variables.product.prodname_ghe_cloud %} accounts can reques
Each account has a **billing date** and a **billing cycle**.
-For credit card and PayPal payments, the billing date is the day you started a paid plan (not necessarily when the account was created). For example, if you started a paid plan on the 15th of a month, you will be billed on the 15th of each subsequent month. For payments using an Azure subscription ID, the billing date is available in the Azure commerce portal.
+For **personal and organization accounts** set up for credit card and PayPal payments, the **billing date** is typically the day you started a paid plan (not necessarily when the account was created). For example, if you started a paid plan on the 15th of a month, you will be billed on the 15th of each subsequent month. For payments using an Azure subscription ID, the billing date is available in the Azure commerce portal.
-Most users pay for {% data variables.product.github %} using metered billing. The billing cycle for all metered products is a fixed period from the first day to the last day of the month.
+For **enterprise accounts**, your **billing date** will vary. See [AUTOTITLE](/billing/concepts/billing-cycles).
+
+Most users pay for {% data variables.product.github %} using metered billing. The **billing cycle** for all metered products is a fixed period from the first day to the last day of the month.
### Authorization holds
diff --git a/content/copilot/how-tos/troubleshoot-copilot/view-logs.md b/content/copilot/how-tos/troubleshoot-copilot/view-logs.md
index 7f682f4ddf..f09afe0540 100644
--- a/content/copilot/how-tos/troubleshoot-copilot/view-logs.md
+++ b/content/copilot/how-tos/troubleshoot-copilot/view-logs.md
@@ -118,6 +118,22 @@ Alternatively, you can open the log folder for {% data variables.product.prodnam
* Use: Ctrl+Shift+P
1. Type "Logs", and then select **Developer: Open Extension Logs Folder** from the list.
+## Enabling debug mode
+
+If you find the log file doesn't contain enough information to resolve an issue, it may help to enable debug logging temporarily. This can be especially helpful for debugging network-related issues.
+
+1. Open the {% data variables.product.prodname_vscode_command_palette_shortname %}
+ * For Mac:
+ * Use: Shift+Command+P
+ * For Windows or Linux:
+ * Use: Ctrl+Shift+P
+1. Type "Developer", then select **Developer: Set Log Level**.
+1. Type "{% data variables.product.github %}", then select the {% data variables.product.github %} extension you're troubleshooting:
+ * **{% data variables.copilot.copilot_chat %}** for the {% data variables.copilot.copilot_chat_short %} extension.
+ * **{% data variables.product.github %}** for the {% data variables.copilot.copilot_extension %}.
+1. Select **Trace** from the dropdown list.
+1. When you have the information you need, disable debug mode by repeating steps 1 through 4 and returning the logging level to **Info**.
+
## Viewing network connectivity diagnostics logs
If you encounter problems connecting to {% data variables.product.prodname_copilot %} due to network restrictions, firewalls, or your proxy setup, use the following troubleshooting steps.
diff --git a/next.config.ts b/next.config.ts
index b3106dd10c..3bc08e9c78 100644
--- a/next.config.ts
+++ b/next.config.ts
@@ -37,7 +37,7 @@ const config: NextConfig = {
},
// Don't use automatic Next.js logging in dev unless the log level is `debug` or higher
// See `src/observability/logger/README.md` for log levels
- logging: getLogLevelNumber() < 3 ? false : {},
+ logging: getLogLevelNumber() < 3 ? undefined : {},
async rewrites() {
const DEFAULT_VERSION = 'free-pro-team@latest'
return productIds.map((productId) => {
@@ -48,6 +48,11 @@ const config: NextConfig = {
})
},
+ webpack: (webpackConfig) => {
+ webpackConfig.resolve.fallback = { fs: false, async_hooks: false }
+ return webpackConfig
+ },
+
// Turbopack is the default bundler in Next.js 16
// Keep webpack config for now to support both bundlers
@@ -63,13 +68,6 @@ const config: NextConfig = {
},
},
- webpack: (webpackConfig) => {
- webpackConfig.experiments = webpackConfig.experiments || {}
- webpackConfig.experiments.topLevelAwait = true
- webpackConfig.resolve.fallback = { fs: false, async_hooks: false }
- return webpackConfig
- },
-
// https://nextjs.org/docs/api-reference/next.config.js/compression
compress: false,
@@ -79,18 +77,6 @@ const config: NextConfig = {
// the CDN marks the cached content as "fresh".
generateEtags: false,
- experimental: {
- // The output of our getServerSideProps() return large chunks of
- // data because it contains our rendered Markdown.
- // The default, for a "Large Page Data" warning is 128KB
- // but many of our pages are much larger.
- // The warning is: https://nextjs.org/docs/messages/large-page-data
- largePageDataBytes: 1024 * 1024, // 1 MB
-
- // This makes it so that going Back will scroll to the previous position
- scrollRestoration: true,
- },
-
compiler: {
styledComponents: true,
},
diff --git a/package-lock.json b/package-lock.json
index 147d603561..2923c76cba 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -43,7 +43,7 @@
"file-type": "21.0.0",
"flat": "^6.0.1",
"github-slugger": "^2.0.0",
- "glob": "11.0.2",
+ "glob": "11.1.0",
"hast-util-from-parse5": "^8.0.3",
"hast-util-to-string": "^3.0.1",
"hastscript": "^9.0.1",
@@ -57,7 +57,7 @@
"is-svg": "6.0.0",
"javascript-stringify": "^2.1.0",
"js-cookie": "^3.0.5",
- "js-yaml": "^4.1.0",
+ "js-yaml": "^4.1.1",
"liquidjs": "^10.16.7",
"lodash": "^4.17.21",
"lodash-es": "^4.17.21",
@@ -2383,6 +2383,7 @@
"version": "8.0.2",
"resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz",
"integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==",
+ "license": "ISC",
"dependencies": {
"string-width": "^5.1.2",
"string-width-cjs": "npm:string-width@^4.2.0",
@@ -2396,9 +2397,10 @@
}
},
"node_modules/@isaacs/cliui/node_modules/ansi-styles": {
- "version": "6.2.1",
- "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz",
- "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==",
+ "version": "6.2.3",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz",
+ "integrity": "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==",
+ "license": "MIT",
"engines": {
"node": ">=12"
},
@@ -2410,6 +2412,7 @@
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz",
"integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==",
+ "license": "MIT",
"dependencies": {
"eastasianwidth": "^0.2.0",
"emoji-regex": "^9.2.2",
@@ -2426,6 +2429,7 @@
"version": "8.1.0",
"resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz",
"integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==",
+ "license": "MIT",
"dependencies": {
"ansi-styles": "^6.1.0",
"string-width": "^5.0.1",
@@ -3326,15 +3330,6 @@
"node": ">=0.10"
}
},
- "node_modules/@pkgjs/parseargs": {
- "version": "0.11.0",
- "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz",
- "integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==",
- "optional": true,
- "engines": {
- "node": ">=14"
- }
- },
"node_modules/@pkgr/core": {
"version": "0.2.9",
"resolved": "https://registry.npmjs.org/@pkgr/core/-/core-0.2.9.tgz",
@@ -5646,6 +5641,7 @@
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz",
"integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==",
+ "dev": true,
"dependencies": {
"balanced-match": "^1.0.0"
}
@@ -6843,6 +6839,8 @@
},
"node_modules/eastasianwidth": {
"version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz",
+ "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==",
"license": "MIT"
},
"node_modules/ecdsa-sig-formatter": {
@@ -8512,11 +8510,12 @@
}
},
"node_modules/foreground-child": {
- "version": "3.1.1",
- "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.1.1.tgz",
- "integrity": "sha512-TMKDUnIte6bfb5nWv7V/caI169OHgvwjb7V4WkeUvbQQdjr5rWKqHFiKWb/fcOwB+CzBT+qbWjvj+DVwRskpIg==",
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.3.1.tgz",
+ "integrity": "sha512-gIXjKqtFuWEgzFRJA9WCQeSJLZDjgJUOMCMzxtvFq/37KojM1BFGufqsCy0r4qSQmYLsZYMeyRqzIWOMup03sw==",
+ "license": "ISC",
"dependencies": {
- "cross-spawn": "^7.0.0",
+ "cross-spawn": "^7.0.6",
"signal-exit": "^4.0.1"
},
"engines": {
@@ -8530,6 +8529,7 @@
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz",
"integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==",
+ "license": "ISC",
"engines": {
"node": ">=14"
},
@@ -8755,14 +8755,14 @@
"license": "ISC"
},
"node_modules/glob": {
- "version": "11.0.2",
- "resolved": "https://registry.npmjs.org/glob/-/glob-11.0.2.tgz",
- "integrity": "sha512-YT7U7Vye+t5fZ/QMkBFrTJ7ZQxInIUjwyAjVj84CYXqgBdv30MFUPGnBR6sQaVq6Is15wYJUsnzTuWaGRBhBAQ==",
- "license": "ISC",
+ "version": "11.1.0",
+ "resolved": "https://registry.npmjs.org/glob/-/glob-11.1.0.tgz",
+ "integrity": "sha512-vuNwKSaKiqm7g0THUBu2x7ckSs3XJLXE+2ssL7/MfTGPLLcrJQ/4Uq1CjPTtO5cCIiRxqvN6Twy1qOwhL0Xjcw==",
+ "license": "BlueOak-1.0.0",
"dependencies": {
- "foreground-child": "^3.1.0",
- "jackspeak": "^4.0.1",
- "minimatch": "^10.0.0",
+ "foreground-child": "^3.3.1",
+ "jackspeak": "^4.1.1",
+ "minimatch": "^10.1.1",
"minipass": "^7.1.2",
"package-json-from-dist": "^1.0.0",
"path-scurry": "^2.0.0"
@@ -8789,11 +8789,12 @@
}
},
"node_modules/glob/node_modules/minimatch": {
- "version": "10.0.1",
- "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-10.0.1.tgz",
- "integrity": "sha512-ethXTt3SGGR+95gudmqJ1eNhRO7eGEGIgYA9vnPatK4/etz2MEVDno5GMCibdMTuBMyElzIlgxMna3K94XDIDQ==",
+ "version": "10.1.1",
+ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-10.1.1.tgz",
+ "integrity": "sha512-enIvLvRAFZYXJzkCYG5RKmPfrFArdLv+R+lbQ53BmIMLIry74bjKzX6iHAm8WYamJkhSSEabrWN5D97XnKObjQ==",
+ "license": "BlueOak-1.0.0",
"dependencies": {
- "brace-expansion": "^2.0.1"
+ "@isaacs/brace-expansion": "^5.0.0"
},
"engines": {
"node": "20 || >=22"
@@ -10210,9 +10211,10 @@
"license": "ISC"
},
"node_modules/jackspeak": {
- "version": "4.0.1",
- "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-4.0.1.tgz",
- "integrity": "sha512-cub8rahkh0Q/bw1+GxP7aeSe29hHHn2V4m29nnDlvCdlgU+3UGxkZp7Z53jLUdpX3jdTO0nJZUDl3xvbWc2Xog==",
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-4.1.1.tgz",
+ "integrity": "sha512-zptv57P3GpL+O0I7VdMJNBZCu+BPHVQUk55Ft8/QCJjTVxrnJHuVuX/0Bl2A6/+2oyR/ZMEuFKwmzqqZ/U5nPQ==",
+ "license": "BlueOak-1.0.0",
"dependencies": {
"@isaacs/cliui": "^8.0.2"
},
@@ -10221,9 +10223,6 @@
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
- },
- "optionalDependencies": {
- "@pkgjs/parseargs": "^0.11.0"
}
},
"node_modules/javascript-stringify": {
@@ -10267,7 +10266,9 @@
"license": "MIT"
},
"node_modules/js-yaml": {
- "version": "4.1.0",
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz",
+ "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==",
"license": "MIT",
"dependencies": {
"argparse": "^2.0.1"
@@ -14915,6 +14916,7 @@
"version": "4.2.3",
"resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
"integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
+ "license": "MIT",
"dependencies": {
"emoji-regex": "^8.0.0",
"is-fullwidth-code-point": "^3.0.0",
@@ -14927,12 +14929,14 @@
"node_modules/string-width-cjs/node_modules/emoji-regex": {
"version": "8.0.0",
"resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
- "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="
+ "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==",
+ "license": "MIT"
},
"node_modules/string-width-cjs/node_modules/strip-ansi": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
"integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
+ "license": "MIT",
"dependencies": {
"ansi-regex": "^5.0.1"
},
@@ -15059,6 +15063,7 @@
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
"integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
+ "license": "MIT",
"dependencies": {
"ansi-regex": "^5.0.1"
},
@@ -16818,6 +16823,7 @@
"version": "7.0.0",
"resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz",
"integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==",
+ "license": "MIT",
"dependencies": {
"ansi-styles": "^4.0.0",
"string-width": "^4.1.0",
@@ -16834,6 +16840,7 @@
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
"integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
+ "license": "MIT",
"dependencies": {
"ansi-regex": "^5.0.1"
},
diff --git a/package.json b/package.json
index b28b2e8332..0845c9a8aa 100644
--- a/package.json
+++ b/package.json
@@ -188,7 +188,7 @@
"file-type": "21.0.0",
"flat": "^6.0.1",
"github-slugger": "^2.0.0",
- "glob": "11.0.2",
+ "glob": "11.1.0",
"hast-util-from-parse5": "^8.0.3",
"hast-util-to-string": "^3.0.1",
"hastscript": "^9.0.1",
@@ -202,7 +202,7 @@
"is-svg": "6.0.0",
"javascript-stringify": "^2.1.0",
"js-cookie": "^3.0.5",
- "js-yaml": "^4.1.0",
+ "js-yaml": "^4.1.1",
"liquidjs": "^10.16.7",
"lodash": "^4.17.21",
"lodash-es": "^4.17.21",
diff --git a/src/ai-tools/lib/call-models-api.ts b/src/ai-tools/lib/call-models-api.ts
index dda311baa0..e08638e3c3 100644
--- a/src/ai-tools/lib/call-models-api.ts
+++ b/src/ai-tools/lib/call-models-api.ts
@@ -34,10 +34,30 @@ interface ChatCompletionResponse {
}
}
-export async function callModelsApi(promptWithContent: ChatCompletionRequest): Promise {
+export async function callModelsApi(
+ promptWithContent: ChatCompletionRequest,
+ verbose = false,
+): Promise {
let aiResponse: ChatCompletionChoice
+ // Set default model if none specified
+ if (!promptWithContent.model) {
+ promptWithContent.model = 'openai/gpt-4o'
+ if (verbose) {
+ console.log('⚠️ No model specified, using default: openai/gpt-4o')
+ }
+ }
+
try {
+ // Create an AbortController for timeout handling
+ const controller = new AbortController()
+ const timeoutId = setTimeout(() => controller.abort(), 180000) // 3 minutes
+
+ const startTime = Date.now()
+ if (verbose) {
+ console.log(`🚀 Making API request to GitHub Models using ${promptWithContent.model}...`)
+ }
+
const response = await fetch(modelsCompletionsEndpoint, {
method: 'post',
body: JSON.stringify(promptWithContent),
@@ -45,16 +65,80 @@ export async function callModelsApi(promptWithContent: ChatCompletionRequest): P
'Content-Type': 'application/json',
Authorization: `Bearer ${process.env.GITHUB_TOKEN}`,
'X-GitHub-Api-Version': '2022-11-28',
- Accept: 'Accept: application/vnd.github+json',
+ Accept: 'application/vnd.github+json',
},
+ signal: controller.signal,
})
+ const fetchTime = Date.now() - startTime
+ if (verbose) {
+ console.log(`⏱️ API response received in ${fetchTime}ms`)
+ }
+
+ clearTimeout(timeoutId)
+
+ if (!response.ok) {
+ let errorMessage = `HTTP error! status: ${response.status} - ${response.statusText}`
+
+ // Try to get more detailed error information
+ try {
+ const errorBody = await response.json()
+ if (errorBody.error && errorBody.error.message) {
+ errorMessage += ` - ${errorBody.error.message}`
+ }
+ } catch {
+ // If we can't parse error body, continue with basic error
+ }
+
+ // Add helpful hints for common errors
+ if (response.status === 401) {
+ errorMessage += ' (Check your GITHUB_TOKEN)'
+ } else if (response.status === 400) {
+ errorMessage += ' (This may be due to an invalid model or malformed request)'
+ } else if (response.status === 429) {
+ errorMessage += ' (Rate limit exceeded - try again later)'
+ }
+
+ throw new Error(errorMessage)
+ }
+
const data: ChatCompletionResponse = await response.json()
+
+ if (!data.choices || data.choices.length === 0) {
+ throw new Error('No response choices returned from API')
+ }
+
aiResponse = data.choices[0]
+
+ if (verbose) {
+ const totalTime = Date.now() - startTime
+ console.log(`✅ Total API call completed in ${totalTime}ms`)
+
+ if (data.usage) {
+ console.log(
`📊 Tokens: ${data.usage.prompt_tokens} prompt + ${data.usage.completion_tokens} completion = ${data.usage.total_tokens} total`,
+ )
+ }
+ }
} catch (error) {
- console.error('Error calling GitHub Models REST API')
+ if (error instanceof Error) {
+ if (error.name === 'AbortError') {
+ throw new Error('API call timed out after 3 minutes')
+ }
+ console.error('Error calling GitHub Models REST API:', error.message)
+ }
throw error
}
- return aiResponse.message.content
+ return cleanAIResponse(aiResponse.message.content)
+}
+
+// Helper function to clean up AI response content
+function cleanAIResponse(content: string): string {
+ // Remove markdown code blocks
+ return content
+ .replace(/^```[\w]*\n/gm, '') // Remove opening code blocks
+ .replace(/\n```$/gm, '') // Remove closing code blocks at end
+ .replace(/\n```\n/gm, '\n') // Remove standalone closing code blocks
+ .trim()
}
diff --git a/src/ai-tools/prompts/intro.md b/src/ai-tools/prompts/intro.md
index ddf4eeccbb..5a0b6be7e9 100644
--- a/src/ai-tools/prompts/intro.md
+++ b/src/ai-tools/prompts/intro.md
@@ -2,37 +2,20 @@ You are an expert SEO content optimizer specializing in GitHub documentation.
Your task is to analyze a GitHub Docs content file and generate or optimize
the intro frontmatter property following Google's meta description best practices.
-## Your mission
-
-Generate a single, concise intro (one simple sentence maximum - NO colons, NO detailed explanations) that:
+## Core Requirements
-* Starts with an action verb (e.g., "Learn," "Discover," "Access," "Explore," "Configure," "Set up," "Build")
-* **Uses developer-friendly, direct language** - avoid marketing jargon and corporate buzzwords
-* **Prioritizes conciseness over completeness** - cut unnecessary words ruthlessly
-* Accurately summarizes the content's core value proposition
-* Includes relevant keywords naturally without stuffing
-* Follows Google's snippet guidelines (descriptive, informative, compelling)
-* Is version-agnostic (no {% ifversion %} blocks, but {% data variables.* %} and {% data reusables.* %} are acceptable)
-* Matches the content type (article/category/mapTopic) requirements
-* **Goes beyond title restatement** - summarizes the complete article value, not just rephrasing the title
-* **Lists concrete steps or outcomes** - what users will actually do or accomplish
-* **Limits lists to 2-3 items maximum** - avoid long comma-separated sequences that feel overwhelming
+**Primary constraints (must-haves):**
+* Start with action verb ("Learn," "Access," "Explore," "Configure," "Set up," "Build")
+* One sentence maximum - NO colons, NO detailed explanations
+* Avoid buzzwords: "leverage," "optimize," "maximize," "enhance," "streamline," "empower," "revolutionize," "seamlessly," "comprehensive," "enterprise-grade," "cutting-edge," "innovative," "game-changing," "next-generation," "world-class," "best-in-class," "state-of-the-art," "industry-leading," "robust," "scalable," "mission-critical," "synergistic," "holistic," "strategic," "transformative"
+* Different approach than title - don't start with same words/phrases
+* Lists 2-3 concrete outcomes maximum
-## SEO scoring criteria (1-10 scale)
-
-**10-9 (Excellent)**: Strong action verb, comprehensive content summary, optimal keyword density, clear unique value beyond title, perfect length
-**8-7 (Good)**: Action verb present, good content representation, decent keywords, some unique value, appropriate length
-**6-5 (Fair)**: Weak action verb or missing, partial content coverage, basic keywords, minimal value beyond title
-**4-3 (Poor)**: No action verb, limited content representation, few relevant keywords, mostly restates title
-**2-1 (Very Poor)**: Vague or misleading, no clear value proposition, poor keyword usage, completely redundant with title
-
-## Analysis process
-
-1. **Content resolution**: Keep {% data variables.* %} and {% data reusables.* %} but avoid {% ifversion %} blocks
-2. **Content analysis**: Identify the article's purpose, target audience, key concepts, and user outcomes
-3. **Category detection**: For index pages, analyze child content themes and collective value
-
-4. **SEO optimization**: Use strong action verbs, developer-friendly language, concrete outcomes, and relevant keywords while avoiding corporate buzzwords
+**Secondary optimizations (nice-to-haves):**
+* Include relevant keywords naturally
+* Version-agnostic ({% data variables.* %} OK, avoid {% ifversion %})
+* Follow Google snippet guidelines
+* Cut unnecessary words ruthlessly
**Content Summarization vs. Title Restatement**:
@@ -47,7 +30,7 @@ Generate a single, concise intro (one simple sentence maximum - NO colons, NO de
- Better: "Use {% data variables.product.prodname_copilot %} chat and code completion to research syntax, practice coding, and master new programming languages faster"
✅ **Use concise, developer-friendly language ({% data variables.* %} OK)**:
-- Better intro: "Evaluate use cases, configure security settings, and run pilot trials to successfully deploy {% data variables.copilot.copilot_coding_agent %} in your org"
+- Better intro: "Evaluate use cases, configure security settings, and run pilot trials to deploy {% data variables.copilot.copilot_coding_agent %} in your org"
❌ **Avoid overly long lists and colon constructions**:
- Too long: "Scope issues, pick suitable tasks, iterate via PR comments, add repo instructions, enable MCP tools, and preinstall dependencies"
@@ -55,24 +38,13 @@ Generate a single, concise intro (one simple sentence maximum - NO colons, NO de
- Better: "Scope tasks, configure custom instructions, and iterate on pull requests to improve {% data variables.copilot.copilot_coding_agent %} performance"
- Better: "Use {% data variables.product.prodname_copilot %} features like chat and code completion to research syntax, build programs, and learn new programming languages faster"
-**Tone Guidelines**:
-- **Developer-friendly**: Use direct, practical language
-- **Concise over complete**: Cut words ruthlessly
-- **Action-oriented**: List what users will actually do
-- **Avoid buzzwords**: Skip marketing language and corporate jargon
-- **Use concrete verbs**: Instead of "maximize/optimize/enhance" ā use "improve," "boost," "increase," or just describe the outcome directly
-- **Limit lists**: Maximum 2-3 items in comma-separated lists - prefer flowing sentences over exhaustive enumerations
-- **Avoid colon constructions**: Don't use "Do X: detailed explanation of A, B, and C" format - keep it simple and direct
-- **Avoid title similarity**: Don't start with the same words/phrases as the article title - approach the topic from a different angle
+## Quality Checklist
-The intro should answer: "What specific steps will I take?" rather than "What will this comprehensive solution provide?"
-
-## Analysis Process
-
-1. **First Draft**: Generate an initial improved intro following all guidelines above
-2. **Title Check**: Compare your draft to the article title - if it starts with similar words, rewrite with a different approach
-3. **Self-Review**: Evaluate your draft against the SEO scoring criteria and tone guidelines
-4. **Refinement**: If the draft contains buzzwords, weak verbs, title similarity, or scores below 8/10, create a refined version
+✅ **Structure**: Action verb + 2-3 concrete outcomes + under 350 characters
+✅ **Language**: Direct, practical developer language (no marketing jargon)
+✅ **Focus**: What users will DO, not what solution "provides"
+✅ **Uniqueness**: Different angle from article title
+✅ **Simplicity**: No colons, no complex lists, flowing sentences
## Output format
@@ -84,27 +56,12 @@ Title: "[Article title from frontmatter]"
Original intro: "[Current intro from the article, or "No intro" if none exists]"
-
-Original SEO score: [X]/10
-------------------------
-
-Improved intro: "[Single, concise intro that summarizes the article's full content value, not just restating the title]"
-
-
-Improved SEO score: [X]/10
+SEO-friendly alternative: "[Single, concise intro that summarizes the article's full content value, not just restating the title]"
------------------------
```
-Note: The improved score should reflect your best attempt after internal refinement.
-
## Character limits by content type
-**Priority: Conciseness over character limits**
-- Focus on being as concise as possible while maintaining clarity
-- Cut every unnecessary word before considering length
-- Developer-friendly brevity trumps hitting character targets
-
-**Technical limits** (for reference):
- **Articles**: Maximum 354 characters
- **Categories**: Maximum 362 characters
- **Map Topics**: Maximum 362 characters
@@ -124,4 +81,18 @@ Note: The improved score should reflect your best attempt after internal refinem
- {% data variables.product.prodname_copilot %} = "GitHub Copilot"
- {% data variables.copilot.copilot_coding_agent %} = "Copilot Coding Agent"
-Focus on creating intros that would make sense to someone discovering this content through Google search, clearly communicating the value and relevance of the article.
\ No newline at end of file
+Focus on creating intros that would make sense to someone discovering this content through Google search, clearly communicating the value and relevance of the article.
+
+
+
+## WRITE MODE INSTRUCTIONS
+
+**CRITICAL**: You are in write mode. Output ONLY the YAML frontmatter property to update.
+
+- Return just: `intro: "your improved intro text"`
+- Do NOT include analysis, scoring, explanations, or formatting
+- Do NOT wrap in markdown code blocks or ```yaml
+- Do NOT include the analysis format shown above
+- Just return the clean YAML property line
+
+
\ No newline at end of file
diff --git a/src/ai-tools/prompts/prompt-template.yml b/src/ai-tools/prompts/prompt-template.yml
index dab8d13adf..293a3decfe 100644
--- a/src/ai-tools/prompts/prompt-template.yml
+++ b/src/ai-tools/prompts/prompt-template.yml
@@ -6,4 +6,6 @@ messages:
content: >-
Review this content file according to the provided system prompt.
{{input}}
-model: openai/gpt-5
+model: openai/gpt-4o # Reliable model that works
+temperature: 0.3 # Lower temperature for consistent results
+max_completion_tokens: 4000 # Maximum response length
diff --git a/src/ai-tools/scripts/ai-tools.ts b/src/ai-tools/scripts/ai-tools.ts
index 1be467e02b..fde5245421 100644
--- a/src/ai-tools/scripts/ai-tools.ts
+++ b/src/ai-tools/scripts/ai-tools.ts
@@ -7,6 +7,8 @@ import ora from 'ora'
import { execSync } from 'child_process'
import { callModelsApi } from '@/ai-tools/lib/call-models-api'
import dotenv from 'dotenv'
+import readFrontmatter from '@/frame/lib/read-frontmatter'
+import { schema } from '@/frame/lib/frontmatter'
dotenv.config({ quiet: true })
const __dirname = path.dirname(fileURLToPath(import.meta.url))
@@ -28,35 +30,92 @@ if (!process.env.GITHUB_TOKEN) {
}
}
-interface EditorType {
- description: string
+// Dynamically discover available editor types from prompt files
+const getAvailableEditorTypes = (): string[] => {
+ const editorTypes: string[] = []
+
+ try {
+ const promptFiles = fs.readdirSync(promptDir)
+ for (const file of promptFiles) {
+ if (file.endsWith('.md')) {
+ const editorName = path.basename(file, '.md')
+ editorTypes.push(editorName)
+ }
+ }
+ } catch {
+ console.warn('Could not read prompts directory, using empty editor types')
+ }
+
+ return editorTypes
}
-interface EditorTypes {
- versioning: EditorType
- intro: EditorType
-}
+const editorTypes = getAvailableEditorTypes()
-const editorTypes: EditorTypes = {
- versioning: {
- description: 'Refine versioning according to simplification guidance.',
- },
- intro: {
- description: 'Refine intro frontmatter based on SEO and content guidelines.',
- },
+// Enhanced recursive markdown file finder with symlink, depth, and root path checks
+const findMarkdownFiles = (
+ dir: string,
+ rootDir: string,
+ depth: number = 0,
+ maxDepth: number = 20,
+ visited: Set<string> = new Set(),
+): string[] => {
+ const markdownFiles: string[] = []
+ let realDir: string
+ try {
+ realDir = fs.realpathSync(dir)
+ } catch {
+ // If we can't resolve real path, skip this directory
+ return []
+ }
+ // Prevent escaping root directory
+ if (!realDir.startsWith(rootDir)) {
+ return []
+ }
+ // Prevent symlink loops
+ if (visited.has(realDir)) {
+ return []
+ }
+ visited.add(realDir)
+ // Prevent excessive depth
+ if (depth > maxDepth) {
+ return []
+ }
+ let entries: fs.Dirent[]
+ try {
+ entries = fs.readdirSync(realDir, { withFileTypes: true })
+ } catch {
+ // If we can't read directory, skip
+ return []
+ }
+ for (const entry of entries) {
+ const fullPath = path.join(realDir, entry.name)
+ let realFullPath: string
+ try {
+ realFullPath = fs.realpathSync(fullPath)
+ } catch {
+ continue
+ }
+ // Prevent escaping root directory for files
+ if (!realFullPath.startsWith(rootDir)) {
+ continue
+ }
+ if (entry.isDirectory()) {
+ markdownFiles.push(...findMarkdownFiles(realFullPath, rootDir, depth + 1, maxDepth, visited))
+ } else if (entry.isFile() && entry.name.endsWith('.md')) {
+ markdownFiles.push(realFullPath)
+ }
+ }
+ return markdownFiles
}
const refinementDescriptions = (): string => {
- let str = '\n\n'
- for (const [ed, edObj] of Object.entries(editorTypes)) {
- str += ` ${ed.padEnd(12)} ${edObj.description}\n`
- }
- return str
+ return editorTypes.join(', ')
}
interface CliOptions {
verbose?: boolean
- refine: Array<string>
+ prompt?: string[]
+ refine?: string[]
files: string[]
write?: boolean
}
@@ -71,9 +130,10 @@ program
'-w, --write',
'Write changes back to the original files (default: output to console only)',
)
- .requiredOption(
+ .option('-p, --prompt <prompts...>', `Specify one or more prompt type: ${refinementDescriptions()}`)
+ .option(
'-r, --refine <types...>',
- `Specify one or more refinement type: ${refinementDescriptions().trimEnd()}\n`,
+ `(Deprecated: use --prompt) Specify one or more prompt type: ${refinementDescriptions()}`,
)
.requiredOption(
'-f, --files <files...>',
@@ -84,7 +144,30 @@ program
const spinner = ora('Starting AI review...').start()
const files = options.files
- const editors = options.refine
+ // Handle both --prompt and --refine options for backwards compatibility
+ const prompts = options.prompt || options.refine
+
+ if (!prompts || prompts.length === 0) {
+ spinner.fail('No prompt type specified. Use --prompt or --refine with one or more types.')
+ process.exitCode = 1
+ return
+ }
+
+ // Validate that all requested editor types exist
+ const availableEditors = editorTypes
+ for (const editor of prompts) {
+ if (!availableEditors.includes(editor)) {
+ spinner.fail(
+ `Unknown prompt type: ${editor}. Available types: ${availableEditors.join(', ')}`,
+ )
+ process.exitCode = 1
+ return
+ }
+ }
+
+ if (options.verbose) {
+ console.log(`Processing ${files.length} files with prompts: ${prompts.join(', ')}`)
+ }
for (const file of files) {
const filePath = path.resolve(process.cwd(), file)
@@ -96,37 +179,101 @@ program
continue
}
- try {
- spinner.text = `Reading file: ${file}`
- const content = fs.readFileSync(filePath, 'utf8')
+ // Check if it's a directory
+ const isDirectory = fs.statSync(filePath).isDirectory()
- for (const editorType of editors) {
- spinner.text = `Running the AI-powered ${editorType} refinement...`
- const answer = await callEditor(editorType, content, options.write || false)
- spinner.stop()
+ for (const editorType of prompts) {
+ try {
+ // For other editor types, process individual files
+ const filesToProcess: string[] = []
- if (options.write) {
- // Write the result back to the original file
- fs.writeFileSync(filePath, answer, 'utf8')
- console.log(`✅ Updated: ${file}`)
+ if (isDirectory) {
+ // Find all markdown files in the directory recursively
+ // Use process.cwd() as the root directory for safety
+ const rootDir = fs.realpathSync(process.cwd())
+ filesToProcess.push(...findMarkdownFiles(filePath, rootDir))
+
+ if (filesToProcess.length === 0) {
+ spinner.warn(`No markdown files found in directory: ${file}`)
+ continue
+ }
+
+ spinner.text = `Found ${filesToProcess.length} markdown files in ${file}`
} else {
- // Just output to console (current behavior)
- console.log(answer)
+ filesToProcess.push(filePath)
}
+
+ spinner.start()
+ for (const fileToProcess of filesToProcess) {
+ const relativePath = path.relative(process.cwd(), fileToProcess)
+ spinner.text = `Processing: ${relativePath}`
+ try {
+ const content = fs.readFileSync(fileToProcess, 'utf8')
+ const answer = await callEditor(
+ editorType,
+ content,
+ options.write || false,
+ options.verbose || false,
+ )
+ spinner.stop()
+
+ if (options.write) {
+ if (editorType === 'intro') {
+ // For frontmatter addition/modification, merge properties instead of overwriting entire file
+ const updatedContent = mergeFrontmatterProperties(fileToProcess, answer)
+ fs.writeFileSync(fileToProcess, updatedContent, 'utf8')
+ console.log(`✅ Added frontmatter properties to: ${relativePath}`)
+ } else {
+ // For other editor types, write the full result back to the original file
+ fs.writeFileSync(fileToProcess, answer, 'utf8')
+ console.log(`✅ Updated: ${relativePath}`)
+ }
+ } else {
+ // Just output to console (current behavior)
+ if (filesToProcess.length > 1) {
+ console.log(`\n=== ${relativePath} ===`)
+ }
+ console.log(answer)
+ }
+ } catch (err) {
+ const error = err as Error
+ spinner.fail(`Error processing ${relativePath}: ${error.message}`)
+ process.exitCode = 1
+ } finally {
+ spinner.stop()
+ }
+ }
+ } catch (err) {
+ const error = err as Error
+ const targetName = path.relative(process.cwd(), filePath)
+ spinner.fail(`Error processing ${targetName}: ${error.message}`)
+ process.exitCode = 1
}
- } catch (err) {
- const error = err as Error
- spinner.fail(`Error processing file ${file}: ${error.message}`)
- process.exitCode = 1
}
}
spinner.stop()
+
+ // Exit with appropriate code based on whether any errors occurred
+ if (process.exitCode) {
+ process.exit(process.exitCode)
+ }
})()
})
program.parse(process.argv)
+// Handle graceful shutdown
+process.on('SIGINT', () => {
+ console.log('\n\n🛑 Process interrupted by user')
+ process.exit(0)
+})
+
+process.on('SIGTERM', () => {
+ console.log('\n\n🛑 Process terminated')
+ process.exit(0)
+})
+
interface PromptMessage {
content: string
role: string
@@ -139,26 +286,111 @@ interface PromptData {
max_tokens?: number
}
-async function callEditor(
- editorType: keyof EditorTypes,
- content: string,
- writeMode: boolean,
-): Promise<string> {
- const markdownPromptPath = path.join(promptDir, `${editorType}.md`)
- let markdownPrompt = fs.readFileSync(markdownPromptPath, 'utf8')
+// Function to merge new frontmatter properties into existing file while preserving formatting
+function mergeFrontmatterProperties(filePath: string, newPropertiesYaml: string): string {
+ const content = fs.readFileSync(filePath, 'utf8')
+ const parsed = readFrontmatter(content)
- // For intro type in write mode, append special instructions
- if (editorType === 'intro' && writeMode) {
- markdownPrompt +=
- '\n\n**WRITE MODE**: Output only the complete updated file content with the new intro in the frontmatter. Do not include analysis or explanations - just return the file ready to write.'
+ if (parsed.errors && parsed.errors.length > 0) {
+ throw new Error(
+ `Failed to parse frontmatter: ${parsed.errors.map((e) => e.message).join(', ')}`,
+ )
}
+ if (!parsed.content) {
+ throw new Error('Failed to parse content from file')
+ }
+
+ try {
+ // Clean up the AI response - remove markdown code blocks if present
+ let cleanedYaml = newPropertiesYaml.trim()
+ cleanedYaml = cleanedYaml.replace(/^```ya?ml\s*\n/i, '')
+ cleanedYaml = cleanedYaml.replace(/\n```\s*$/i, '')
+ cleanedYaml = cleanedYaml.trim()
+
+ interface FrontmatterProperties {
+ intro?: string
+ [key: string]: unknown
+ }
+ const newProperties = yaml.load(cleanedYaml) as FrontmatterProperties
+
+ // Security: Validate against prototype pollution using the official frontmatter schema
+ const allowedKeys = Object.keys(schema.properties)
+
+ const sanitizedProperties = Object.fromEntries(
+ Object.entries(newProperties).filter(([key]) => {
+ if (allowedKeys.includes(key)) {
+ return true
+ }
+ console.warn(`Filtered out potentially unsafe frontmatter key: ${key}`)
+ return false
+ }),
+ )
+
+ // Merge new properties with existing frontmatter
+ const mergedData: FrontmatterProperties = { ...parsed.data, ...sanitizedProperties }
+
+ // Manually ensure intro is wrapped in single quotes in the final output
+ let result = readFrontmatter.stringify(parsed.content, mergedData)
+
+ // Post-process to ensure intro field has single quotes
+ if (newProperties.intro) {
+ const introValue = newProperties.intro.toString()
+ // Replace any quote style on intro with single quotes
+ result = result.replace(
+ /^intro:\s*(['"`]?)([^'"`\n\r]+)\1?\s*$/m,
+ `intro: '${introValue.replace(/'/g, "''")}'`, // Escape single quotes by doubling them
+ )
+ }
+ return result
+ } catch (error) {
+ console.error('Failed to parse AI response as YAML:')
+ console.error('Raw AI response:', JSON.stringify(newPropertiesYaml))
+ throw new Error(`Failed to parse new frontmatter properties: ${error}`)
+ }
+}
+
+async function callEditor(
+ editorType: string,
+ content: string,
+ writeMode: boolean,
+ verbose = false,
+): Promise<string> {
+ const markdownPromptPath = path.join(promptDir, `${String(editorType)}.md`)
+
+ if (!fs.existsSync(markdownPromptPath)) {
+ throw new Error(`Prompt file not found: ${markdownPromptPath}`)
+ }
+
+ const markdownPrompt = fs.readFileSync(markdownPromptPath, 'utf8')
+
const prompt = yaml.load(fs.readFileSync(promptTemplatePath, 'utf8')) as PromptData
+ // Validate the prompt template has required properties
+ if (!prompt.messages || !Array.isArray(prompt.messages)) {
+ throw new Error('Invalid prompt template: missing or invalid messages array')
+ }
+
for (const msg of prompt.messages) {
msg.content = msg.content.replace('{{markdownPrompt}}', markdownPrompt)
msg.content = msg.content.replace('{{input}}', content)
+ // Replace writeMode template variable with simple string replacement
+ msg.content = msg.content.replace(
+ //g,
+ writeMode ? '' : '',
+ )
+ msg.content = msg.content.replace(
+ //g,
+ writeMode ? '' : '',
+ )
+ msg.content = msg.content.replace(
+ //g,
+ writeMode ? '' : '',
+ )
+
+ // Remove sections marked for removal
+ msg.content = msg.content.replace(/[\s\S]*?/g, '')
}
- return callModelsApi(prompt)
+ return callModelsApi(prompt, verbose)
}
diff --git a/src/app/lib/server-context-utils.ts b/src/app/lib/server-context-utils.ts
index 7bb74149c2..3d4dcec2df 100644
--- a/src/app/lib/server-context-utils.ts
+++ b/src/app/lib/server-context-utils.ts
@@ -3,12 +3,13 @@ import { extractVersionFromPath } from '@/app/lib/version-utils'
import { getUIDataMerged } from '@/data-directory/lib/get-data'
import { type LanguageCode } from '@/languages/lib/languages'
import { createTranslationFunctions, translate } from '@/languages/lib/translation-utils'
+import type { UIStrings } from '@/frame/components/context/MainContext'
export interface ServerAppRouterContext {
currentLanguage: LanguageCode
currentVersion: string
sitename: string
- site: { data: { ui: any } }
+ site: { data: { ui: UIStrings } }
}
/**
diff --git a/src/assets/middleware/dynamic-assets.ts b/src/assets/middleware/dynamic-assets.ts
index 25ed3fb67c..9053151a31 100644
--- a/src/assets/middleware/dynamic-assets.ts
+++ b/src/assets/middleware/dynamic-assets.ts
@@ -144,7 +144,11 @@ export default async function dynamicAssets(
assetCacheControl(res)
return res.type('image/webp').send(buffer)
} catch (catchError) {
- if (catchError instanceof Error && (catchError as any).code !== 'ENOENT') {
+ if (
+ catchError instanceof Error &&
+ 'code' in catchError &&
+ (catchError as NodeJS.ErrnoException).code !== 'ENOENT'
+ ) {
throw catchError
}
}
diff --git a/src/content-linter/lib/linting-rules/journey-tracks-guide-path-exists.ts b/src/content-linter/lib/linting-rules/journey-tracks-guide-path-exists.ts
index e4a9d61390..17bfbe04c3 100644
--- a/src/content-linter/lib/linting-rules/journey-tracks-guide-path-exists.ts
+++ b/src/content-linter/lib/linting-rules/journey-tracks-guide-path-exists.ts
@@ -52,10 +52,12 @@ export const journeyTracksGuidePathExists = {
description: 'Journey track guide paths must reference existing content files',
tags: ['frontmatter', 'journey-tracks'],
function: (params: RuleParams, onError: RuleErrorCallback) => {
- // Using any for frontmatter as it's a dynamic YAML object with varying properties
- const fm: any = getFrontmatter(params.lines)
- if (!fm || !fm.journeyTracks || !Array.isArray(fm.journeyTracks)) return
- if (!fm.layout || fm.layout !== 'journey-landing') return
+ // Using unknown for frontmatter as it's a dynamic YAML object with varying properties
+ const fm: unknown = getFrontmatter(params.lines)
+ if (!fm || typeof fm !== 'object' || !('journeyTracks' in fm)) return
+ const fmObj = fm as Record<string, unknown>
+ if (!Array.isArray(fmObj.journeyTracks)) return
+ if (!('layout' in fmObj) || fmObj.layout !== 'journey-landing') return
const journeyTracksLine = params.lines.find((line: string) => line.startsWith('journeyTracks:'))
@@ -63,11 +65,13 @@ export const journeyTracksGuidePathExists = {
const journeyTracksLineNumber = params.lines.indexOf(journeyTracksLine) + 1
- for (let trackIndex = 0; trackIndex < fm.journeyTracks.length; trackIndex++) {
- const track: any = fm.journeyTracks[trackIndex]
- if (track.guides && Array.isArray(track.guides)) {
- for (let guideIndex = 0; guideIndex < track.guides.length; guideIndex++) {
- const guide: string = track.guides[guideIndex]
+ for (let trackIndex = 0; trackIndex < fmObj.journeyTracks.length; trackIndex++) {
+ const track: unknown = fmObj.journeyTracks[trackIndex]
+ if (!track || typeof track !== 'object' || !('guides' in track)) continue
+ const trackObj = track as Record<string, unknown>
+ if (trackObj.guides && Array.isArray(trackObj.guides)) {
+ for (let guideIndex = 0; guideIndex < trackObj.guides.length; guideIndex++) {
+ const guide: string = trackObj.guides[guideIndex]
if (typeof guide === 'string') {
if (!isValidGuidePath(guide, params.name)) {
addError(
diff --git a/src/content-linter/lib/linting-rules/journey-tracks-unique-ids.ts b/src/content-linter/lib/linting-rules/journey-tracks-unique-ids.ts
index fc3e415a19..ff61b1e310 100644
--- a/src/content-linter/lib/linting-rules/journey-tracks-unique-ids.ts
+++ b/src/content-linter/lib/linting-rules/journey-tracks-unique-ids.ts
@@ -10,9 +10,11 @@ export const journeyTracksUniqueIds = {
tags: ['frontmatter', 'journey-tracks', 'unique-ids'],
function: function GHD060(params: RuleParams, onError: RuleErrorCallback) {
// Using any for frontmatter as it's a dynamic YAML object with varying properties
- const fm: any = getFrontmatter(params.lines)
- if (!fm || !fm.journeyTracks || !Array.isArray(fm.journeyTracks)) return
- if (!fm.layout || fm.layout !== 'journey-landing') return
+ const fm: unknown = getFrontmatter(params.lines)
+ if (!fm || typeof fm !== 'object' || !('journeyTracks' in fm)) return
+ const fmObj = fm as Record<string, unknown>
+ if (!Array.isArray(fmObj.journeyTracks)) return
+ if (!('layout' in fmObj) || fmObj.layout !== 'journey-landing') return
// Find the base journeyTracks line
const journeyTracksLine: string | undefined = params.lines.find((line: string) =>
@@ -37,7 +39,7 @@ export const journeyTracksUniqueIds = {
trackCount++
// Stop once we've found all the tracks we know exist
- if (fm && fm.journeyTracks && trackCount >= fm.journeyTracks.length) {
+ if (Array.isArray(fmObj.journeyTracks) && trackCount >= fmObj.journeyTracks.length) {
break
}
}
@@ -48,11 +50,12 @@ export const journeyTracksUniqueIds = {
// Track seen journey track IDs and line number for error reporting
const seenIds = new Map()
- for (let index = 0; index < fm.journeyTracks.length; index++) {
- const track: any = fm.journeyTracks[index]
- if (!track || typeof track !== 'object') continue
+ for (let index = 0; index < fmObj.journeyTracks.length; index++) {
+ const track: unknown = fmObj.journeyTracks[index]
+ if (!track || typeof track !== 'object' || !('id' in track)) continue
- const trackId = track.id
+ const trackObj = track as Record<string, unknown>
+ const trackId = trackObj.id
if (!trackId || typeof trackId !== 'string') continue
const currentLineNumber = getTrackLineNumber(index)
diff --git a/src/content-linter/lib/linting-rules/liquid-ifversion-versions.ts b/src/content-linter/lib/linting-rules/liquid-ifversion-versions.ts
index 7af206e359..6631133278 100644
--- a/src/content-linter/lib/linting-rules/liquid-ifversion-versions.ts
+++ b/src/content-linter/lib/linting-rules/liquid-ifversion-versions.ts
@@ -15,8 +15,9 @@ import {
isAllVersions,
getFeatureVersionsObject,
isInAllGhes,
+ isGhesReleaseDeprecated,
} from '@/ghes-releases/scripts/version-utils'
-import { deprecated, oldestSupported } from '@/versions/lib/enterprise-server-releases'
+import { oldestSupported } from '@/versions/lib/enterprise-server-releases'
import type { RuleParams, RuleErrorCallback } from '@/content-linter/types'
export const liquidIfversionVersions = {
@@ -337,19 +338,9 @@ function updateConditionals(condTagItems: any[]) {
}
// Checks for features that are only available in no
// supported GHES releases
- // TODO use isGhesReleaseDeprecated
- if (item.versionsObjAll.ghes.startsWith('<=')) {
- const releaseNumber = item.versionsObjAll.ghes.replace('<=', '').trim()
- if (deprecated.includes(releaseNumber)) {
- item.action.type = 'delete'
- continue
- }
- } else if (item.versionsObjAll.ghes.startsWith('<')) {
- const releaseNumber = item.versionsObjAll.ghes.replace('<', '').trim()
- if (deprecated.includes(releaseNumber) || releaseNumber === oldestSupported) {
- item.action.type = 'delete'
- continue
- }
+ if (isGhesReleaseDeprecated(oldestSupported, item.versionsObjAll.ghes)) {
+ item.action.type = 'delete'
+ continue
}
}
if (item.versionsObj?.feature || item.fileVersionsFm?.feature) break
diff --git a/src/content-linter/scripts/lint-content.ts b/src/content-linter/scripts/lint-content.ts
index f84ec31df8..a5ab8ef7f7 100755
--- a/src/content-linter/scripts/lint-content.ts
+++ b/src/content-linter/scripts/lint-content.ts
@@ -284,6 +284,11 @@ async function main() {
// Ensure previous console logging is not truncated
console.log('\n')
const took = end - start
+ if (warningFileCount > 0 || errorFileCount > 0) {
+ spinner.info(
+ `💡 You can disable linter rules for specific lines or blocks of text. See https://gh.io/suppress-linter-rule.\n\n`,
+ )
+ }
spinner.info(
`š¦ Markdownlint finished in ${(took > 1000 ? took / 1000 : took).toFixed(1)} ${
took > 1000 ? 's' : 'ms'
diff --git a/src/content-linter/tests/category-pages.ts b/src/content-linter/tests/category-pages.ts
index 42e4a79d1f..a6add3b574 100644
--- a/src/content-linter/tests/category-pages.ts
+++ b/src/content-linter/tests/category-pages.ts
@@ -53,7 +53,7 @@ describe.skip('category pages', () => {
// Get links included in product index page.
// Each link corresponds to a product subdirectory (category).
// Example: "getting-started-with-github"
- const contents = fs.readFileSync(productIndex, 'utf8') // TODO move to async
+ const contents = fs.readFileSync(productIndex, 'utf8')
const data = getFrontmatterData(contents)
const productDir = path.dirname(productIndex)
@@ -62,7 +62,6 @@ describe.skip('category pages', () => {
const categoryLinks = children
// Only include category directories, not standalone category files like content/actions/quickstart.md
.filter((link) => fs.existsSync(getPath(productDir, link, 'index')))
- // TODO this should move to async, but you can't asynchronously define tests with vitest...
// Map those to the Markdown file paths that represent that category page index
const categoryPaths = categoryLinks.map((link) => getPath(productDir, link, 'index'))
diff --git a/src/content-render/liquid/tool.ts b/src/content-render/liquid/tool.ts
index c62dedf1ea..922893032e 100644
--- a/src/content-render/liquid/tool.ts
+++ b/src/content-render/liquid/tool.ts
@@ -47,26 +47,27 @@ export const Tool = {
type: 'block' as const,
tagName: '',
// Liquid template objects don't have TypeScript definitions
- templates: [] as any[],
+ templates: [] as unknown[],
// tagToken and remainTokens are Liquid internal types without TypeScript definitions
- parse(tagToken: any, remainTokens: any) {
- this.tagName = tagToken.name
+ parse(tagToken: unknown, remainTokens: unknown) {
+ const token = tagToken as { name: string; getText: () => string }
+ this.tagName = token.name
this.templates = []
const stream = this.liquid.parser.parseStream(remainTokens)
stream
.on(`tag:end${this.tagName}`, () => stream.stop())
// tpl is a Liquid template object without TypeScript definitions
- .on('template', (tpl: any) => this.templates.push(tpl))
+ .on('template', (tpl: unknown) => this.templates.push(tpl))
.on('end', () => {
- throw new Error(`tag ${tagToken.getText()} not closed`)
+ throw new Error(`tag ${token.getText()} not closed`)
})
stream.start()
},
// scope is a Liquid scope object, Generator yields/returns Liquid template values - no TypeScript definitions available
- *render(scope: any): Generator {
+ *render(scope: unknown): Generator {
const output = yield this.liquid.renderer.renderTemplates(this.templates, scope)
return yield this.liquid.parseAndRender(template, {
tagName: this.tagName,
diff --git a/src/content-render/scripts/all-documents/lib.ts b/src/content-render/scripts/all-documents/lib.ts
index fc7f304276..47f6a7aacb 100644
--- a/src/content-render/scripts/all-documents/lib.ts
+++ b/src/content-render/scripts/all-documents/lib.ts
@@ -1,6 +1,6 @@
import type { Response } from 'express'
-import type { ExtendedRequest, Page } from '@/types'
+import type { ExtendedRequest, Page, Context } from '@/types'
import contextualize from '@/frame/middleware/context/context'
import features from '@/versions/middleware/features'
import shortVersions from '@/versions/middleware/short-versions'
@@ -55,7 +55,7 @@ export async function allDocuments(options: Options): Promise {
const next = () => {}
const res = {}
const pagePath = permalink.href
- const context: any = {}
+ const context: Partial<Context> = {}
const req = {
path: pagePath,
language: permalink.languageCode,
@@ -68,7 +68,7 @@ export async function allDocuments(options: Options): Promise {
await contextualize(req as ExtendedRequest, res as Response, next)
await shortVersions(req as ExtendedRequest, res as Response, next)
req.context.page = page
- features(req as any, res as any, next)
+ features(req as ExtendedRequest, res as Response, next)
const title = fields.includes('title')
? await page.renderProp('title', req.context, { textOnly: true })
diff --git a/src/early-access/scripts/what-docs-early-access-branch.ts b/src/early-access/scripts/what-docs-early-access-branch.ts
index c6612b450b..3bc261f79a 100644
--- a/src/early-access/scripts/what-docs-early-access-branch.ts
+++ b/src/early-access/scripts/what-docs-early-access-branch.ts
@@ -2,7 +2,6 @@ import { getOctokit } from '@actions/github'
import { setOutput } from '@actions/core'
async function main(): Promise<void> {
- // TODO Is there a lib function for this?
const { BRANCH_NAME, GITHUB_TOKEN } = process.env
if (!BRANCH_NAME) throw new Error("'BRANCH_NAME' env var not set")
if (!GITHUB_TOKEN) throw new Error("'GITHUB_TOKEN' env var not set")
diff --git a/src/events/tests/middleware.ts b/src/events/tests/middleware.ts
index a450878436..f1a500958b 100644
--- a/src/events/tests/middleware.ts
+++ b/src/events/tests/middleware.ts
@@ -6,7 +6,7 @@ import { contentTypesEnum } from '@/frame/lib/frontmatter'
describe('POST /events', () => {
vi.setConfig({ testTimeout: 60 * 1000 })
- async function checkEvent(data: any) {
+ async function checkEvent(data: unknown) {
if (!Array.isArray(data)) {
data = [data]
}
diff --git a/src/frame/tests/page.ts b/src/frame/tests/page.ts
index 9057315ef1..03f8821b07 100644
--- a/src/frame/tests/page.ts
+++ b/src/frame/tests/page.ts
@@ -439,19 +439,6 @@ describe('catches errors thrown in Page class', () => {
await expect(getPage).rejects.toThrowError('versions')
})
- // TODO - UNSKIP WHEN GHAE IS UPDATED WITH SEMVER VERSIONING
- test.skip('invalid versions frontmatter', async () => {
- async function getPage() {
- return await Page.init({
- relativePath: 'page-with-invalid-product-version.md',
- basePath: path.join(__dirname, '../../../src/fixtures/fixtures'),
- languageCode: 'en',
- })
- }
-
- await expect(getPage).rejects.toThrowError('versions')
- })
-
test('English page with a version in frontmatter that its parent product is not available in', async () => {
async function getPage() {
return await Page.init({
diff --git a/src/frame/tests/pages.ts b/src/frame/tests/pages.ts
index 5ce2674a93..2ae7279cee 100644
--- a/src/frame/tests/pages.ts
+++ b/src/frame/tests/pages.ts
@@ -60,14 +60,15 @@ describe('pages module', () => {
// Page objects have dynamic properties from chain/lodash that aren't fully typed
for (const page of englishPages) {
- for (const redirect of (page as any).redirect_from) {
- for (const version of (page as any).applicableVersions) {
+ const pageObj = page as Record<string, unknown>
+ for (const redirect of pageObj.redirect_from as string[]) {
+ for (const version of pageObj.applicableVersions as string[]) {
const versioned = removeFPTFromPath(path.posix.join('/', version, redirect))
- versionedRedirects.push({ path: versioned, file: (page as any).fullPath })
+ versionedRedirects.push({ path: versioned, file: pageObj.fullPath as string })
if (!redirectToFiles.has(versioned)) {
redirectToFiles.set(versioned, new Set())
}
- redirectToFiles.get(versioned)!.add((page as any).fullPath)
+ redirectToFiles.get(versioned)!.add(pageObj.fullPath as string)
}
}
}
@@ -97,7 +98,7 @@ describe('pages module', () => {
page.languageCode === 'en' && // only check English
!page.relativePath.includes('index.md') && // ignore TOCs
// Page class has dynamic frontmatter properties like 'allowTitleToDifferFromFilename' not in type definition
- !(page as any).allowTitleToDifferFromFilename && // ignore docs with override
+ !(page as Record<string, unknown>).allowTitleToDifferFromFilename && // ignore docs with override
slugger.slug(decode(page.title)) !== path.basename(page.relativePath, '.md') &&
slugger.slug(decode(page.shortTitle || '')) !== path.basename(page.relativePath, '.md')
)
@@ -129,7 +130,7 @@ describe('pages module', () => {
const frontmatterErrors = chain(pages)
// .filter(page => page.languageCode === 'en')
// Page class has dynamic error properties like 'frontmatterErrors' not in type definition
- .map((page) => (page as any).frontmatterErrors)
+ .map((page) => (page as Record<string, unknown>).frontmatterErrors)
.filter(Boolean)
.flatten()
.value()
@@ -149,7 +150,7 @@ describe('pages module', () => {
for (const page of pages) {
// Page class has dynamic properties like 'raw' markdown not in type definition
- const markdown = (page as any).raw
+ const markdown = (page as Record<string, unknown>).raw as string
if (!patterns.hasLiquid.test(markdown)) continue
try {
await liquid.parse(markdown)
diff --git a/src/frame/tests/site-tree.ts b/src/frame/tests/site-tree.ts
index a19b17a8d3..5637b02678 100644
--- a/src/frame/tests/site-tree.ts
+++ b/src/frame/tests/site-tree.ts
@@ -7,6 +7,7 @@ import { loadSiteTree } from '@/frame/lib/page-data'
import nonEnterpriseDefaultVersion from '@/versions/lib/non-enterprise-default-version'
import { formatAjvErrors } from '@/tests/helpers/schemas'
import type { SiteTree, Tree } from '@/types'
+import findPageInSiteTree from '@/frame/lib/find-page-in-site-tree'
const latestEnterpriseRelease = EnterpriseServerReleases.latest
@@ -37,15 +38,14 @@ describe('siteTree', () => {
const ghesSiteTree = siteTree.en[ghesLatest]
// Find a page in the tree that we know contains Liquid
- // TODO: use new findPageInSiteTree helper when it's available
- const pageWithDynamicTitle = ghesSiteTree.childPages
- .find((child) => child.href === `/en/${ghesLatest}/admin`)
- ?.childPages.find(
- (child) => child.href === `/en/${ghesLatest}/admin/installing-your-enterprise-server`,
- )
+ const pageWithDynamicTitle = findPageInSiteTree(
+ ghesSiteTree,
+ siteTree.en[nonEnterpriseDefaultVersion],
+ `/en/${ghesLatest}/admin/installing-your-enterprise-server`,
+ )
// Confirm the raw title contains Liquid
- expect(pageWithDynamicTitle?.page.title).toEqual(
+ expect(pageWithDynamicTitle.page.title).toEqual(
'Installing {% data variables.product.prodname_enterprise %}',
)
})
diff --git a/src/landings/components/SidebarProduct.tsx b/src/landings/components/SidebarProduct.tsx
index 164dcab4f3..833e978d8e 100644
--- a/src/landings/components/SidebarProduct.tsx
+++ b/src/landings/components/SidebarProduct.tsx
@@ -146,8 +146,6 @@ function RestNavListItem({ category }: { category: ProductTreeNode }) {
},
{ rootMargin: '0px 0px -85% 0px' },
)
- // TODO: When we add the ## About the {title} API to each operation
- // we can remove the h2 here
const headingsList = Array.from(document.querySelectorAll('h2, h3'))
for (const heading of headingsList) {
diff --git a/src/languages/tests/translation-error-comments.ts b/src/languages/tests/translation-error-comments.ts
index 19bf22a914..7052e8023f 100644
--- a/src/languages/tests/translation-error-comments.ts
+++ b/src/languages/tests/translation-error-comments.ts
@@ -8,9 +8,20 @@ import {
import { TitleFromAutotitleError } from '@/content-render/unified/rewrite-local-links'
import Page from '@/frame/lib/page'
+// Type aliases for error objects with token information
+type ErrorWithToken = Error & { token: { file: string; getPosition: () => number[] } }
+type ErrorWithTokenNoFile = Error & { token: { getPosition: () => number[] } }
+type ErrorWithTokenNoPosition = Error & { token: { file: string } }
+type ErrorWithTokenAndOriginal = Error & {
+ token: { file: string; getPosition: () => number[] }
+ originalError: Error
+}
+
describe('Translation Error Comments', () => {
// Mock renderContent for integration tests
- let mockRenderContent: MockedFunction<(template: string, context: any) => string>
+ let mockRenderContent: MockedFunction<
+ (template: string, context: Record<string, unknown>) => string
+ >
beforeEach(() => {
mockRenderContent = vi.fn()
@@ -26,7 +37,7 @@ describe('Translation Error Comments', () => {
test('includes all fields when token information is available', () => {
const error = new Error("Unknown tag 'badtag', line:1, col:3")
error.name = 'ParseError'
- ;(error as any).token = {
+ ;(error as unknown as ErrorWithToken).token = {
file: '/content/test/article.md',
getPosition: () => [1, 3],
}
@@ -48,11 +59,13 @@ describe('Translation Error Comments', () => {
test('includes original error message when available', () => {
const error = new Error("Unknown variable 'variables.nonexistent.value'")
error.name = 'RenderError'
- ;(error as any).token = {
+ ;(error as unknown as ErrorWithToken).token = {
file: '/content/test/intro.md',
getPosition: () => [3, 15],
}
- ;(error as any).originalError = new Error('Variable not found: variables.nonexistent.value')
+ ;(error as unknown as ErrorWithTokenAndOriginal).originalError = new Error(
+ 'Variable not found: variables.nonexistent.value',
+ )
const result = createTranslationFallbackComment(error, 'rawIntro')
@@ -67,7 +80,7 @@ describe('Translation Error Comments', () => {
test('falls back to main error message when no originalError', () => {
const error = new Error('Main error message')
error.name = 'RenderError'
- ;(error as any).token = {
+ ;(error as unknown as ErrorWithToken).token = {
file: '/content/test.md',
getPosition: () => [1, 1],
}
@@ -82,7 +95,7 @@ describe('Translation Error Comments', () => {
test('includes tokenization error details', () => {
const error = new Error('Unexpected token, line:1, col:10')
error.name = 'TokenizationError'
- ;(error as any).token = {
+ ;(error as unknown as ErrorWithToken).token = {
file: '/content/test/page.md',
getPosition: () => [1, 10],
}
@@ -152,7 +165,7 @@ describe('Translation Error Comments', () => {
test('handles error with token but no file', () => {
const error = new Error('Error message')
error.name = 'ParseError'
- ;(error as any).token = {
+ ;(error as unknown as ErrorWithTokenNoFile).token = {
// No file property
getPosition: () => [5, 10],
}
@@ -167,7 +180,7 @@ describe('Translation Error Comments', () => {
test('handles error with token but no getPosition method', () => {
const error = new Error('Error message')
error.name = 'ParseError'
- ;(error as any).token = {
+ ;(error as unknown as ErrorWithTokenNoPosition).token = {
file: '/content/test.md',
// No getPosition method
}
@@ -246,7 +259,7 @@ describe('Translation Error Comments', () => {
test('comment format is valid HTML', () => {
const error = new Error('Test error')
error.name = 'ParseError'
- ;(error as any).token = {
+ ;(error as unknown as ErrorWithToken).token = {
file: '/content/test.md',
getPosition: () => [1, 1],
}
@@ -264,7 +277,7 @@ describe('Translation Error Comments', () => {
test('contains all required fields when available', () => {
const error = new Error('Detailed error message')
error.name = 'RenderError'
- ;(error as any).token = {
+ ;(error as unknown as ErrorWithToken).token = {
file: '/content/detailed-test.md',
getPosition: () => [42, 15],
}
@@ -283,7 +296,7 @@ describe('Translation Error Comments', () => {
test('maintains consistent field order', () => {
const error = new Error('Test message')
error.name = 'ParseError'
- ;(error as any).token = {
+ ;(error as unknown as ErrorWithToken).token = {
file: '/content/test.md',
getPosition: () => [1, 1],
}
@@ -320,18 +333,20 @@ describe('Translation Error Comments', () => {
}
// Mock renderContent to simulate error for Japanese, success for English
- mockRenderContent.mockImplementation((template: string, innerContext: any) => {
- if (innerContext.currentLanguage !== 'en' && template.includes('badtag')) {
- const error = new Error("Unknown tag 'badtag'")
- error.name = 'ParseError'
- ;(error as any).token = {
- file: '/content/test.md',
- getPosition: () => [1, 5],
+ mockRenderContent.mockImplementation(
+ (template: string, innerContext: Record<string, unknown>) => {
+ if (innerContext.currentLanguage !== 'en' && template.includes('badtag')) {
+ const error = new Error("Unknown tag 'badtag'")
+ error.name = 'ParseError'
+ ;(error as unknown as ErrorWithToken).token = {
+ file: '/content/test.md',
+ getPosition: () => [1, 5],
+ }
+ throw error
}
- throw error
- }
- return innerContext.currentLanguage === 'en' ? 'English Title' : template
- })
+ return innerContext.currentLanguage === 'en' ? 'English Title' : template
+ },
+ )
const result = await renderContentWithFallback(mockPage, 'rawTitle', context)
@@ -357,14 +372,16 @@ describe('Translation Error Comments', () => {
},
}
- mockRenderContent.mockImplementation((template: string, innerContext: any) => {
- if (innerContext.currentLanguage !== 'en' && template.includes('badtag')) {
- const error = new Error("Unknown tag 'badtag'")
- error.name = 'ParseError'
- throw error
- }
- return 'English Title'
- })
+ mockRenderContent.mockImplementation(
+ (template: string, innerContext: Record<string, unknown>) => {
+ if (innerContext.currentLanguage !== 'en' && template.includes('badtag')) {
+ const error = new Error("Unknown tag 'badtag'")
+ error.name = 'ParseError'
+ throw error
+ }
+ return 'English Title'
+ },
+ )
const result = await renderContentWithFallback(mockPage, 'rawTitle', context, {
textOnly: true,
@@ -384,7 +401,7 @@ describe('Translation Error Comments', () => {
const failingCallable = async () => {
const error = new Error("Unknown variable 'variables.bad'")
error.name = 'RenderError'
- ;(error as any).token = {
+ ;(error as unknown as ErrorWithToken).token = {
file: '/content/article.md',
getPosition: () => [10, 20],
}
diff --git a/src/learning-track/middleware/learning-track.ts b/src/learning-track/middleware/learning-track.ts
index 1bec3b34c5..28e9402890 100644
--- a/src/learning-track/middleware/learning-track.ts
+++ b/src/learning-track/middleware/learning-track.ts
@@ -27,8 +27,6 @@ export default async function learningTrack(
const trackName = req.query.learn as string
let trackProduct = req.context.currentProduct as string
- // TODO: Once getDeepDataByLanguage is ported to TS
- // a more appropriate API would be to use `getDeepDataByLanguage {
+type MockResponseWithEnd = Partial<Response> & { end: () => unknown }
+
describe('getAutomaticRequestLogger', () => {
let originalEnv: typeof process.env
let originalConsoleLog: typeof console.log
@@ -43,7 +46,7 @@ describe('getAutomaticRequestLogger', () => {
}
// Override res.end to simulate response completion
- function endOverride(this: any, chunk?: any, encoding?: any) {
+ function endOverride(this: Response, chunk?: unknown, encoding?: unknown): Response {
if (!responseEnded) {
responseEnded = true
// Simulate a small delay for response time
@@ -54,7 +57,7 @@ describe('getAutomaticRequestLogger', () => {
return this
}
- ;(mockRes as any).end = endOverride
+ ;(mockRes as { end: typeof endOverride }).end = endOverride
mockNext = vi.fn()
@@ -86,7 +89,7 @@ describe('getAutomaticRequestLogger', () => {
middleware(mockReq as Request, mockRes as Response, mockNext)
// Simulate response completion
- ;(mockRes as any).end()
+ ;(mockRes as MockResponseWithEnd).end()
// Wait for async logging
await new Promise((resolve) => setTimeout(resolve, 20))
@@ -143,7 +146,7 @@ describe('getAutomaticRequestLogger', () => {
}
// Override res.end to simulate response completion
- function endOverride(this: any, chunk?: any, encoding?: any) {
+ function endOverride(this: Response, chunk?: unknown, encoding?: unknown): Response {
if (!responseEnded) {
responseEnded = true
// Simulate a small delay for response time
@@ -154,7 +157,7 @@ describe('getAutomaticRequestLogger', () => {
return this
}
- ;(freshMockRes as any).end = endOverride
+ ;(freshMockRes as { end: typeof endOverride }).end = endOverride
const freshMockNext = vi.fn()
@@ -165,7 +168,7 @@ describe('getAutomaticRequestLogger', () => {
freshMockRes as Partial<Response> as Response,
freshMockNext,
)
- ;(freshMockRes as any).end()
+ ;(freshMockRes as MockResponseWithEnd).end()
// Wait for async logging with longer timeout for CI
await new Promise((resolve) => setTimeout(resolve, 50))
@@ -187,7 +190,7 @@ describe('getAutomaticRequestLogger', () => {
const middleware = getAutomaticRequestLogger()
middleware(mockReq as Request, mockRes as Response, mockNext)
- ;(mockRes as any).end()
+ ;(mockRes as MockResponseWithEnd).end()
await new Promise((resolve) => setTimeout(resolve, 20))
@@ -202,7 +205,7 @@ describe('getAutomaticRequestLogger', () => {
const middleware = getAutomaticRequestLogger()
middleware(mockReq as Request, mockRes as Response, mockNext)
- ;(mockRes as any).end()
+ ;(mockRes as MockResponseWithEnd).end()
await new Promise((resolve) => setTimeout(resolve, 20))
@@ -215,7 +218,7 @@ describe('getAutomaticRequestLogger', () => {
const middleware = getAutomaticRequestLogger()
middleware(mockReq as Request, mockRes as Response, mockNext)
- ;(mockRes as any).end()
+ ;(mockRes as MockResponseWithEnd).end()
await new Promise((resolve) => setTimeout(resolve, 20))
@@ -233,7 +236,7 @@ describe('getAutomaticRequestLogger', () => {
const middleware = getAutomaticRequestLogger()
middleware(mockReq as Request, mockRes as Response, mockNext)
- ;(mockRes as any).end()
+ ;(mockRes as MockResponseWithEnd).end()
await new Promise((resolve) => setTimeout(resolve, 20))
@@ -252,7 +255,7 @@ describe('getAutomaticRequestLogger', () => {
const middleware = getAutomaticRequestLogger()
middleware(mockReq as Request, mockRes as Response, mockNext)
- ;(mockRes as any).end()
+ ;(mockRes as MockResponseWithEnd).end()
await new Promise((resolve) => setTimeout(resolve, 20))
@@ -291,7 +294,7 @@ describe('getAutomaticRequestLogger', () => {
const middleware = getAutomaticRequestLogger()
middleware(mockReq as Request, mockRes as Response, mockNext)
- ;(mockRes as any).end()
+ ;(mockRes as MockResponseWithEnd).end()
// Wait for any potential async logging with longer timeout for CI
await new Promise((resolve) => setTimeout(resolve, 50))
@@ -309,7 +312,7 @@ describe('getAutomaticRequestLogger', () => {
const middleware = getAutomaticRequestLogger()
middleware(mockReq as Request, mockRes as Response, mockNext)
- ;(mockRes as any).end()
+ ;(mockRes as MockResponseWithEnd).end()
await new Promise((resolve) => setTimeout(resolve, 20))
@@ -320,11 +323,13 @@ describe('getAutomaticRequestLogger', () => {
describe('edge cases', () => {
it('should handle missing content-length header', async () => {
- ;(mockRes as any).getHeader = vi.fn(() => undefined)
+ ;(mockRes as Partial<Response> & { getHeader: () => undefined }).getHeader = vi.fn(
+ () => undefined,
+ )
const middleware = getAutomaticRequestLogger()
middleware(mockReq as Request, mockRes as Response, mockNext)
- ;(mockRes as any).end()
+ ;(mockRes as MockResponseWithEnd).end()
await new Promise((resolve) => setTimeout(resolve, 20))
@@ -333,11 +338,11 @@ describe('getAutomaticRequestLogger', () => {
})
it('should handle missing status code', async () => {
- delete (mockRes as any).statusCode
+ delete (mockRes as Partial<Response> & { statusCode?: number }).statusCode
const middleware = getAutomaticRequestLogger()
middleware(mockReq as Request, mockRes as Response, mockNext)
- ;(mockRes as any).end()
+ ;(mockRes as MockResponseWithEnd).end()
await new Promise((resolve) => setTimeout(resolve, 20))
@@ -351,7 +356,7 @@ describe('getAutomaticRequestLogger', () => {
const middleware = getAutomaticRequestLogger()
middleware(mockReq as Request, mockRes as Response, mockNext)
- ;(mockRes as any).end()
+ ;(mockRes as MockResponseWithEnd).end()
await new Promise((resolve) => setTimeout(resolve, 20))
@@ -368,7 +373,7 @@ describe('getAutomaticRequestLogger', () => {
// Simulate some processing time
await new Promise((resolve) => setTimeout(resolve, 50))
- ;(mockRes as any).end()
+ ;(mockRes as MockResponseWithEnd).end()
await new Promise((resolve) => setTimeout(resolve, 20))
const endTime = Date.now()
diff --git a/src/observability/tests/logger.ts b/src/observability/tests/logger.ts
index 8eb9a0239e..db66f8d502 100644
--- a/src/observability/tests/logger.ts
+++ b/src/observability/tests/logger.ts
@@ -327,10 +327,6 @@ describe('createLogger', () => {
logger = createLogger('file:///path/to/test.js')
})
- it('should include logger context in production logs', () => {
- // TODO
- })
-
it('should handle missing logger context gracefully in development', () => {
logger.info('No context test')
diff --git a/src/rest/scripts/update-files.ts b/src/rest/scripts/update-files.ts
index c4619cf5a6..c9d1abb44c 100755
--- a/src/rest/scripts/update-files.ts
+++ b/src/rest/scripts/update-files.ts
@@ -114,10 +114,7 @@ async function main() {
// so that we don't spend time generating data files for them.
if (sourceRepos.includes(REST_API_DESCRIPTION_ROOT)) {
const derefDir = await readdir(TEMP_OPENAPI_DIR)
- // TODO: After migrating all-version.ts to TypeScript, we can remove the type assertion
- const currentOpenApiVersions = Object.values(allVersions).map(
- (elem) => (elem as any).openApiVersionName,
- )
+ const currentOpenApiVersions = Object.values(allVersions).map((elem) => elem.openApiVersionName)
for (const schema of derefDir) {
// if the schema does not start with a current version name, delete it
diff --git a/src/rest/scripts/utils/operation.ts b/src/rest/scripts/utils/operation.ts
index c8d08e545f..aea5778c6f 100644
--- a/src/rest/scripts/utils/operation.ts
+++ b/src/rest/scripts/utils/operation.ts
@@ -168,10 +168,6 @@ export default class Operation {
// Operation Id: markdown/render-raw
const contentType = Object.keys(this.#operation.requestBody.content)[0]
const schema = get(this.#operation, `requestBody.content.${contentType}.schema`, {})
- // TODO: Remove this check
- if (this.#operation.operationId === 'checks/create') {
- delete schema.oneOf
- }
// Merges any instances of allOf in the schema using a deep merge
const mergedAllofSchema = mergeAllOf(schema)
try {
diff --git a/src/rest/tests/create-rest-examples.ts b/src/rest/tests/create-rest-examples.ts
index f371f8dbcf..ed1b531065 100644
--- a/src/rest/tests/create-rest-examples.ts
+++ b/src/rest/tests/create-rest-examples.ts
@@ -52,9 +52,12 @@ describe('rest example requests and responses', () => {
test('check example number and status code appear', async () => {
const mergedExamples = await getCodeSamples(operation)
- // example is any because getCodeSamples returns objects from untyped JavaScript module
+ // example has specific structure from getCodeSamples
for (let index = 0; index < mergedExamples.length; index++) {
- const example: any = mergedExamples[index]
+ const example = mergedExamples[index] as {
+ request: { description: string }
+ response: { statusCode: string }
+ }
expect(example.request.description).toBe(
`Example ${index + 1}: Status Code ${example.response.statusCode}`,
)
diff --git a/src/search/components/helpers/execute-search-actions.ts b/src/search/components/helpers/execute-search-actions.ts
index 3ed2261218..29834277ad 100644
--- a/src/search/components/helpers/execute-search-actions.ts
+++ b/src/search/components/helpers/execute-search-actions.ts
@@ -4,6 +4,7 @@ import { DEFAULT_VERSION } from '@/versions/components/useVersion'
import { NextRouter } from 'next/router'
import { sendEvent } from '@/events/components/events'
import { SEARCH_OVERLAY_EVENT_GROUP } from '@/events/components/event-groups'
+import { sanitizeSearchQuery } from '@/search/lib/sanitize-search-query'
// Search context values for identifying each search event
export const GENERAL_SEARCH_CONTEXT = 'general-search'
@@ -21,7 +22,7 @@ export function executeGeneralSearch(
) {
sendEvent({
type: EventType.search,
- search_query: localQuery,
+ search_query: sanitizeSearchQuery(localQuery),
search_context: GENERAL_SEARCH_CONTEXT,
eventGroupKey: SEARCH_OVERLAY_EVENT_GROUP,
eventGroupId,
diff --git a/src/search/components/input/SearchOverlay.tsx b/src/search/components/input/SearchOverlay.tsx
index 250fc0317e..8ab7fd39af 100644
--- a/src/search/components/input/SearchOverlay.tsx
+++ b/src/search/components/input/SearchOverlay.tsx
@@ -30,6 +30,8 @@ import { useSharedUIContext } from '@/frame/components/context/SharedUIContext'
import type { AIReference } from '../types'
import type { AutocompleteSearchHit, GeneralSearchHit } from '@/search/types'
+import { sanitizeSearchQuery } from '@/search/lib/sanitize-search-query'
+
import styles from './SearchOverlay.module.scss'
type Props = {
@@ -317,15 +319,14 @@ export function SearchOverlay({
const generalSearchResultOnSelect = (selectedOption: GeneralSearchHit) => {
sendEvent({
type: EventType.search,
- // TODO: Remove PII so we can include the actual query
- search_query: urlSearchInputQuery,
+ search_query: sanitizeSearchQuery(urlSearchInputQuery),
search_context: GENERAL_SEARCH_CONTEXT,
eventGroupKey: SEARCH_OVERLAY_EVENT_GROUP,
eventGroupId: searchEventGroupId.current,
})
sendEvent({
type: EventType.searchResult,
- search_result_query: urlSearchInputQuery,
+ search_result_query: sanitizeSearchQuery(urlSearchInputQuery),
search_result_index: selectedIndex,
search_result_total: totalGeneralSearchResults,
search_result_url: selectedOption.url || '',
diff --git a/src/search/lib/ai-search-proxy.ts b/src/search/lib/ai-search-proxy.ts
index dd84458da3..a6bd834df7 100644
--- a/src/search/lib/ai-search-proxy.ts
+++ b/src/search/lib/ai-search-proxy.ts
@@ -21,8 +21,9 @@ export const aiSearchProxy = async (req: ExtendedRequest, res: Response) => {
let docsSource = ''
try {
docsSource = getCSECopilotSource(version)
- } catch (error: any) {
- errors.push({ message: error?.message || 'Invalid version' })
+ } catch (error: unknown) {
+ const message = error instanceof Error ? error.message : 'Invalid version'
+ errors.push({ message })
}
if (errors.length) {
diff --git a/src/search/lib/sanitize-search-query.ts b/src/search/lib/sanitize-search-query.ts
new file mode 100644
index 0000000000..0ad5e2c574
--- /dev/null
+++ b/src/search/lib/sanitize-search-query.ts
@@ -0,0 +1,58 @@
+// Remove PII from search queries before logging
+// Redacts common PII patterns like emails, tokens, and other sensitive data
+
+export function sanitizeSearchQuery(query: string): string {
+ if (!query) return query
+
+ let sanitized = query
+
+ // Redact email addresses
+ sanitized = sanitized.replace(/\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b/g, '[EMAIL]')
+
+ // Redact GitHub tokens (all formats)
+ // Classic tokens: ghp_, gho_, ghu_, ghs_, ghr_
+ sanitized = sanitized.replace(/\b(ghp|gho|ghu|ghs|ghr)_[A-Za-z0-9]{20,}\b/gi, '[TOKEN]')
+ // Fine-grained personal access tokens: github_pat_
+ sanitized = sanitized.replace(/\bgithub_pat_[A-Za-z0-9_]{20,}\b/gi, '[TOKEN]')
+ // OAuth tokens: gho_
+ sanitized = sanitized.replace(/\bgho_[A-Za-z0-9]{20,}\b/gi, '[TOKEN]')
+
+ // Redact UUIDs
+ sanitized = sanitized.replace(
+ /\b[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\b/gi,
+ '[UUID]',
+ )
+
+ // Redact JWT tokens (format: xxx.yyy.zzz where each part is base64url)
+ sanitized = sanitized.replace(
+ /\bey[A-Za-z0-9_-]{10,}\.[A-Za-z0-9_-]{10,}\.[A-Za-z0-9_-]{10,}\b/g,
+ '[JWT]',
+ )
+
+ // Redact IP addresses (with proper validation for 0-255 range)
+ sanitized = sanitized.replace(
+ /\b(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\b/g,
+ '[IP]',
+ )
+
+ // Redact SSH private key headers
+ sanitized = sanitized.replace(/-----BEGIN( [A-Z]+)? PRIVATE KEY-----/g, '[SSH_KEY]')
+
+ // Redact potential API keys (long strings of hex or base64-like characters)
+ // This catches high-entropy strings that might be secrets
+ sanitized = sanitized.replace(/\b[A-Za-z0-9_-]{40,}\b/g, (match) => {
+ // Only redact if it looks like high entropy (mixed case, numbers)
+ const hasLowerCase = /[a-z]/.test(match)
+ const hasUpperCase = /[A-Z]/.test(match)
+ const hasNumbers = /[0-9]/.test(match)
+ const entropyIndicators = [hasLowerCase, hasUpperCase, hasNumbers].filter(Boolean).length
+
+ // If it has at least 2 of the 3 character types, it's likely a secret
+ if (entropyIndicators >= 2) {
+ return '[SECRET]'
+ }
+ return match
+ })
+
+ return sanitized
+}
diff --git a/src/search/scripts/scrape/lib/domwaiter.ts b/src/search/scripts/scrape/lib/domwaiter.ts
index 34302c823b..70e1251f6f 100644
--- a/src/search/scripts/scrape/lib/domwaiter.ts
+++ b/src/search/scripts/scrape/lib/domwaiter.ts
@@ -22,6 +22,10 @@ class HTTPError extends Error {
}
}
+// Type aliases for error objects with additional URL information
+type HTTPErrorWithUrl = HTTPError & { url?: string; relativePath?: string }
+type ErrorWithUrl = Error & { url?: string; relativePath?: string }
+
interface DomWaiterOptions {
parseDOM?: boolean
json?: boolean
@@ -94,8 +98,8 @@ async function getPage(page: Permalink, emitter: EventEmitter, opts: DomWaiterOp
{ requestUrl: { pathname: page.url } },
)
// Add URL and path info directly to the HTTPError
- ;(httpError as any).url = page.url
- ;(httpError as any).relativePath = page.relativePath
+ ;(httpError as HTTPErrorWithUrl).url = page.url
+ ;(httpError as HTTPErrorWithUrl).relativePath = page.relativePath
// Emit error instead of throwing
emitter.emit('error', httpError)
return // Exit early, don't continue processing
@@ -109,8 +113,8 @@ async function getPage(page: Permalink, emitter: EventEmitter, opts: DomWaiterOp
const enhancedError = new Error(err.message, { cause: err.cause })
enhancedError.name = err.name
enhancedError.stack = err.stack
- ;(enhancedError as any).url = page.url
- ;(enhancedError as any).relativePath = page.relativePath
+ ;(enhancedError as ErrorWithUrl).url = page.url
+ ;(enhancedError as ErrorWithUrl).relativePath = page.relativePath
emitter.emit('error', enhancedError)
} else {
emitter.emit('error', err)
@@ -130,15 +134,16 @@ async function getPage(page: Permalink, emitter: EventEmitter, opts: DomWaiterOp
{ requestUrl: { pathname: page.url } },
)
// Add URL and path info directly to the HTTPError
- ;(httpError as any).url = page.url
- ;(httpError as any).relativePath = page.relativePath
+ ;(httpError as HTTPErrorWithUrl).url = page.url
+ ;(httpError as HTTPErrorWithUrl).relativePath = page.relativePath
// Emit error instead of throwing
emitter.emit('error', httpError)
return // Exit early, don't continue processing
}
const body = await response.text()
const pageCopy = Object.assign({}, page, { body })
- if (opts.parseDOM) (pageCopy as any).$ = cheerio.load(body)
+ if (opts.parseDOM)
(pageCopy as Permalink & { $?: ReturnType<typeof cheerio.load> }).$ = cheerio.load(body)
emitter.emit('page', pageCopy)
} catch (err) {
// Enhance error with URL information
@@ -146,8 +151,8 @@ async function getPage(page: Permalink, emitter: EventEmitter, opts: DomWaiterOp
const enhancedError = new Error(err.message, { cause: err.cause })
enhancedError.name = err.name
enhancedError.stack = err.stack
- ;(enhancedError as any).url = page.url
- ;(enhancedError as any).relativePath = page.relativePath
+ ;(enhancedError as ErrorWithUrl).url = page.url
+ ;(enhancedError as ErrorWithUrl).relativePath = page.relativePath
emitter.emit('error', enhancedError)
} else {
emitter.emit('error', err)
diff --git a/src/search/tests/sanitize-search-query.ts b/src/search/tests/sanitize-search-query.ts
new file mode 100644
index 0000000000..7e910ec562
--- /dev/null
+++ b/src/search/tests/sanitize-search-query.ts
@@ -0,0 +1,218 @@
+import { describe, expect, test } from 'vitest'
+import { sanitizeSearchQuery } from '@/search/lib/sanitize-search-query'
+
+describe('sanitizeSearchQuery', () => {
+ test('returns empty string for empty input', () => {
+ expect(sanitizeSearchQuery('')).toBe('')
+ })
+
+ test('returns query unchanged if no PII detected', () => {
+ expect(sanitizeSearchQuery('how to create a repository')).toBe('how to create a repository')
+ expect(sanitizeSearchQuery('git commit message')).toBe('git commit message')
+ })
+
+ describe('email redaction', () => {
+ test('redacts single email address', () => {
+ expect(sanitizeSearchQuery('contact user@example.com for help')).toBe(
+ 'contact [EMAIL] for help',
+ )
+ })
+
+ test('redacts multiple email addresses', () => {
+ expect(sanitizeSearchQuery('email john@example.com or jane@test.org')).toBe(
+ 'email [EMAIL] or [EMAIL]',
+ )
+ })
+
+ test('redacts emails with special characters', () => {
+ expect(sanitizeSearchQuery('user.name+tag@example.co.uk')).toBe('[EMAIL]')
+ })
+ })
+
+ describe('GitHub token redaction', () => {
+ test('redacts classic personal access tokens (ghp_)', () => {
+ expect(sanitizeSearchQuery('token ghp_1234567890123456789012345678901234567890')).toBe(
+ 'token [TOKEN]',
+ )
+ })
+
+ test('redacts OAuth tokens (gho_)', () => {
+ expect(sanitizeSearchQuery('oauth gho_1234567890123456789012345678901234567890')).toBe(
+ 'oauth [TOKEN]',
+ )
+ })
+
+ test('redacts user tokens (ghu_)', () => {
+ expect(sanitizeSearchQuery('user ghu_1234567890123456789012345678901234567890')).toBe(
+ 'user [TOKEN]',
+ )
+ })
+
+ test('redacts server tokens (ghs_)', () => {
+ expect(sanitizeSearchQuery('server ghs_1234567890123456789012345678901234567890')).toBe(
+ 'server [TOKEN]',
+ )
+ })
+
+ test('redacts refresh tokens (ghr_)', () => {
+ expect(sanitizeSearchQuery('refresh ghr_1234567890123456789012345678901234567890')).toBe(
+ 'refresh [TOKEN]',
+ )
+ })
+
+ test('redacts fine-grained PATs (github_pat_)', () => {
+ expect(
+ sanitizeSearchQuery('fine-grained github_pat_1234567890123456789012345678901234567890'),
+ ).toBe('fine-grained [TOKEN]')
+ })
+
+ test('redacts tokens with minimum length (20 chars)', () => {
+ expect(sanitizeSearchQuery('short ghp_12345678901234567890')).toBe('short [TOKEN]')
+ })
+
+ test('does not redact partial token prefixes', () => {
+ expect(sanitizeSearchQuery('ghp is not a token')).toBe('ghp is not a token')
+ })
+ })
+
+ describe('UUID redaction', () => {
+ test('redacts standard UUIDs', () => {
+ expect(sanitizeSearchQuery('id 550e8400-e29b-41d4-a716-446655440000 found')).toBe(
+ 'id [UUID] found',
+ )
+ })
+
+ test('redacts UUIDs regardless of case', () => {
+ expect(sanitizeSearchQuery('UUID 550E8400-E29B-41D4-A716-446655440000')).toBe('UUID [UUID]')
+ })
+
+ test('redacts multiple UUIDs', () => {
+ expect(
+ sanitizeSearchQuery(
+ '550e8400-e29b-41d4-a716-446655440000 and 6ba7b810-9dad-11d1-80b4-00c04fd430c8',
+ ),
+ ).toBe('[UUID] and [UUID]')
+ })
+ })
+
+ describe('JWT redaction', () => {
+ test('redacts JWT tokens', () => {
+ const jwt =
+ 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c'
+ expect(sanitizeSearchQuery(`token ${jwt}`)).toBe('token [JWT]')
+ })
+
+ test('redacts JWT-like tokens with underscores and hyphens', () => {
+ expect(sanitizeSearchQuery('eyABC123_-XYZ.eyDEF456_-UVW.eyGHI789_-RST')).toBe('[JWT]')
+ })
+ })
+
+ describe('IP address redaction', () => {
+ test('redacts valid IPv4 addresses', () => {
+ expect(sanitizeSearchQuery('server 192.168.1.1 down')).toBe('server [IP] down')
+ expect(sanitizeSearchQuery('10.0.0.1')).toBe('[IP]')
+ expect(sanitizeSearchQuery('172.16.254.1')).toBe('[IP]')
+ })
+
+ test('redacts edge case IPs (0.0.0.0 and 255.255.255.255)', () => {
+ expect(sanitizeSearchQuery('0.0.0.0')).toBe('[IP]')
+ expect(sanitizeSearchQuery('255.255.255.255')).toBe('[IP]')
+ })
+
+ test('does not redact invalid IPs with octets > 255', () => {
+ expect(sanitizeSearchQuery('999.999.999.999')).toBe('999.999.999.999')
+ expect(sanitizeSearchQuery('256.1.1.1')).toBe('256.1.1.1')
+ })
+
+ test('redacts multiple IP addresses', () => {
+ expect(sanitizeSearchQuery('connect 10.0.0.1 or 192.168.1.1')).toBe('connect [IP] or [IP]')
+ })
+ })
+
+ describe('SSH private key redaction', () => {
+ test('redacts RSA private key headers', () => {
+ expect(sanitizeSearchQuery('-----BEGIN RSA PRIVATE KEY----- content')).toBe(
+ '[SSH_KEY] content',
+ )
+ })
+
+ test('redacts generic private key headers', () => {
+ expect(sanitizeSearchQuery('-----BEGIN PRIVATE KEY----- content')).toBe('[SSH_KEY] content')
+ })
+
+ test('redacts EC private key headers', () => {
+ expect(sanitizeSearchQuery('-----BEGIN EC PRIVATE KEY----- content')).toBe(
+ '[SSH_KEY] content',
+ )
+ })
+ })
+
+ describe('high-entropy string redaction', () => {
+ test('redacts long high-entropy strings with mixed case and numbers', () => {
+ // 40+ chars with lowercase, uppercase, and numbers
+ const secret = 'aB3dEf9Gh2IjKlMn0PqRsTuVwXyZ1234567890aBcDeF'
+ expect(sanitizeSearchQuery(secret)).toBe('[SECRET]')
+ })
+
+ test('redacts strings with lowercase and numbers', () => {
+ const secret = 'abc123def456ghi789jkl012mno345pqr678stu901vwx234'
+ expect(sanitizeSearchQuery(secret)).toBe('[SECRET]')
+ })
+
+ test('redacts strings with uppercase and numbers', () => {
+ const secret = 'ABC123DEF456GHI789JKL012MNO345PQR678STU901VWX234'
+ expect(sanitizeSearchQuery(secret)).toBe('[SECRET]')
+ })
+
+ test('does not redact long strings with only lowercase', () => {
+ const notSecret = 'thisisalongstringwithnouppercharsornumbers'
+ expect(sanitizeSearchQuery(notSecret)).toBe(notSecret)
+ })
+
+ test('does not redact long strings with only numbers', () => {
+ const notSecret = '12345678901234567890123456789012345678901234567890'
+ expect(sanitizeSearchQuery(notSecret)).toBe(notSecret)
+ })
+
+ test('does not redact strings shorter than 40 chars', () => {
+ const shortString = 'aB3dEf9Gh2IjKlMn0PqRsTuVwXyZ'
+ expect(sanitizeSearchQuery(shortString)).toBe(shortString)
+ })
+ })
+
+ describe('multiple PII types in one query', () => {
+ test('redacts all PII types in a single query', () => {
+ const query =
+ 'email user@example.com token ghp_1234567890123456789012345678901234567890 from 192.168.1.1'
+ expect(sanitizeSearchQuery(query)).toBe('email [EMAIL] token [TOKEN] from [IP]')
+ })
+
+ test('handles complex mixed query', () => {
+ const query = `
+ Contact admin@github.com
+ Token: github_pat_12345678901234567890ABCDEFGH
+ UUID: 550e8400-e29b-41d4-a716-446655440000
+ Server: 10.0.0.1
+ `.trim()
+ const result = sanitizeSearchQuery(query)
+ expect(result).toContain('[EMAIL]')
+ expect(result).toContain('[TOKEN]')
+ expect(result).toContain('[UUID]')
+ expect(result).toContain('[IP]')
+ })
+ })
+
+ describe('preserves safe content', () => {
+ test('preserves URLs without emails', () => {
+ expect(sanitizeSearchQuery('https://github.com/docs')).toBe('https://github.com/docs')
+ })
+
+ test('preserves code snippets', () => {
+ expect(sanitizeSearchQuery('git commit -m "fix bug"')).toBe('git commit -m "fix bug"')
+ })
+
+ test('preserves version numbers', () => {
+ expect(sanitizeSearchQuery('node v18.0.0')).toBe('node v18.0.0')
+ })
+ })
+})
diff --git a/src/secret-scanning/data/public-docs.yml b/src/secret-scanning/data/public-docs.yml
index 3fb8c497a4..52d3682afc 100644
--- a/src/secret-scanning/data/public-docs.yml
+++ b/src/secret-scanning/data/public-docs.yml
@@ -4493,6 +4493,19 @@
hasValidityCheck: false
base64Supported: false
isduplicate: false
+- provider: Paddle
+ supportedSecret: Paddle Sandbox API Key
+ secretType: paddle_sandbox_api_key
+ versions:
+ fpt: '*'
+ ghec: '*'
+ ghes: '>=3.20'
+ isPublic: true
+ isPrivateWithGhas: true
+ hasPushProtection: false
+ hasValidityCheck: false
+ base64Supported: false
+ isduplicate: false
- provider: PagerDuty
supportedSecret: PagerDuty OAuth Secret
secretType: pagerduty_oauth_secret
diff --git a/src/secret-scanning/lib/config.json b/src/secret-scanning/lib/config.json
index 60b1933ea2..611f6c1a28 100644
--- a/src/secret-scanning/lib/config.json
+++ b/src/secret-scanning/lib/config.json
@@ -1,5 +1,5 @@
{
- "sha": "01009b022a23f59bee88e60046c6b425178c3cab",
- "blob-sha": "5fb38d54763b5d5170e70b153a2d0ddeb5bed7c9",
+ "sha": "b68ab4c2355b44a07d40d669fd28da652fe1929e",
+ "blob-sha": "403271a4c5adc2dc195b04553c514833765b388d",
"targetFilename": "code-security/secret-scanning/introduction/supported-secret-scanning-patterns"
}
\ No newline at end of file
diff --git a/src/types/json-schema-merge-allof.d.ts b/src/types/json-schema-merge-allof.d.ts
index 83f021e0bf..d4e2d3512e 100644
--- a/src/types/json-schema-merge-allof.d.ts
+++ b/src/types/json-schema-merge-allof.d.ts
@@ -9,7 +9,7 @@ declare module 'json-schema-merge-allof' {
type?: string | string[]
items?: JSONSchema | JSONSchema[]
additionalProperties?: boolean | JSONSchema
- [key: string]: any // JSON Schema allows arbitrary additional properties per spec
+ [key: string]: unknown // JSON Schema allows arbitrary additional properties per spec
}
/**
@@ -23,7 +23,7 @@ declare module 'json-schema-merge-allof' {
*/
resolvers?: Record<
string,
- (values: any[], path: string[], mergeSchemas: any, options: any) => any
+ (values: unknown[], path: string[], mergeSchemas: unknown, options: unknown) => unknown
>
/**