1
0
mirror of synced 2025-12-19 18:10:59 -05:00

Merge pull request #41622 from github/repo-sync

Repo sync
This commit is contained in:
docs-bot
2025-12-01 10:38:14 -08:00
committed by GitHub
112 changed files with 1036 additions and 432 deletions

View File

@@ -20,7 +20,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out repo
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup

View File

@@ -22,7 +22,7 @@ jobs:
if: github.repository == 'github/docs-internal' || github.repository == 'github/docs'
steps:
- name: Checkout
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup

View File

@@ -25,7 +25,7 @@ jobs:
steps:
- name: Check out repo
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Check team membership
id: membership_check

View File

@@ -24,7 +24,7 @@ jobs:
REPORT_REPOSITORY: github/docs-content
steps:
- name: Checkout
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
with:
# To prevent issues with cloning early access content later
persist-credentials: 'false'

View File

@@ -37,7 +37,7 @@ jobs:
- name: Check out repo
if: ${{ failure() && github.event_name != 'pull_request_target' }}
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/slack-alert
if: ${{ failure() && github.event_name != 'pull_request_target' }}

View File

@@ -33,7 +33,7 @@ jobs:
if: github.repository == 'github/docs-internal' || github.repository == 'github/docs'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: github/codeql-action/init@e296a935590eb16afc0c0108289f68c87e2a89a5 # v4.30.7
with:
languages: javascript # comma separated list of values from {go, python, javascript, java, cpp, csharp, ruby}

View File

@@ -83,7 +83,7 @@ jobs:
- name: Check out repo
if: ${{ failure() && github.event_name != 'pull_request_target' }}
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/slack-alert
if: ${{ failure() && github.event_name != 'pull_request_target' }}
with:

View File

@@ -23,7 +23,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out repo
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Set up Node and dependencies
uses: ./.github/actions/node-npm-setup

View File

@@ -25,7 +25,7 @@ jobs:
if: github.repository == 'github/docs-internal' || github.repository == 'github/docs'
steps:
- name: Checkout
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup

View File

@@ -73,7 +73,7 @@ jobs:
- name: Check out repo
if: ${{ failure() && github.event_name != 'workflow_dispatch' && github.repository == 'github/docs-internal' }}
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/slack-alert
if: ${{ failure() && github.event_name != 'workflow_dispatch' && github.repository == 'github/docs-internal' }}
with:

View File

@@ -24,7 +24,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout English repo
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
with:
# Using a PAT is necessary so that the new commit will trigger the
# CI in the PR. (Events from GITHUB_TOKEN don't trigger new workflows.)

View File

@@ -23,7 +23,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v6.0.0
- name: 'Ensure ${{ env.CHANGELOG_FILE }} exists'
run: |

View File

@@ -60,10 +60,10 @@ jobs:
language_repo: github/docs-internal.ko-kr
steps:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Checkout the language-specific repo
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
with:
repository: ${{ matrix.language_repo }}
token: ${{ secrets.DOCS_BOT_PAT_BASE }}

View File

@@ -20,7 +20,7 @@ jobs:
steps:
- name: Check out repo content
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Setup Node.js
uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0

View File

@@ -30,7 +30,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out repo
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup

View File

@@ -30,7 +30,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out repo
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup

View File

@@ -24,7 +24,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository code
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup

View File

@@ -19,7 +19,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository code
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup

View File

@@ -24,7 +24,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v5
uses: actions/checkout@v6.0.0
# Add to the FR project
# and set type to "Maintenance"

View File

@@ -32,12 +32,12 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository code
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup
- name: Checkout codeql repo
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
with:
repository: github/codeql
path: codeql

View File

@@ -37,7 +37,7 @@ jobs:
timeout-minutes: 60
steps:
- name: Check out repo
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/setup-elasticsearch

View File

@@ -23,7 +23,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out repo
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- id: membership_check
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd

View File

@@ -23,14 +23,14 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup
- uses: ./.github/actions/setup-elasticsearch
if: ${{ github.event_name == 'pull_request' }}
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
with:
token: ${{ secrets.DOCS_BOT_PAT_BASE }}
repository: github/docs-internal-data

View File

@@ -37,10 +37,10 @@ jobs:
if: github.repository == 'github/docs-internal'
steps:
- name: Check out repo
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Clone docs-internal-data
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
with:
repository: github/docs-internal-data
# This works because user `docs-bot` has read access to that private repo.

View File

@@ -87,7 +87,7 @@ jobs:
- name: Check out repo
if: ${{ failure() && github.event_name != 'workflow_dispatch' }}
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/slack-alert
if: ${{ failure() && github.event_name != 'workflow_dispatch' }}
@@ -115,10 +115,10 @@ jobs:
language: ${{ fromJSON(needs.figureOutMatrix.outputs.matrix) }}
steps:
- name: Check out repo
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Clone docs-internal-data
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
with:
repository: github/docs-internal-data
# This works because user `docs-bot` has read access to that private repo.

View File

@@ -29,7 +29,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out repo
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup

View File

@@ -23,7 +23,7 @@ jobs:
run: gh --version
- name: Check out repo's default branch
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup
@@ -37,7 +37,7 @@ jobs:
- name: Check out docs-early-access too, if internal repo
if: ${{ github.repository == 'github/docs-internal' }}
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
with:
repository: github/docs-early-access
token: ${{ secrets.DOCS_BOT_PAT_BASE }}

View File

@@ -26,7 +26,7 @@ jobs:
if: github.repository == 'github/docs-internal' || github.repository == 'github/docs'
steps:
- name: Checkout
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup

View File

@@ -23,7 +23,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out repo
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup

View File

@@ -23,7 +23,7 @@ jobs:
run: gh --version
- name: Check out repo's default branch
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Set up Node and dependencies
uses: ./.github/actions/node-npm-setup

View File

@@ -17,7 +17,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out repo
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup

View File

@@ -19,7 +19,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out the repository
uses: actions/checkout@v5
uses: actions/checkout@v6.0.0
- name: Update list of allowed IPs
run: |

View File

@@ -25,7 +25,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out repo
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup

View File

@@ -31,7 +31,7 @@ jobs:
repo-token: ${{ secrets.DOCS_BOT_PAT_BASE }}
- name: Check out repo
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup

View File

@@ -45,7 +45,7 @@ jobs:
- name: Check out repo
if: ${{ failure() }}
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/slack-alert
if: ${{ failure() }}
with:

View File

@@ -35,7 +35,7 @@ jobs:
- name: Check out repo
if: ${{ failure() }}
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/slack-alert
if: ${{ failure() }}
with:

View File

@@ -22,7 +22,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out repo
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: peter-evans/create-or-update-comment@e8674b075228eee787fea43ef493e45ece1004c9
with:
@@ -41,7 +41,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out repo
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: peter-evans/create-or-update-comment@e8674b075228eee787fea43ef493e45ece1004c9
with:

View File

@@ -57,7 +57,7 @@ jobs:
- name: Check out repo
if: ${{ failure() }}
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/slack-alert
if: ${{ failure() }}
with:

View File

@@ -26,7 +26,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out repo
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup

View File

@@ -26,7 +26,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout English repo
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
with:
# Using a PAT is necessary so that the new commit will trigger the
# CI in the PR. (Events from GITHUB_TOKEN don't trigger new workflows.)

View File

@@ -30,7 +30,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout English repo
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
with:
# Using a PAT is necessary so that the new commit will trigger the
# CI in the PR. (Events from GITHUB_TOKEN don't trigger new workflows.)

View File

@@ -20,7 +20,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out repo content
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Check if this run was triggered by a member of the docs team
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd

View File

@@ -25,7 +25,7 @@ jobs:
if: github.repository == 'github/docs-internal' || github.repository == 'github/docs'
steps:
- name: Check out repo
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Setup Node.js
uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0

View File

@@ -32,7 +32,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out repo
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup

View File

@@ -28,7 +28,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out repo with full history
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
with:
fetch-depth: 0

View File

@@ -21,7 +21,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out repo content
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
with:
repository: github/docs-internal
token: ${{ secrets.DOCS_BOT_PAT_BASE }}

View File

@@ -23,7 +23,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out repo
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Sync repo to branch
uses: repo-sync/github-sync@3832fe8e2be32372e1b3970bbae8e7079edeec88

View File

@@ -38,7 +38,7 @@ jobs:
PR_NUMBER: ${{ github.event.pull_request.number }}
steps:
- name: check out repo content
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup
- name: Set APP_URL

View File

@@ -36,7 +36,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v5
uses: actions/checkout@v6.0.0
- name: Add content systems as a reviewer
uses: ./.github/actions/retry-command

View File

@@ -37,7 +37,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v5
uses: actions/checkout@v6.0.0
- name: Add dependabot as a reviewer
uses: ./.github/actions/retry-command

View File

@@ -47,7 +47,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v5
uses: actions/checkout@v6.0.0
- name: Add docs engineering as a reviewer
uses: ./.github/actions/retry-command

View File

@@ -32,7 +32,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v5
uses: actions/checkout@v6.0.0
- name: Get changed files
id: changed_files

View File

@@ -27,10 +27,10 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: checkout docs-internal
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: checkout public site-policy
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
with:
repository: github/site-policy
token: ${{ secrets.API_TOKEN_SITEPOLICY }}

View File

@@ -38,7 +38,7 @@ jobs:
- name: Check out repo
if: ${{ failure() }}
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/slack-alert
if: ${{ failure() }}
with:

View File

@@ -24,7 +24,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup

View File

@@ -30,11 +30,11 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository code
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
# Check out a nested repository inside of previous checkout
- name: Checkout semmle-code repo
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
with:
# By default, only the most recent commit of the `main` branch
# will be checked out

View File

@@ -23,7 +23,7 @@ jobs:
ignored-types: ${{ steps.sync.outputs.ignored-types }}
steps:
- name: Checkout
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup
- name: Run updater scripts
id: sync
@@ -82,7 +82,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/slack-alert
with:
slack_channel_id: ${{ secrets.DOCS_ALERTS_SLACK_CHANNEL_ID }}

View File

@@ -30,11 +30,11 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository code
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
# Check out a nested repository inside of previous checkout
- name: Checkout rest-api-description repo
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
with:
# By default, only the most recent commit of the `main` branch
# will be checked out
@@ -42,7 +42,7 @@ jobs:
path: rest-api-description
ref: ${{ inputs.SOURCE_BRANCH }}
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
with:
# By default, only the most recent commit of the `main` branch
# will be checked out

View File

@@ -24,7 +24,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup

View File

@@ -27,7 +27,7 @@ jobs:
# Each of these ifs needs to be repeated at each step to make sure the required check still runs
# Even if if doesn't do anything
- name: Check out repo
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup

View File

@@ -87,7 +87,7 @@ jobs:
# Each of these ifs needs to be repeated at each step to make sure the required check still runs
# Even if if doesn't do anything
- name: Check out repo
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/setup-elasticsearch
if: ${{ matrix.name == 'search' || matrix.name == 'languages' }}

View File

@@ -43,7 +43,7 @@ jobs:
}
- name: Check out repo
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup

View File

@@ -21,7 +21,7 @@ jobs:
steps:
- name: Check out repo
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup

View File

@@ -23,7 +23,7 @@ jobs:
steps:
- name: Check out repo
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup

View File

@@ -44,7 +44,7 @@ jobs:
- name: Check out repo
if: ${{ failure() }}
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/slack-alert
if: ${{ failure() }}
with:
@@ -72,7 +72,7 @@ jobs:
- name: Check out repo
if: ${{ failure() }}
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/slack-alert
if: ${{ failure() }}
with:

View File

@@ -23,7 +23,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out repo
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Get files changed
uses: dorny/paths-filter@0bc4621a3135347011ad047f9ecf449bf72ce2bd

View File

@@ -20,7 +20,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out repo
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup

View File

@@ -34,10 +34,10 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out repo's default branch
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: ./.github/actions/node-npm-setup
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
with:
token: ${{ secrets.DOCS_BOT_PAT_BASE }}
repository: github/github

View File

@@ -28,7 +28,7 @@ jobs:
if: github.repository == 'github/docs-internal'
steps:
- name: Checkout
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@c47758b77c9736f4b2ef4073d4d51994fabfe349 # v3.7.1

View File

@@ -87,6 +87,6 @@ gh run watch
When using the REST API, you configure the `inputs` and `ref` as request body parameters. If the inputs are omitted, the default values defined in the workflow file are used.
> [!NOTE]
> You can define up to 10 `inputs` for a `workflow_dispatch` event.
> You can define up to {% ifversion fpt or ghec %}25 {% else %}10 {% endif %} `inputs` for a `workflow_dispatch` event.
For more information about using the REST API, see [AUTOTITLE](/rest/actions/workflows#create-a-workflow-dispatch-event).

View File

@@ -26,13 +26,17 @@ Your billing experience depends on whether your products are metered, volume-bas
## Billing cycles for metered products
Metered products have a fixed **billing period** that starts at 00:00:00 UTC on the first day of each month and ends at 23:59:59 UTC on the last day of the month.
Metered products, and all payments made using an Azure subscription ID, have a fixed **billing period** that starts at 00:00:00 UTC on the first day of each month and ends at 23:59:59 UTC on the last day of the month.
At the end of each month, your metered usage is calculated and scheduled to be billed on your **bill cycle day**.
At the end of each month, your metered usage is calculated and scheduled to be billed on your **billing date**. Accounts using an Azure subscription ID can access their specific billing date in the Azure commerce portal. For users with other payment methods:
{% ifversion fpt %}For personal accounts and organizations, your bill cycle day is typically the day you started a paid plan (not necessarily when the account was created).{% elsif ghec %}Your bill cycle day is typically determined by when you converted from a trial to a paid enterprise account.{% endif %} For example, if you {% ifversion fpt %}started a paid plan{% elsif ghec %}converted from a trial{% endif %} on the 15th of a month, you will be billed on the 15th of each subsequent month.
* **Personal and organization accounts:** This is typically the day you started a paid plan (not necessarily when the account was created).
* **Enterprise accounts:** This is typically determined by when you converted from a trial to a paid enterprise account.
> [!NOTE] If you are paying via an Azure subscription ID, your **billing period** will run from the first day of each month to the last day of the month. To access your specific **bill cycle day**, please visit the Azure commerce portal.
For example, if you started a paid plan or converted from a trial on the 15th of a month, you will be billed on the 15th of each subsequent month.
> [!NOTE]
> From **December 1, 2025**, all self-serve, metered {% data variables.product.prodname_ghe_cloud %} accounts that pay by credit card will migrate to a **billing date** of 1st of the month. See [Billing date standardized to the first of the month for self-serve credit card metered Enterprise customers](https://github.blog/changelog/2025-11-17-billing-date-standardized-to-the-first-of-the-month-for-self-serve-credit-card-metered-enterprise-customers-now-generally-available/) in the changelog.
## Billing cycles for volume-based products

View File

@@ -61,9 +61,11 @@ If required, {% data variables.product.prodname_ghe_cloud %} accounts can reques
Each account has a **billing date** and a **billing cycle**.
For credit card and PayPal payments, the billing date is the day you started a paid plan (not necessarily when the account was created). For example, if you started a paid plan on the 15th of a month, you will be billed on the 15th of each subsequent month. For payments using an Azure subscription ID, the billing date is available in the Azure commerce portal.
For **personal and organization accounts** set up for credit card and PayPal payments, the **billing date** is typically the day you started a paid plan (not necessarily when the account was created). For example, if you started a paid plan on the 15th of a month, you will be billed on the 15th of each subsequent month. For payments using an Azure subscription ID, the billing date is available in the Azure commerce portal.
Most users pay for {% data variables.product.github %} using metered billing. The billing cycle for all metered products is a fixed period from the first day to the last day of the month.
For **enterprise accounts**, your **billing date** will vary. See [AUTOTITLE](/billing/concepts/billing-cycles).
Most users pay for {% data variables.product.github %} using metered billing. The **billing cycle** for all metered products is a fixed period from the first day to the last day of the month.
### Authorization holds

View File

@@ -118,6 +118,22 @@ Alternatively, you can open the log folder for {% data variables.product.prodnam
* Use: <kbd>Ctrl</kbd>+<kbd>Shift</kbd>+<kbd>P</kbd>
1. Type "Logs", and then select **Developer: Open Extension Logs Folder** from the list.
## Enabling debug mode
If you find the log file doesn't contain enough information to resolve an issue, it may help to enable debug logging temporarily. This can be especially helpful for debugging network-related issues.
1. Open the {% data variables.product.prodname_vscode_command_palette_shortname %}
* For Mac:
* Use: <kbd>Shift</kbd>+<kbd>Command</kbd>+<kbd>P</kbd>
* For Windows or Linux:
* Use: <kbd>Ctrl</kbd>+<kbd>Shift</kbd>+<kbd>P</kbd>
1. Type "Developer", then select **Developer: Set Log Level**.
1. Type "{% data variables.product.github %}", then select the {% data variables.product.github %} extension you're troubleshooting:
* **{% data variables.copilot.copilot_chat %}** for the {% data variables.copilot.copilot_chat_short %} extension.
* **{% data variables.product.github %}** for the {% data variables.copilot.copilot_extension %}.
1. Select **Trace** from the dropdown list.
1. When you have the information you need, disable debug mode by repeating steps 1 through 4 and returning the logging level to **Info**.
## Viewing network connectivity diagnostics logs
If you encounter problems connecting to {% data variables.product.prodname_copilot %} due to network restrictions, firewalls, or your proxy setup, use the following troubleshooting steps.

View File

@@ -37,7 +37,7 @@ const config: NextConfig = {
},
// Don't use automatic Next.js logging in dev unless the log level is `debug` or higher
// See `src/observability/logger/README.md` for log levels
logging: getLogLevelNumber() < 3 ? false : {},
logging: getLogLevelNumber() < 3 ? undefined : {},
async rewrites() {
const DEFAULT_VERSION = 'free-pro-team@latest'
return productIds.map((productId) => {
@@ -48,6 +48,11 @@ const config: NextConfig = {
})
},
webpack: (webpackConfig) => {
webpackConfig.resolve.fallback = { fs: false, async_hooks: false }
return webpackConfig
},
// Turbopack is the default bundler in Next.js 16
// Keep webpack config for now to support both bundlers
@@ -63,13 +68,6 @@ const config: NextConfig = {
},
},
webpack: (webpackConfig) => {
webpackConfig.experiments = webpackConfig.experiments || {}
webpackConfig.experiments.topLevelAwait = true
webpackConfig.resolve.fallback = { fs: false, async_hooks: false }
return webpackConfig
},
// https://nextjs.org/docs/api-reference/next.config.js/compression
compress: false,
@@ -79,18 +77,6 @@ const config: NextConfig = {
// the CDN marks the cached content as "fresh".
generateEtags: false,
experimental: {
// The output of our getServerSideProps() return large chunks of
// data because it contains our rendered Markdown.
// The default, for a "Large Page Data" warning is 128KB
// but many of our pages are much larger.
// The warning is: https://nextjs.org/docs/messages/large-page-data
largePageDataBytes: 1024 * 1024, // 1 MB
// This makes it so that going Back will scroll to the previous position
scrollRestoration: true,
},
compiler: {
styledComponents: true,
},

81
package-lock.json generated
View File

@@ -43,7 +43,7 @@
"file-type": "21.0.0",
"flat": "^6.0.1",
"github-slugger": "^2.0.0",
"glob": "11.0.2",
"glob": "11.1.0",
"hast-util-from-parse5": "^8.0.3",
"hast-util-to-string": "^3.0.1",
"hastscript": "^9.0.1",
@@ -57,7 +57,7 @@
"is-svg": "6.0.0",
"javascript-stringify": "^2.1.0",
"js-cookie": "^3.0.5",
"js-yaml": "^4.1.0",
"js-yaml": "^4.1.1",
"liquidjs": "^10.16.7",
"lodash": "^4.17.21",
"lodash-es": "^4.17.21",
@@ -2383,6 +2383,7 @@
"version": "8.0.2",
"resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz",
"integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==",
"license": "ISC",
"dependencies": {
"string-width": "^5.1.2",
"string-width-cjs": "npm:string-width@^4.2.0",
@@ -2396,9 +2397,10 @@
}
},
"node_modules/@isaacs/cliui/node_modules/ansi-styles": {
"version": "6.2.1",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz",
"integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==",
"version": "6.2.3",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz",
"integrity": "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==",
"license": "MIT",
"engines": {
"node": ">=12"
},
@@ -2410,6 +2412,7 @@
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz",
"integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==",
"license": "MIT",
"dependencies": {
"eastasianwidth": "^0.2.0",
"emoji-regex": "^9.2.2",
@@ -2426,6 +2429,7 @@
"version": "8.1.0",
"resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz",
"integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==",
"license": "MIT",
"dependencies": {
"ansi-styles": "^6.1.0",
"string-width": "^5.0.1",
@@ -3326,15 +3330,6 @@
"node": ">=0.10"
}
},
"node_modules/@pkgjs/parseargs": {
"version": "0.11.0",
"resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz",
"integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==",
"optional": true,
"engines": {
"node": ">=14"
}
},
"node_modules/@pkgr/core": {
"version": "0.2.9",
"resolved": "https://registry.npmjs.org/@pkgr/core/-/core-0.2.9.tgz",
@@ -5646,6 +5641,7 @@
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz",
"integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==",
"dev": true,
"dependencies": {
"balanced-match": "^1.0.0"
}
@@ -6843,6 +6839,8 @@
},
"node_modules/eastasianwidth": {
"version": "0.2.0",
"resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz",
"integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==",
"license": "MIT"
},
"node_modules/ecdsa-sig-formatter": {
@@ -8512,11 +8510,12 @@
}
},
"node_modules/foreground-child": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.1.1.tgz",
"integrity": "sha512-TMKDUnIte6bfb5nWv7V/caI169OHgvwjb7V4WkeUvbQQdjr5rWKqHFiKWb/fcOwB+CzBT+qbWjvj+DVwRskpIg==",
"version": "3.3.1",
"resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.3.1.tgz",
"integrity": "sha512-gIXjKqtFuWEgzFRJA9WCQeSJLZDjgJUOMCMzxtvFq/37KojM1BFGufqsCy0r4qSQmYLsZYMeyRqzIWOMup03sw==",
"license": "ISC",
"dependencies": {
"cross-spawn": "^7.0.0",
"cross-spawn": "^7.0.6",
"signal-exit": "^4.0.1"
},
"engines": {
@@ -8530,6 +8529,7 @@
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz",
"integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==",
"license": "ISC",
"engines": {
"node": ">=14"
},
@@ -8755,14 +8755,14 @@
"license": "ISC"
},
"node_modules/glob": {
"version": "11.0.2",
"resolved": "https://registry.npmjs.org/glob/-/glob-11.0.2.tgz",
"integrity": "sha512-YT7U7Vye+t5fZ/QMkBFrTJ7ZQxInIUjwyAjVj84CYXqgBdv30MFUPGnBR6sQaVq6Is15wYJUsnzTuWaGRBhBAQ==",
"license": "ISC",
"version": "11.1.0",
"resolved": "https://registry.npmjs.org/glob/-/glob-11.1.0.tgz",
"integrity": "sha512-vuNwKSaKiqm7g0THUBu2x7ckSs3XJLXE+2ssL7/MfTGPLLcrJQ/4Uq1CjPTtO5cCIiRxqvN6Twy1qOwhL0Xjcw==",
"license": "BlueOak-1.0.0",
"dependencies": {
"foreground-child": "^3.1.0",
"jackspeak": "^4.0.1",
"minimatch": "^10.0.0",
"foreground-child": "^3.3.1",
"jackspeak": "^4.1.1",
"minimatch": "^10.1.1",
"minipass": "^7.1.2",
"package-json-from-dist": "^1.0.0",
"path-scurry": "^2.0.0"
@@ -8789,11 +8789,12 @@
}
},
"node_modules/glob/node_modules/minimatch": {
"version": "10.0.1",
"resolved": "https://registry.npmjs.org/minimatch/-/minimatch-10.0.1.tgz",
"integrity": "sha512-ethXTt3SGGR+95gudmqJ1eNhRO7eGEGIgYA9vnPatK4/etz2MEVDno5GMCibdMTuBMyElzIlgxMna3K94XDIDQ==",
"version": "10.1.1",
"resolved": "https://registry.npmjs.org/minimatch/-/minimatch-10.1.1.tgz",
"integrity": "sha512-enIvLvRAFZYXJzkCYG5RKmPfrFArdLv+R+lbQ53BmIMLIry74bjKzX6iHAm8WYamJkhSSEabrWN5D97XnKObjQ==",
"license": "BlueOak-1.0.0",
"dependencies": {
"brace-expansion": "^2.0.1"
"@isaacs/brace-expansion": "^5.0.0"
},
"engines": {
"node": "20 || >=22"
@@ -10210,9 +10211,10 @@
"license": "ISC"
},
"node_modules/jackspeak": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-4.0.1.tgz",
"integrity": "sha512-cub8rahkh0Q/bw1+GxP7aeSe29hHHn2V4m29nnDlvCdlgU+3UGxkZp7Z53jLUdpX3jdTO0nJZUDl3xvbWc2Xog==",
"version": "4.1.1",
"resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-4.1.1.tgz",
"integrity": "sha512-zptv57P3GpL+O0I7VdMJNBZCu+BPHVQUk55Ft8/QCJjTVxrnJHuVuX/0Bl2A6/+2oyR/ZMEuFKwmzqqZ/U5nPQ==",
"license": "BlueOak-1.0.0",
"dependencies": {
"@isaacs/cliui": "^8.0.2"
},
@@ -10221,9 +10223,6 @@
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
},
"optionalDependencies": {
"@pkgjs/parseargs": "^0.11.0"
}
},
"node_modules/javascript-stringify": {
@@ -10267,7 +10266,9 @@
"license": "MIT"
},
"node_modules/js-yaml": {
"version": "4.1.0",
"version": "4.1.1",
"resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz",
"integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==",
"license": "MIT",
"dependencies": {
"argparse": "^2.0.1"
@@ -14915,6 +14916,7 @@
"version": "4.2.3",
"resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
"integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
"license": "MIT",
"dependencies": {
"emoji-regex": "^8.0.0",
"is-fullwidth-code-point": "^3.0.0",
@@ -14927,12 +14929,14 @@
"node_modules/string-width-cjs/node_modules/emoji-regex": {
"version": "8.0.0",
"resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
"integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="
"integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==",
"license": "MIT"
},
"node_modules/string-width-cjs/node_modules/strip-ansi": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
"integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
"license": "MIT",
"dependencies": {
"ansi-regex": "^5.0.1"
},
@@ -15059,6 +15063,7 @@
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
"integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
"license": "MIT",
"dependencies": {
"ansi-regex": "^5.0.1"
},
@@ -16818,6 +16823,7 @@
"version": "7.0.0",
"resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz",
"integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==",
"license": "MIT",
"dependencies": {
"ansi-styles": "^4.0.0",
"string-width": "^4.1.0",
@@ -16834,6 +16840,7 @@
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
"integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
"license": "MIT",
"dependencies": {
"ansi-regex": "^5.0.1"
},

View File

@@ -188,7 +188,7 @@
"file-type": "21.0.0",
"flat": "^6.0.1",
"github-slugger": "^2.0.0",
"glob": "11.0.2",
"glob": "11.1.0",
"hast-util-from-parse5": "^8.0.3",
"hast-util-to-string": "^3.0.1",
"hastscript": "^9.0.1",
@@ -202,7 +202,7 @@
"is-svg": "6.0.0",
"javascript-stringify": "^2.1.0",
"js-cookie": "^3.0.5",
"js-yaml": "^4.1.0",
"js-yaml": "^4.1.1",
"liquidjs": "^10.16.7",
"lodash": "^4.17.21",
"lodash-es": "^4.17.21",

View File

@@ -34,10 +34,30 @@ interface ChatCompletionResponse {
}
}
export async function callModelsApi(promptWithContent: ChatCompletionRequest): Promise<string> {
export async function callModelsApi(
promptWithContent: ChatCompletionRequest,
verbose = false,
): Promise<string> {
let aiResponse: ChatCompletionChoice
// Set default model if none specified
if (!promptWithContent.model) {
promptWithContent.model = 'openai/gpt-4o'
if (verbose) {
console.log('⚠️ No model specified, using default: openai/gpt-4o')
}
}
try {
// Create an AbortController for timeout handling
const controller = new AbortController()
const timeoutId = setTimeout(() => controller.abort(), 180000) // 3 minutes
const startTime = Date.now()
if (verbose) {
console.log(`🚀 Making API request to GitHub Models using ${promptWithContent.model}...`)
}
const response = await fetch(modelsCompletionsEndpoint, {
method: 'post',
body: JSON.stringify(promptWithContent),
@@ -45,16 +65,80 @@ export async function callModelsApi(promptWithContent: ChatCompletionRequest): P
'Content-Type': 'application/json',
Authorization: `Bearer ${process.env.GITHUB_TOKEN}`,
'X-GitHub-Api-Version': '2022-11-28',
Accept: 'Accept: application/vnd.github+json',
Accept: 'application/vnd.github+json',
},
signal: controller.signal,
})
const fetchTime = Date.now() - startTime
if (verbose) {
console.log(`⏱️ API response received in ${fetchTime}ms`)
}
clearTimeout(timeoutId)
if (!response.ok) {
let errorMessage = `HTTP error! status: ${response.status} - ${response.statusText}`
// Try to get more detailed error information
try {
const errorBody = await response.json()
if (errorBody.error && errorBody.error.message) {
errorMessage += ` - ${errorBody.error.message}`
}
} catch {
// If we can't parse error body, continue with basic error
}
// Add helpful hints for common errors
if (response.status === 401) {
errorMessage += ' (Check your GITHUB_TOKEN)'
} else if (response.status === 400) {
errorMessage += ' (This may be due to an invalid model or malformed request)'
} else if (response.status === 429) {
errorMessage += ' (Rate limit exceeded - try again later)'
}
throw new Error(errorMessage)
}
const data: ChatCompletionResponse = await response.json()
if (!data.choices || data.choices.length === 0) {
throw new Error('No response choices returned from API')
}
aiResponse = data.choices[0]
if (verbose) {
const totalTime = Date.now() - startTime
console.log(`✅ Total API call completed in ${totalTime}ms`)
if (data.usage) {
console.log(
`📊 Tokens: ${data.usage.prompt_tokens} prompt + ${data.usage.completion_tokens} completion = ${data.usage.total_tokens} total`,
)
}
}
} catch (error) {
console.error('Error calling GitHub Models REST API')
if (error instanceof Error) {
if (error.name === 'AbortError') {
throw new Error('API call timed out after 3 minutes')
}
console.error('Error calling GitHub Models REST API:', error.message)
}
throw error
}
return aiResponse.message.content
return cleanAIResponse(aiResponse.message.content)
}
// Helper function to clean up AI response content
function cleanAIResponse(content: string): string {
// Remove markdown code blocks
return content
.replace(/^```[\w]*\n/gm, '') // Remove opening code blocks
.replace(/\n```$/gm, '') // Remove closing code blocks at end
.replace(/\n```\n/gm, '\n') // Remove standalone closing code blocks
.trim()
}

View File

@@ -2,37 +2,20 @@ You are an expert SEO content optimizer specializing in GitHub documentation.
Your task is to analyze a GitHub Docs content file and generate or optimize
the intro frontmatter property following Google's meta description best practices.
## Your mission
## Core Requirements
Generate a single, concise intro (one simple sentence maximum - NO colons, NO detailed explanations) that:
**Primary constraints (must-haves):**
* Start with action verb ("Learn," "Access," "Explore," "Configure," "Set up," "Build")
* One sentence maximum - NO colons, NO detailed explanations
* Avoid buzzwords: "leverage," "optimize," "maximize," "enhance," "streamline," "empower," "revolutionize," "seamlessly," "comprehensive," "enterprise-grade," "cutting-edge," "innovative," "game-changing," "next-generation," "world-class," "best-in-class," "state-of-the-art," "industry-leading," "robust," "scalable," "mission-critical," "synergistic," "holistic," "strategic," "transformative"
* Different approach than title - don't start with same words/phrases
* Lists 2-3 concrete outcomes maximum
* Starts with an action verb (e.g., "Learn," "Discover," "Access," "Explore," "Configure," "Set up," "Build")
* **Uses developer-friendly, direct language** - avoid marketing jargon and corporate buzzwords
* **Prioritizes conciseness over completeness** - cut unnecessary words ruthlessly
* Accurately summarizes the content's core value proposition
* Includes relevant keywords naturally without stuffing
* Follows Google's snippet guidelines (descriptive, informative, compelling)
* Is version-agnostic (no {% ifversion %} blocks, but {% data variables.* %} and {% data reusables.* %} are acceptable)
* Matches the content type (article/category/mapTopic) requirements
* **Goes beyond title restatement** - summarizes the complete article value, not just rephrasing the title
* **Lists concrete steps or outcomes** - what users will actually do or accomplish
* **Limits lists to 2-3 items maximum** - avoid long comma-separated sequences that feel overwhelming
## SEO scoring criteria (1-10 scale)
**10-9 (Excellent)**: Strong action verb, comprehensive content summary, optimal keyword density, clear unique value beyond title, perfect length
**8-7 (Good)**: Action verb present, good content representation, decent keywords, some unique value, appropriate length
**6-5 (Fair)**: Weak action verb or missing, partial content coverage, basic keywords, minimal value beyond title
**4-3 (Poor)**: No action verb, limited content representation, few relevant keywords, mostly restates title
**2-1 (Very Poor)**: Vague or misleading, no clear value proposition, poor keyword usage, completely redundant with title
## Analysis process
1. **Content resolution**: Keep {% data variables.* %} and {% data reusables.* %} but avoid {% ifversion %} blocks
2. **Content analysis**: Identify the article's purpose, target audience, key concepts, and user outcomes
3. **Category detection**: For index pages, analyze child content themes and collective value
4. **SEO optimization**: Use strong action verbs, developer-friendly language, concrete outcomes, and relevant keywords while avoiding corporate buzzwords
**Secondary optimizations (nice-to-haves):**
* Include relevant keywords naturally
* Version-agnostic ({% data variables.* %} OK, avoid {% ifversion %})
* Follow Google snippet guidelines
* Cut unnecessary words ruthlessly
**Content Summarization vs. Title Restatement**:
@@ -47,7 +30,7 @@ Generate a single, concise intro (one simple sentence maximum - NO colons, NO de
- Better: "Use {% data variables.product.prodname_copilot %} chat and code completion to research syntax, practice coding, and master new programming languages faster"
**Use concise, developer-friendly language ({% data variables.* %} OK)**:
- Better intro: "Evaluate use cases, configure security settings, and run pilot trials to successfully deploy {% data variables.copilot.copilot_coding_agent %} in your org"
- Better intro: "Evaluate use cases, configure security settings, and run pilot trials to deploy {% data variables.copilot.copilot_coding_agent %} in your org"
**Avoid overly long lists and colon constructions**:
- Too long: "Scope issues, pick suitable tasks, iterate via PR comments, add repo instructions, enable MCP tools, and preinstall dependencies"
@@ -55,24 +38,13 @@ Generate a single, concise intro (one simple sentence maximum - NO colons, NO de
- Better: "Scope tasks, configure custom instructions, and iterate on pull requests to improve {% data variables.copilot.copilot_coding_agent %} performance"
- Better: "Use {% data variables.product.prodname_copilot %} features like chat and code completion to research syntax, build programs, and learn new programming languages faster"
**Tone Guidelines**:
- **Developer-friendly**: Use direct, practical language
- **Concise over complete**: Cut words ruthlessly
- **Action-oriented**: List what users will actually do
- **Avoid buzzwords**: Skip marketing language and corporate jargon
- **Use concrete verbs**: Instead of "maximize/optimize/enhance" → use "improve," "boost," "increase," or just describe the outcome directly
- **Limit lists**: Maximum 2-3 items in comma-separated lists - prefer flowing sentences over exhaustive enumerations
- **Avoid colon constructions**: Don't use "Do X: detailed explanation of A, B, and C" format - keep it simple and direct
- **Avoid title similarity**: Don't start with the same words/phrases as the article title - approach the topic from a different angle
## Quality Checklist
The intro should answer: "What specific steps will I take?" rather than "What will this comprehensive solution provide?"
## Analysis Process
1. **First Draft**: Generate an initial improved intro following all guidelines above
2. **Title Check**: Compare your draft to the article title - if it starts with similar words, rewrite with a different approach
3. **Self-Review**: Evaluate your draft against the SEO scoring criteria and tone guidelines
4. **Refinement**: If the draft contains buzzwords, weak verbs, title similarity, or scores below 8/10, create a refined version
**Structure**: Action verb + 2-3 concrete outcomes + under 350 characters
**Language**: Direct, practical developer language (no marketing jargon)
**Focus**: What users will DO, not what solution "provides"
**Uniqueness**: Different angle from article title
**Simplicity**: No colons, no complex lists, flowing sentences
## Output format
@@ -84,27 +56,12 @@ Title: "[Article title from frontmatter]"
Original intro: "[Current intro from the article, or "No intro" if none exists]"
Original SEO score: [X]/10
------------------------
Improved intro: "[Single, concise intro that summarizes the article's full content value, not just restating the title]"
Improved SEO score: [X]/10
SEO-friendly alternative: "[Single, concise intro that summarizes the article's full content value, not just restating the title]"
------------------------
```
Note: The improved score should reflect your best attempt after internal refinement.
## Character limits by content type
**Priority: Conciseness over character limits**
- Focus on being as concise as possible while maintaining clarity
- Cut every unnecessary word before considering length
- Developer-friendly brevity trumps hitting character targets
**Technical limits** (for reference):
- **Articles**: Maximum 354 characters
- **Categories**: Maximum 362 characters
- **Map Topics**: Maximum 362 characters
@@ -125,3 +82,17 @@ Note: The improved score should reflect your best attempt after internal refinem
- {% data variables.copilot.copilot_coding_agent %} = "Copilot Coding Agent"
Focus on creating intros that would make sense to someone discovering this content through Google search, clearly communicating the value and relevance of the article.
<!-- IF_WRITE_MODE -->
## WRITE MODE INSTRUCTIONS
**CRITICAL**: You are in write mode. Output ONLY the YAML frontmatter property to update.
- Return just: `intro: "your improved intro text"`
- Do NOT include analysis, scoring, explanations, or formatting
- Do NOT wrap in markdown code blocks or ```yaml
- Do NOT include the analysis format shown above
- Just return the clean YAML property line
<!-- END_WRITE_MODE -->

View File

@@ -6,4 +6,6 @@ messages:
content: >-
Review this content file according to the provided system prompt.
{{input}}
model: openai/gpt-5
model: openai/gpt-4o # Reliable model that works
temperature: 0.3 # Lower temperature for consistent results
max_completion_tokens: 4000 # Maximum response length

View File

@@ -7,6 +7,8 @@ import ora from 'ora'
import { execSync } from 'child_process'
import { callModelsApi } from '@/ai-tools/lib/call-models-api'
import dotenv from 'dotenv'
import readFrontmatter from '@/frame/lib/read-frontmatter'
import { schema } from '@/frame/lib/frontmatter'
dotenv.config({ quiet: true })
const __dirname = path.dirname(fileURLToPath(import.meta.url))
@@ -28,35 +30,92 @@ if (!process.env.GITHUB_TOKEN) {
}
}
interface EditorType {
description: string
// Dynamically discover available editor types from prompt files
const getAvailableEditorTypes = (): string[] => {
const editorTypes: string[] = []
try {
const promptFiles = fs.readdirSync(promptDir)
for (const file of promptFiles) {
if (file.endsWith('.md')) {
const editorName = path.basename(file, '.md')
editorTypes.push(editorName)
}
}
} catch {
console.warn('Could not read prompts directory, using empty editor types')
}
interface EditorTypes {
versioning: EditorType
intro: EditorType
return editorTypes
}
const editorTypes: EditorTypes = {
versioning: {
description: 'Refine versioning according to simplification guidance.',
},
intro: {
description: 'Refine intro frontmatter based on SEO and content guidelines.',
},
const editorTypes = getAvailableEditorTypes()
// Enhanced recursive markdown file finder with symlink, depth, and root path checks
const findMarkdownFiles = (
dir: string,
rootDir: string,
depth: number = 0,
maxDepth: number = 20,
visited: Set<string> = new Set(),
): string[] => {
const markdownFiles: string[] = []
let realDir: string
try {
realDir = fs.realpathSync(dir)
} catch {
// If we can't resolve real path, skip this directory
return []
}
// Prevent escaping root directory
if (!realDir.startsWith(rootDir)) {
return []
}
// Prevent symlink loops
if (visited.has(realDir)) {
return []
}
visited.add(realDir)
// Prevent excessive depth
if (depth > maxDepth) {
return []
}
let entries: fs.Dirent[]
try {
entries = fs.readdirSync(realDir, { withFileTypes: true })
} catch {
// If we can't read directory, skip
return []
}
for (const entry of entries) {
const fullPath = path.join(realDir, entry.name)
let realFullPath: string
try {
realFullPath = fs.realpathSync(fullPath)
} catch {
continue
}
// Prevent escaping root directory for files
if (!realFullPath.startsWith(rootDir)) {
continue
}
if (entry.isDirectory()) {
markdownFiles.push(...findMarkdownFiles(realFullPath, rootDir, depth + 1, maxDepth, visited))
} else if (entry.isFile() && entry.name.endsWith('.md')) {
markdownFiles.push(realFullPath)
}
}
return markdownFiles
}
const refinementDescriptions = (): string => {
let str = '\n\n'
for (const [ed, edObj] of Object.entries(editorTypes)) {
str += ` ${ed.padEnd(12)} ${edObj.description}\n`
}
return str
return editorTypes.join(', ')
}
interface CliOptions {
verbose?: boolean
refine: Array<keyof EditorTypes>
prompt?: string[]
refine?: string[]
files: string[]
write?: boolean
}
@@ -71,9 +130,10 @@ program
'-w, --write',
'Write changes back to the original files (default: output to console only)',
)
.requiredOption(
.option('-p, --prompt <type...>', `Specify one or more prompt type: ${refinementDescriptions()}`)
.option(
'-r, --refine <type...>',
`Specify one or more refinement type: ${refinementDescriptions().trimEnd()}\n`,
`(Deprecated: use --prompt) Specify one or more prompt type: ${refinementDescriptions()}`,
)
.requiredOption(
'-f, --files <files...>',
@@ -84,7 +144,30 @@ program
const spinner = ora('Starting AI review...').start()
const files = options.files
const editors = options.refine
// Handle both --prompt and --refine options for backwards compatibility
const prompts = options.prompt || options.refine
if (!prompts || prompts.length === 0) {
spinner.fail('No prompt type specified. Use --prompt or --refine with one or more types.')
process.exitCode = 1
return
}
// Validate that all requested editor types exist
const availableEditors = editorTypes
for (const editor of prompts) {
if (!availableEditors.includes(editor)) {
spinner.fail(
`Unknown prompt type: ${editor}. Available types: ${availableEditors.join(', ')}`,
)
process.exitCode = 1
return
}
}
if (options.verbose) {
console.log(`Processing ${files.length} files with prompts: ${prompts.join(', ')}`)
}
for (const file of files) {
const filePath = path.resolve(process.cwd(), file)
@@ -96,37 +179,101 @@ program
continue
}
try {
spinner.text = `Reading file: ${file}`
const content = fs.readFileSync(filePath, 'utf8')
// Check if it's a directory
const isDirectory = fs.statSync(filePath).isDirectory()
for (const editorType of editors) {
spinner.text = `Running the AI-powered ${editorType} refinement...`
const answer = await callEditor(editorType, content, options.write || false)
for (const editorType of prompts) {
try {
// For other editor types, process individual files
const filesToProcess: string[] = []
if (isDirectory) {
// Find all markdown files in the directory recursively
// Use process.cwd() as the root directory for safety
const rootDir = fs.realpathSync(process.cwd())
filesToProcess.push(...findMarkdownFiles(filePath, rootDir))
if (filesToProcess.length === 0) {
spinner.warn(`No markdown files found in directory: ${file}`)
continue
}
spinner.text = `Found ${filesToProcess.length} markdown files in ${file}`
} else {
filesToProcess.push(filePath)
}
spinner.start()
for (const fileToProcess of filesToProcess) {
const relativePath = path.relative(process.cwd(), fileToProcess)
spinner.text = `Processing: ${relativePath}`
try {
const content = fs.readFileSync(fileToProcess, 'utf8')
const answer = await callEditor(
editorType,
content,
options.write || false,
options.verbose || false,
)
spinner.stop()
if (options.write) {
// Write the result back to the original file
fs.writeFileSync(filePath, answer, 'utf8')
console.log(`✅ Updated: ${file}`)
if (editorType === 'intro') {
// For frontmatter addition/modification, merge properties instead of overwriting entire file
const updatedContent = mergeFrontmatterProperties(fileToProcess, answer)
fs.writeFileSync(fileToProcess, updatedContent, 'utf8')
console.log(`✅ Added frontmatter properties to: ${relativePath}`)
} else {
// For other editor types, write the full result back to the original file
fs.writeFileSync(fileToProcess, answer, 'utf8')
console.log(`✅ Updated: ${relativePath}`)
}
} else {
// Just output to console (current behavior)
if (filesToProcess.length > 1) {
console.log(`\n=== ${relativePath} ===`)
}
console.log(answer)
}
} catch (err) {
const error = err as Error
spinner.fail(`Error processing ${relativePath}: ${error.message}`)
process.exitCode = 1
} finally {
spinner.stop()
}
}
} catch (err) {
const error = err as Error
spinner.fail(`Error processing file ${file}: ${error.message}`)
const targetName = path.relative(process.cwd(), filePath)
spinner.fail(`Error processing ${targetName}: ${error.message}`)
process.exitCode = 1
}
}
}
spinner.stop()
// Exit with appropriate code based on whether any errors occurred
if (process.exitCode) {
process.exit(process.exitCode)
}
})()
})
program.parse(process.argv)
// Handle graceful shutdown
process.on('SIGINT', () => {
console.log('\n\n🛑 Process interrupted by user')
process.exit(0)
})
process.on('SIGTERM', () => {
console.log('\n\n🛑 Process terminated')
process.exit(0)
})
interface PromptMessage {
content: string
role: string
@@ -139,26 +286,111 @@ interface PromptData {
max_tokens?: number
}
async function callEditor(
editorType: keyof EditorTypes,
content: string,
writeMode: boolean,
): Promise<string> {
const markdownPromptPath = path.join(promptDir, `${editorType}.md`)
let markdownPrompt = fs.readFileSync(markdownPromptPath, 'utf8')
// Function to merge new frontmatter properties into existing file while preserving formatting
function mergeFrontmatterProperties(filePath: string, newPropertiesYaml: string): string {
const content = fs.readFileSync(filePath, 'utf8')
const parsed = readFrontmatter(content)
// For intro type in write mode, append special instructions
if (editorType === 'intro' && writeMode) {
markdownPrompt +=
'\n\n**WRITE MODE**: Output only the complete updated file content with the new intro in the frontmatter. Do not include analysis or explanations - just return the file ready to write.'
if (parsed.errors && parsed.errors.length > 0) {
throw new Error(
`Failed to parse frontmatter: ${parsed.errors.map((e) => e.message).join(', ')}`,
)
}
if (!parsed.content) {
throw new Error('Failed to parse content from file')
}
try {
// Clean up the AI response - remove markdown code blocks if present
let cleanedYaml = newPropertiesYaml.trim()
cleanedYaml = cleanedYaml.replace(/^```ya?ml\s*\n/i, '')
cleanedYaml = cleanedYaml.replace(/\n```\s*$/i, '')
cleanedYaml = cleanedYaml.trim()
interface FrontmatterProperties {
intro?: string
[key: string]: unknown
}
const newProperties = yaml.load(cleanedYaml) as FrontmatterProperties
// Security: Validate against prototype pollution using the official frontmatter schema
const allowedKeys = Object.keys(schema.properties)
const sanitizedProperties = Object.fromEntries(
Object.entries(newProperties).filter(([key]) => {
if (allowedKeys.includes(key)) {
return true
}
console.warn(`Filtered out potentially unsafe frontmatter key: ${key}`)
return false
}),
)
// Merge new properties with existing frontmatter
const mergedData: FrontmatterProperties = { ...parsed.data, ...sanitizedProperties }
// Manually ensure intro is wrapped in single quotes in the final output
let result = readFrontmatter.stringify(parsed.content, mergedData)
// Post-process to ensure intro field has single quotes
if (newProperties.intro) {
const introValue = newProperties.intro.toString()
// Replace any quote style on intro with single quotes
result = result.replace(
/^intro:\s*(['"`]?)([^'"`\n\r]+)\1?\s*$/m,
`intro: '${introValue.replace(/'/g, "''")}'`, // Escape single quotes by doubling them
)
}
return result
} catch (error) {
console.error('Failed to parse AI response as YAML:')
console.error('Raw AI response:', JSON.stringify(newPropertiesYaml))
throw new Error(`Failed to parse new frontmatter properties: ${error}`)
}
}
/**
 * Builds the model prompt for a given editor type and sends it to the models API.
 *
 * Reads the editor-specific markdown prompt (`<editorType>.md` in `promptDir`)
 * and the shared YAML prompt template (`promptTemplatePath`), substitutes the
 * `{{markdownPrompt}}` and `{{input}}` template variables, resolves the
 * write-mode conditional sections, and returns the model's raw response.
 *
 * @param editorType - Base name of the prompt file (without the `.md` extension).
 * @param content - Document content substituted for `{{input}}` in each message.
 * @param writeMode - When true, keeps the IF_WRITE_MODE sections and strips the
 *   ELSE_WRITE_MODE sections; when false, the reverse.
 * @param verbose - Forwarded to callModelsApi for extra logging.
 * @returns The raw response string from the models API.
 * @throws If the prompt file is missing or the template lacks a messages array.
 */
async function callEditor(
  editorType: string,
  content: string,
  writeMode: boolean,
  verbose = false,
): Promise<string> {
  const markdownPromptPath = path.join(promptDir, `${String(editorType)}.md`)
  if (!fs.existsSync(markdownPromptPath)) {
    throw new Error(`Prompt file not found: ${markdownPromptPath}`)
  }
  const markdownPrompt = fs.readFileSync(markdownPromptPath, 'utf8')
  const prompt = yaml.load(fs.readFileSync(promptTemplatePath, 'utf8')) as PromptData
  // Validate the prompt template has required properties
  if (!prompt.messages || !Array.isArray(prompt.messages)) {
    throw new Error('Invalid prompt template: missing or invalid messages array')
  }
  for (const msg of prompt.messages) {
    // Note: String.replace with a string pattern only replaces the first
    // occurrence of each template variable per message.
    msg.content = msg.content.replace('{{markdownPrompt}}', markdownPrompt)
    msg.content = msg.content.replace('{{input}}', content)
    // Resolve write-mode conditionals by rewriting the markers so that the
    // unwanted branch is bracketed by REMOVE_START/REMOVE_END.
    msg.content = msg.content.replace(
      /<!-- IF_WRITE_MODE -->/g,
      writeMode ? '' : '<!-- REMOVE_START -->',
    )
    msg.content = msg.content.replace(
      /<!-- ELSE_WRITE_MODE -->/g,
      writeMode ? '<!-- REMOVE_START -->' : '',
    )
    msg.content = msg.content.replace(
      /<!-- END_WRITE_MODE -->/g,
      writeMode ? '' : '<!-- REMOVE_END -->',
    )
    // Remove sections marked for removal
    msg.content = msg.content.replace(/<!-- REMOVE_START -->[\s\S]*?<!-- REMOVE_END -->/g, '')
  }
  // Bug fix: the original had two consecutive return statements
  // (`return callModelsApi(prompt)` followed by `return callModelsApi(prompt, verbose)`),
  // leaving the second unreachable and the `verbose` parameter dead.
  // Keep the single call that forwards the verbose flag.
  return callModelsApi(prompt, verbose)
}

View File

@@ -3,12 +3,13 @@ import { extractVersionFromPath } from '@/app/lib/version-utils'
import { getUIDataMerged } from '@/data-directory/lib/get-data'
import { type LanguageCode } from '@/languages/lib/languages'
import { createTranslationFunctions, translate } from '@/languages/lib/translation-utils'
import type { UIStrings } from '@/frame/components/context/MainContext'
export interface ServerAppRouterContext {
currentLanguage: LanguageCode
currentVersion: string
sitename: string
site: { data: { ui: any } }
site: { data: { ui: UIStrings } }
}
/**

View File

@@ -144,7 +144,11 @@ export default async function dynamicAssets(
assetCacheControl(res)
return res.type('image/webp').send(buffer)
} catch (catchError) {
if (catchError instanceof Error && (catchError as any).code !== 'ENOENT') {
if (
catchError instanceof Error &&
'code' in catchError &&
(catchError as NodeJS.ErrnoException).code !== 'ENOENT'
) {
throw catchError
}
}

View File

@@ -52,10 +52,12 @@ export const journeyTracksGuidePathExists = {
description: 'Journey track guide paths must reference existing content files',
tags: ['frontmatter', 'journey-tracks'],
function: (params: RuleParams, onError: RuleErrorCallback) => {
// Using any for frontmatter as it's a dynamic YAML object with varying properties
const fm: any = getFrontmatter(params.lines)
if (!fm || !fm.journeyTracks || !Array.isArray(fm.journeyTracks)) return
if (!fm.layout || fm.layout !== 'journey-landing') return
// Using unknown for frontmatter as it's a dynamic YAML object with varying properties
const fm: unknown = getFrontmatter(params.lines)
if (!fm || typeof fm !== 'object' || !('journeyTracks' in fm)) return
const fmObj = fm as Record<string, unknown>
if (!Array.isArray(fmObj.journeyTracks)) return
if (!('layout' in fmObj) || fmObj.layout !== 'journey-landing') return
const journeyTracksLine = params.lines.find((line: string) => line.startsWith('journeyTracks:'))
@@ -63,11 +65,13 @@ export const journeyTracksGuidePathExists = {
const journeyTracksLineNumber = params.lines.indexOf(journeyTracksLine) + 1
for (let trackIndex = 0; trackIndex < fm.journeyTracks.length; trackIndex++) {
const track: any = fm.journeyTracks[trackIndex]
if (track.guides && Array.isArray(track.guides)) {
for (let guideIndex = 0; guideIndex < track.guides.length; guideIndex++) {
const guide: string = track.guides[guideIndex]
for (let trackIndex = 0; trackIndex < fmObj.journeyTracks.length; trackIndex++) {
const track: unknown = fmObj.journeyTracks[trackIndex]
if (!track || typeof track !== 'object' || !('guides' in track)) continue
const trackObj = track as Record<string, unknown>
if (trackObj.guides && Array.isArray(trackObj.guides)) {
for (let guideIndex = 0; guideIndex < trackObj.guides.length; guideIndex++) {
const guide: string = trackObj.guides[guideIndex]
if (typeof guide === 'string') {
if (!isValidGuidePath(guide, params.name)) {
addError(

View File

@@ -10,9 +10,11 @@ export const journeyTracksUniqueIds = {
tags: ['frontmatter', 'journey-tracks', 'unique-ids'],
function: function GHD060(params: RuleParams, onError: RuleErrorCallback) {
// Using any for frontmatter as it's a dynamic YAML object with varying properties
const fm: any = getFrontmatter(params.lines)
if (!fm || !fm.journeyTracks || !Array.isArray(fm.journeyTracks)) return
if (!fm.layout || fm.layout !== 'journey-landing') return
const fm: unknown = getFrontmatter(params.lines)
if (!fm || typeof fm !== 'object' || !('journeyTracks' in fm)) return
const fmObj = fm as Record<string, unknown>
if (!Array.isArray(fmObj.journeyTracks)) return
if (!('layout' in fmObj) || fmObj.layout !== 'journey-landing') return
// Find the base journeyTracks line
const journeyTracksLine: string | undefined = params.lines.find((line: string) =>
@@ -37,7 +39,7 @@ export const journeyTracksUniqueIds = {
trackCount++
// Stop once we've found all the tracks we know exist
if (fm && fm.journeyTracks && trackCount >= fm.journeyTracks.length) {
if (Array.isArray(fmObj.journeyTracks) && trackCount >= fmObj.journeyTracks.length) {
break
}
}
@@ -48,11 +50,12 @@ export const journeyTracksUniqueIds = {
// Track seen journey track IDs and line number for error reporting
const seenIds = new Map<string, number>()
for (let index = 0; index < fm.journeyTracks.length; index++) {
const track: any = fm.journeyTracks[index]
if (!track || typeof track !== 'object') continue
for (let index = 0; index < fmObj.journeyTracks.length; index++) {
const track: unknown = fmObj.journeyTracks[index]
if (!track || typeof track !== 'object' || !('id' in track)) continue
const trackId = track.id
const trackObj = track as Record<string, unknown>
const trackId = trackObj.id
if (!trackId || typeof trackId !== 'string') continue
const currentLineNumber = getTrackLineNumber(index)

View File

@@ -15,8 +15,9 @@ import {
isAllVersions,
getFeatureVersionsObject,
isInAllGhes,
isGhesReleaseDeprecated,
} from '@/ghes-releases/scripts/version-utils'
import { deprecated, oldestSupported } from '@/versions/lib/enterprise-server-releases'
import { oldestSupported } from '@/versions/lib/enterprise-server-releases'
import type { RuleParams, RuleErrorCallback } from '@/content-linter/types'
export const liquidIfversionVersions = {
@@ -337,20 +338,10 @@ function updateConditionals(condTagItems: any[]) {
}
// Checks for features that are only available in no
// supported GHES releases
// TODO use isGhesReleaseDeprecated
if (item.versionsObjAll.ghes.startsWith('<=')) {
const releaseNumber = item.versionsObjAll.ghes.replace('<=', '').trim()
if (deprecated.includes(releaseNumber)) {
if (isGhesReleaseDeprecated(oldestSupported, item.versionsObjAll.ghes)) {
item.action.type = 'delete'
continue
}
} else if (item.versionsObjAll.ghes.startsWith('<')) {
const releaseNumber = item.versionsObjAll.ghes.replace('<', '').trim()
if (deprecated.includes(releaseNumber) || releaseNumber === oldestSupported) {
item.action.type = 'delete'
continue
}
}
}
if (item.versionsObj?.feature || item.fileVersionsFm?.feature) break

View File

@@ -284,6 +284,11 @@ async function main() {
// Ensure previous console logging is not truncated
console.log('\n')
const took = end - start
if (warningFileCount > 0 || errorFileCount > 0) {
spinner.info(
`💡 You can disable linter rules for specific lines or blocks of text. See https://gh.io/suppress-linter-rule.\n\n`,
)
}
spinner.info(
`🕦 Markdownlint finished in ${(took > 1000 ? took / 1000 : took).toFixed(1)} ${
took > 1000 ? 's' : 'ms'

View File

@@ -53,7 +53,7 @@ describe.skip('category pages', () => {
// Get links included in product index page.
// Each link corresponds to a product subdirectory (category).
// Example: "getting-started-with-github"
const contents = fs.readFileSync(productIndex, 'utf8') // TODO move to async
const contents = fs.readFileSync(productIndex, 'utf8')
const data = getFrontmatterData(contents)
const productDir = path.dirname(productIndex)
@@ -62,7 +62,6 @@ describe.skip('category pages', () => {
const categoryLinks = children
// Only include category directories, not standalone category files like content/actions/quickstart.md
.filter((link) => fs.existsSync(getPath(productDir, link, 'index')))
// TODO this should move to async, but you can't asynchronously define tests with vitest...
// Map those to the Markdown file paths that represent that category page index
const categoryPaths = categoryLinks.map((link) => getPath(productDir, link, 'index'))

View File

@@ -47,26 +47,27 @@ export const Tool = {
type: 'block' as const,
tagName: '',
// Liquid template objects don't have TypeScript definitions
templates: [] as any[],
templates: [] as unknown[],
// tagToken and remainTokens are Liquid internal types without TypeScript definitions
parse(tagToken: any, remainTokens: any) {
this.tagName = tagToken.name
parse(tagToken: unknown, remainTokens: unknown) {
const token = tagToken as { name: string; getText: () => string }
this.tagName = token.name
this.templates = []
const stream = this.liquid.parser.parseStream(remainTokens)
stream
.on(`tag:end${this.tagName}`, () => stream.stop())
// tpl is a Liquid template object without TypeScript definitions
.on('template', (tpl: any) => this.templates.push(tpl))
.on('template', (tpl: unknown) => this.templates.push(tpl))
.on('end', () => {
throw new Error(`tag ${tagToken.getText()} not closed`)
throw new Error(`tag ${token.getText()} not closed`)
})
stream.start()
},
// scope is a Liquid scope object, Generator yields/returns Liquid template values - no TypeScript definitions available
*render(scope: any): Generator<any, any, any> {
*render(scope: unknown): Generator<unknown, unknown, unknown> {
const output = yield this.liquid.renderer.renderTemplates(this.templates, scope)
return yield this.liquid.parseAndRender(template, {
tagName: this.tagName,

View File

@@ -1,6 +1,6 @@
import type { Response } from 'express'
import type { ExtendedRequest, Page } from '@/types'
import type { ExtendedRequest, Page, Context } from '@/types'
import contextualize from '@/frame/middleware/context/context'
import features from '@/versions/middleware/features'
import shortVersions from '@/versions/middleware/short-versions'
@@ -55,7 +55,7 @@ export async function allDocuments(options: Options): Promise<AllDocument[]> {
const next = () => {}
const res = {}
const pagePath = permalink.href
const context: any = {}
const context: Partial<Context> = {}
const req = {
path: pagePath,
language: permalink.languageCode,
@@ -68,7 +68,7 @@ export async function allDocuments(options: Options): Promise<AllDocument[]> {
await contextualize(req as ExtendedRequest, res as Response, next)
await shortVersions(req as ExtendedRequest, res as Response, next)
req.context.page = page
features(req as any, res as any, next)
features(req as ExtendedRequest, res as Response, next)
const title = fields.includes('title')
? await page.renderProp('title', req.context, { textOnly: true })

View File

@@ -2,7 +2,6 @@ import { getOctokit } from '@actions/github'
import { setOutput } from '@actions/core'
async function main(): Promise<void> {
// TODO Is there a lib function for this?
const { BRANCH_NAME, GITHUB_TOKEN } = process.env
if (!BRANCH_NAME) throw new Error("'BRANCH_NAME' env var not set")
if (!GITHUB_TOKEN) throw new Error("'GITHUB_TOKEN' env var not set")

View File

@@ -6,7 +6,7 @@ import { contentTypesEnum } from '@/frame/lib/frontmatter'
describe('POST /events', () => {
vi.setConfig({ testTimeout: 60 * 1000 })
async function checkEvent(data: any) {
async function checkEvent(data: unknown) {
if (!Array.isArray(data)) {
data = [data]
}

View File

@@ -439,19 +439,6 @@ describe('catches errors thrown in Page class', () => {
await expect(getPage).rejects.toThrowError('versions')
})
// TODO - UNSKIP WHEN GHAE IS UPDATED WITH SEMVER VERSIONING
test.skip('invalid versions frontmatter', async () => {
async function getPage() {
return await Page.init({
relativePath: 'page-with-invalid-product-version.md',
basePath: path.join(__dirname, '../../../src/fixtures/fixtures'),
languageCode: 'en',
})
}
await expect(getPage).rejects.toThrowError('versions')
})
test('English page with a version in frontmatter that its parent product is not available in', async () => {
async function getPage() {
return await Page.init({

View File

@@ -60,14 +60,15 @@ describe('pages module', () => {
// Page objects have dynamic properties from chain/lodash that aren't fully typed
for (const page of englishPages) {
for (const redirect of (page as any).redirect_from) {
for (const version of (page as any).applicableVersions) {
const pageObj = page as Record<string, unknown>
for (const redirect of pageObj.redirect_from as string[]) {
for (const version of pageObj.applicableVersions as string[]) {
const versioned = removeFPTFromPath(path.posix.join('/', version, redirect))
versionedRedirects.push({ path: versioned, file: (page as any).fullPath })
versionedRedirects.push({ path: versioned, file: pageObj.fullPath as string })
if (!redirectToFiles.has(versioned)) {
redirectToFiles.set(versioned, new Set<string>())
}
redirectToFiles.get(versioned)!.add((page as any).fullPath)
redirectToFiles.get(versioned)!.add(pageObj.fullPath as string)
}
}
}
@@ -97,7 +98,7 @@ describe('pages module', () => {
page.languageCode === 'en' && // only check English
!page.relativePath.includes('index.md') && // ignore TOCs
// Page class has dynamic frontmatter properties like 'allowTitleToDifferFromFilename' not in type definition
!(page as any).allowTitleToDifferFromFilename && // ignore docs with override
!(page as Record<string, unknown>).allowTitleToDifferFromFilename && // ignore docs with override
slugger.slug(decode(page.title)) !== path.basename(page.relativePath, '.md') &&
slugger.slug(decode(page.shortTitle || '')) !== path.basename(page.relativePath, '.md')
)
@@ -129,7 +130,7 @@ describe('pages module', () => {
const frontmatterErrors = chain(pages)
// .filter(page => page.languageCode === 'en')
// Page class has dynamic error properties like 'frontmatterErrors' not in type definition
.map((page) => (page as any).frontmatterErrors)
.map((page) => (page as Record<string, unknown>).frontmatterErrors)
.filter(Boolean)
.flatten()
.value()
@@ -149,7 +150,7 @@ describe('pages module', () => {
for (const page of pages) {
// Page class has dynamic properties like 'raw' markdown not in type definition
const markdown = (page as any).raw
const markdown = (page as Record<string, unknown>).raw as string
if (!patterns.hasLiquid.test(markdown)) continue
try {
await liquid.parse(markdown)

View File

@@ -7,6 +7,7 @@ import { loadSiteTree } from '@/frame/lib/page-data'
import nonEnterpriseDefaultVersion from '@/versions/lib/non-enterprise-default-version'
import { formatAjvErrors } from '@/tests/helpers/schemas'
import type { SiteTree, Tree } from '@/types'
import findPageInSiteTree from '@/frame/lib/find-page-in-site-tree'
const latestEnterpriseRelease = EnterpriseServerReleases.latest
@@ -37,15 +38,14 @@ describe('siteTree', () => {
const ghesSiteTree = siteTree.en[ghesLatest]
// Find a page in the tree that we know contains Liquid
// TODO: use new findPageInSiteTree helper when it's available
const pageWithDynamicTitle = ghesSiteTree.childPages
.find((child) => child.href === `/en/${ghesLatest}/admin`)
?.childPages.find(
(child) => child.href === `/en/${ghesLatest}/admin/installing-your-enterprise-server`,
const pageWithDynamicTitle = findPageInSiteTree(
ghesSiteTree,
siteTree.en[nonEnterpriseDefaultVersion],
`/en/${ghesLatest}/admin/installing-your-enterprise-server`,
)
// Confirm the raw title contains Liquid
expect(pageWithDynamicTitle?.page.title).toEqual(
expect(pageWithDynamicTitle.page.title).toEqual(
'Installing {% data variables.product.prodname_enterprise %}',
)
})

View File

@@ -146,8 +146,6 @@ function RestNavListItem({ category }: { category: ProductTreeNode }) {
},
{ rootMargin: '0px 0px -85% 0px' },
)
// TODO: When we add the ## About the {title} API to each operation
// we can remove the h2 here
const headingsList = Array.from(document.querySelectorAll('h2, h3'))
for (const heading of headingsList) {

View File

@@ -8,9 +8,20 @@ import {
import { TitleFromAutotitleError } from '@/content-render/unified/rewrite-local-links'
import Page from '@/frame/lib/page'
// Type aliases for error objects with token information
type ErrorWithToken = Error & { token: { file: string; getPosition: () => number[] } }
type ErrorWithTokenNoFile = Error & { token: { getPosition: () => number[] } }
type ErrorWithTokenNoPosition = Error & { token: { file: string } }
type ErrorWithTokenAndOriginal = Error & {
token: { file: string; getPosition: () => number[] }
originalError: Error
}
describe('Translation Error Comments', () => {
// Mock renderContent for integration tests
let mockRenderContent: MockedFunction<(template: string, context: any) => string>
let mockRenderContent: MockedFunction<
(template: string, context: Record<string, unknown>) => string
>
beforeEach(() => {
mockRenderContent = vi.fn()
@@ -26,7 +37,7 @@ describe('Translation Error Comments', () => {
test('includes all fields when token information is available', () => {
const error = new Error("Unknown tag 'badtag', line:1, col:3")
error.name = 'ParseError'
;(error as any).token = {
;(error as unknown as ErrorWithToken).token = {
file: '/content/test/article.md',
getPosition: () => [1, 3],
}
@@ -48,11 +59,13 @@ describe('Translation Error Comments', () => {
test('includes original error message when available', () => {
const error = new Error("Unknown variable 'variables.nonexistent.value'")
error.name = 'RenderError'
;(error as any).token = {
;(error as unknown as ErrorWithToken).token = {
file: '/content/test/intro.md',
getPosition: () => [3, 15],
}
;(error as any).originalError = new Error('Variable not found: variables.nonexistent.value')
;(error as unknown as ErrorWithTokenAndOriginal).originalError = new Error(
'Variable not found: variables.nonexistent.value',
)
const result = createTranslationFallbackComment(error, 'rawIntro')
@@ -67,7 +80,7 @@ describe('Translation Error Comments', () => {
test('falls back to main error message when no originalError', () => {
const error = new Error('Main error message')
error.name = 'RenderError'
;(error as any).token = {
;(error as unknown as ErrorWithToken).token = {
file: '/content/test.md',
getPosition: () => [1, 1],
}
@@ -82,7 +95,7 @@ describe('Translation Error Comments', () => {
test('includes tokenization error details', () => {
const error = new Error('Unexpected token, line:1, col:10')
error.name = 'TokenizationError'
;(error as any).token = {
;(error as unknown as ErrorWithToken).token = {
file: '/content/test/page.md',
getPosition: () => [1, 10],
}
@@ -152,7 +165,7 @@ describe('Translation Error Comments', () => {
test('handles error with token but no file', () => {
const error = new Error('Error message')
error.name = 'ParseError'
;(error as any).token = {
;(error as unknown as ErrorWithTokenNoFile).token = {
// No file property
getPosition: () => [5, 10],
}
@@ -167,7 +180,7 @@ describe('Translation Error Comments', () => {
test('handles error with token but no getPosition method', () => {
const error = new Error('Error message')
error.name = 'ParseError'
;(error as any).token = {
;(error as unknown as ErrorWithTokenNoPosition).token = {
file: '/content/test.md',
// No getPosition method
}
@@ -246,7 +259,7 @@ describe('Translation Error Comments', () => {
test('comment format is valid HTML', () => {
const error = new Error('Test error')
error.name = 'ParseError'
;(error as any).token = {
;(error as unknown as ErrorWithToken).token = {
file: '/content/test.md',
getPosition: () => [1, 1],
}
@@ -264,7 +277,7 @@ describe('Translation Error Comments', () => {
test('contains all required fields when available', () => {
const error = new Error('Detailed error message')
error.name = 'RenderError'
;(error as any).token = {
;(error as unknown as ErrorWithToken).token = {
file: '/content/detailed-test.md',
getPosition: () => [42, 15],
}
@@ -283,7 +296,7 @@ describe('Translation Error Comments', () => {
test('maintains consistent field order', () => {
const error = new Error('Test message')
error.name = 'ParseError'
;(error as any).token = {
;(error as unknown as ErrorWithToken).token = {
file: '/content/test.md',
getPosition: () => [1, 1],
}
@@ -320,18 +333,20 @@ describe('Translation Error Comments', () => {
}
// Mock renderContent to simulate error for Japanese, success for English
mockRenderContent.mockImplementation((template: string, innerContext: any) => {
mockRenderContent.mockImplementation(
(template: string, innerContext: Record<string, unknown>) => {
if (innerContext.currentLanguage !== 'en' && template.includes('badtag')) {
const error = new Error("Unknown tag 'badtag'")
error.name = 'ParseError'
;(error as any).token = {
;(error as unknown as ErrorWithToken).token = {
file: '/content/test.md',
getPosition: () => [1, 5],
}
throw error
}
return innerContext.currentLanguage === 'en' ? 'English Title' : template
})
},
)
const result = await renderContentWithFallback(mockPage, 'rawTitle', context)
@@ -357,14 +372,16 @@ describe('Translation Error Comments', () => {
},
}
mockRenderContent.mockImplementation((template: string, innerContext: any) => {
mockRenderContent.mockImplementation(
(template: string, innerContext: Record<string, unknown>) => {
if (innerContext.currentLanguage !== 'en' && template.includes('badtag')) {
const error = new Error("Unknown tag 'badtag'")
error.name = 'ParseError'
throw error
}
return 'English Title'
})
},
)
const result = await renderContentWithFallback(mockPage, 'rawTitle', context, {
textOnly: true,
@@ -384,7 +401,7 @@ describe('Translation Error Comments', () => {
const failingCallable = async () => {
const error = new Error("Unknown variable 'variables.bad'")
error.name = 'RenderError'
;(error as any).token = {
;(error as unknown as ErrorWithToken).token = {
file: '/content/article.md',
getPosition: () => [10, 20],
}

View File

@@ -27,8 +27,6 @@ export default async function learningTrack(
const trackName = req.query.learn as string
let trackProduct = req.context.currentProduct as string
// TODO: Once getDeepDataByLanguage is ported to TS
// a more appropriate API would be to use `getDeepDataByLanguage<LearningTracks)(...)`
const allLearningTracks = getDeepDataByLanguage(
'learning-tracks',
req.language!,

View File

@@ -11,8 +11,6 @@ const pages = await loadPageMap(pageList)
const redirects = await loadRedirects(pageList)
describe('learning tracks', () => {
// TODO: Once getDeepDataByLanguage is ported to TS
// a more appropriate API would be to use `getDeepDataByLanguage<LearningTracks)(...)`
const allLearningTracks = getDeepDataByLanguage('learning-tracks', 'en') as LearningTracks
const topLevels = Object.keys(allLearningTracks)

View File

@@ -2,6 +2,9 @@ import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'
import { getAutomaticRequestLogger } from '@/observability/logger/middleware/get-automatic-request-logger'
import type { Request, Response, NextFunction } from 'express'
// Type alias for mock response with overridden end function
type MockResponseWithEnd = Partial<Response> & { end: () => unknown }
describe('getAutomaticRequestLogger', () => {
let originalEnv: typeof process.env
let originalConsoleLog: typeof console.log
@@ -43,7 +46,7 @@ describe('getAutomaticRequestLogger', () => {
}
// Override res.end to simulate response completion
function endOverride(this: any, chunk?: any, encoding?: any) {
function endOverride(this: Response, chunk?: unknown, encoding?: unknown): Response {
if (!responseEnded) {
responseEnded = true
// Simulate a small delay for response time
@@ -54,7 +57,7 @@ describe('getAutomaticRequestLogger', () => {
return this
}
;(mockRes as any).end = endOverride
;(mockRes as { end: typeof endOverride }).end = endOverride
mockNext = vi.fn()
@@ -86,7 +89,7 @@ describe('getAutomaticRequestLogger', () => {
middleware(mockReq as Request, mockRes as Response, mockNext)
// Simulate response completion
;(mockRes as any).end()
;(mockRes as MockResponseWithEnd).end()
// Wait for async logging
await new Promise((resolve) => setTimeout(resolve, 20))
@@ -143,7 +146,7 @@ describe('getAutomaticRequestLogger', () => {
}
// Override res.end to simulate response completion
function endOverride(this: any, chunk?: any, encoding?: any) {
function endOverride(this: Response, chunk?: unknown, encoding?: unknown): Response {
if (!responseEnded) {
responseEnded = true
// Simulate a small delay for response time
@@ -154,7 +157,7 @@ describe('getAutomaticRequestLogger', () => {
return this
}
;(freshMockRes as any).end = endOverride
;(freshMockRes as { end: typeof endOverride }).end = endOverride
const freshMockNext = vi.fn()
@@ -165,7 +168,7 @@ describe('getAutomaticRequestLogger', () => {
freshMockRes as Partial<Response> as Response,
freshMockNext,
)
;(freshMockRes as any).end()
;(freshMockRes as MockResponseWithEnd).end()
// Wait for async logging with longer timeout for CI
await new Promise((resolve) => setTimeout(resolve, 50))
@@ -187,7 +190,7 @@ describe('getAutomaticRequestLogger', () => {
const middleware = getAutomaticRequestLogger()
middleware(mockReq as Request, mockRes as Response, mockNext)
;(mockRes as any).end()
;(mockRes as MockResponseWithEnd).end()
await new Promise((resolve) => setTimeout(resolve, 20))
@@ -202,7 +205,7 @@ describe('getAutomaticRequestLogger', () => {
const middleware = getAutomaticRequestLogger()
middleware(mockReq as Request, mockRes as Response, mockNext)
;(mockRes as any).end()
;(mockRes as MockResponseWithEnd).end()
await new Promise((resolve) => setTimeout(resolve, 20))
@@ -215,7 +218,7 @@ describe('getAutomaticRequestLogger', () => {
const middleware = getAutomaticRequestLogger()
middleware(mockReq as Request, mockRes as Response, mockNext)
;(mockRes as any).end()
;(mockRes as MockResponseWithEnd).end()
await new Promise((resolve) => setTimeout(resolve, 20))
@@ -233,7 +236,7 @@ describe('getAutomaticRequestLogger', () => {
const middleware = getAutomaticRequestLogger()
middleware(mockReq as Request, mockRes as Response, mockNext)
;(mockRes as any).end()
;(mockRes as MockResponseWithEnd).end()
await new Promise((resolve) => setTimeout(resolve, 20))
@@ -252,7 +255,7 @@ describe('getAutomaticRequestLogger', () => {
const middleware = getAutomaticRequestLogger()
middleware(mockReq as Request, mockRes as Response, mockNext)
;(mockRes as any).end()
;(mockRes as MockResponseWithEnd).end()
await new Promise((resolve) => setTimeout(resolve, 20))
@@ -291,7 +294,7 @@ describe('getAutomaticRequestLogger', () => {
const middleware = getAutomaticRequestLogger()
middleware(mockReq as Request, mockRes as Response, mockNext)
;(mockRes as any).end()
;(mockRes as MockResponseWithEnd).end()
// Wait for any potential async logging with longer timeout for CI
await new Promise((resolve) => setTimeout(resolve, 50))
@@ -309,7 +312,7 @@ describe('getAutomaticRequestLogger', () => {
const middleware = getAutomaticRequestLogger()
middleware(mockReq as Request, mockRes as Response, mockNext)
;(mockRes as any).end()
;(mockRes as MockResponseWithEnd).end()
await new Promise((resolve) => setTimeout(resolve, 20))
@@ -320,11 +323,13 @@ describe('getAutomaticRequestLogger', () => {
describe('edge cases', () => {
it('should handle missing content-length header', async () => {
;(mockRes as any).getHeader = vi.fn(() => undefined)
;(mockRes as Partial<Response> & { getHeader: () => undefined }).getHeader = vi.fn(
() => undefined,
)
const middleware = getAutomaticRequestLogger()
middleware(mockReq as Request, mockRes as Response, mockNext)
;(mockRes as any).end()
;(mockRes as MockResponseWithEnd).end()
await new Promise((resolve) => setTimeout(resolve, 20))
@@ -333,11 +338,11 @@ describe('getAutomaticRequestLogger', () => {
})
it('should handle missing status code', async () => {
delete (mockRes as any).statusCode
delete (mockRes as Partial<Response> & { statusCode?: number }).statusCode
const middleware = getAutomaticRequestLogger()
middleware(mockReq as Request, mockRes as Response, mockNext)
;(mockRes as any).end()
;(mockRes as MockResponseWithEnd).end()
await new Promise((resolve) => setTimeout(resolve, 20))
@@ -351,7 +356,7 @@ describe('getAutomaticRequestLogger', () => {
const middleware = getAutomaticRequestLogger()
middleware(mockReq as Request, mockRes as Response, mockNext)
;(mockRes as any).end()
;(mockRes as MockResponseWithEnd).end()
await new Promise((resolve) => setTimeout(resolve, 20))
@@ -368,7 +373,7 @@ describe('getAutomaticRequestLogger', () => {
// Simulate some processing time
await new Promise((resolve) => setTimeout(resolve, 50))
;(mockRes as any).end()
;(mockRes as MockResponseWithEnd).end()
await new Promise((resolve) => setTimeout(resolve, 20))
const endTime = Date.now()

View File

@@ -327,10 +327,6 @@ describe('createLogger', () => {
logger = createLogger('file:///path/to/test.js')
})
it('should include logger context in production logs', () => {
// TODO
})
it('should handle missing logger context gracefully in development', () => {
logger.info('No context test')

Some files were not shown because too many files have changed in this diff Show More