
Merge pull request #41687 from github/repo-sync

Repo sync
Authored by docs-bot on 2025-12-04 13:49:17 -08:00; committed by GitHub.
51 changed files with 1423 additions and 190 deletions

View File

@@ -57,18 +57,20 @@ By default, {% data variables.product.prodname_dependabot %} will stop rebasing
You can use any of the following commands on a {% data variables.product.prodname_dependabot %} pull request.
* `@dependabot cancel merge` cancels a previously requested merge.
* `@dependabot close` closes the pull request and prevents {% data variables.product.prodname_dependabot %} from recreating that pull request. You can achieve the same result by closing the pull request manually.
* `@dependabot ignore this dependency` closes the pull request and prevents {% data variables.product.prodname_dependabot %} from creating any more pull requests for this dependency (unless you reopen the pull request or upgrade to the suggested version of the dependency yourself).
* `@dependabot ignore this major version` closes the pull request and prevents {% data variables.product.prodname_dependabot %} from creating any more pull requests for this major version (unless you reopen the pull request or upgrade to this major version yourself).
* `@dependabot ignore this minor version` closes the pull request and prevents {% data variables.product.prodname_dependabot %} from creating any more pull requests for this minor version (unless you reopen the pull request or upgrade to this minor version yourself).
* `@dependabot ignore this patch version` closes the pull request and prevents {% data variables.product.prodname_dependabot %} from creating any more pull requests for this patch version (unless you reopen the pull request or upgrade to this patch version yourself).
* `@dependabot merge` merges the pull request once your CI tests have passed.
* `@dependabot rebase` rebases the pull request.
* `@dependabot recreate` recreates the pull request, overwriting any edits that have been made to the pull request.
* `@dependabot reopen` reopens the pull request if the pull request is closed.
* `@dependabot show DEPENDENCY_NAME ignore conditions` retrieves information on the ignore conditions for the specified dependency, and comments on the pull request with a table that displays all ignore conditions for the dependency. For example, `@dependabot show express ignore conditions` would find all `ignore` conditions stored for the Express dependency, and comment on the pull request with that information.
* `@dependabot squash and merge` squashes and merges the pull request once your CI tests have passed.
| Command | Description |
| --- | --- |
| `@dependabot cancel merge` | Cancels a previously requested merge. |
| `@dependabot close` | Closes the pull request and prevents {% data variables.product.prodname_dependabot %} from recreating that pull request. You can achieve the same result by closing the pull request manually. |
| `@dependabot ignore this dependency` | Closes the pull request and prevents {% data variables.product.prodname_dependabot %} from creating any more pull requests for this dependency (unless you reopen the pull request or upgrade to the suggested version yourself). |
| `@dependabot ignore this major version` | Closes the pull request and prevents {% data variables.product.prodname_dependabot %} from creating any more pull requests for this major version (unless you reopen the pull request or upgrade to this major version yourself). |
| `@dependabot ignore this minor version` | Closes the pull request and prevents {% data variables.product.prodname_dependabot %} from creating any more pull requests for this minor version (unless you reopen the pull request or upgrade to this minor version yourself). |
| `@dependabot ignore this patch version` | Closes the pull request and prevents {% data variables.product.prodname_dependabot %} from creating any more pull requests for this patch version (unless you reopen the pull request or upgrade to this patch version yourself). |
| `@dependabot merge` | Merges the pull request once your CI tests have passed. |
| `@dependabot rebase` | Rebases the pull request. |
| `@dependabot recreate` | Recreates the pull request, overwriting any edits that have been made to the pull request. |
| `@dependabot reopen` | Reopens the pull request if the pull request is closed. |
| `@dependabot show DEPENDENCY_NAME ignore conditions` | Retrieves information on the ignore conditions for the specified dependency, and comments on the pull request with a table that displays all ignore conditions for the dependency. For example, `@dependabot show express ignore conditions` would find all `ignore` conditions stored for the Express dependency, and comment on the pull request with that information. |
| `@dependabot squash and merge` | Squashes and merges the pull request once your CI tests have passed. |
{% data variables.product.prodname_dependabot %} will react with a "thumbs up" emoji to acknowledge the command, and may respond with a comment on the pull request. While {% data variables.product.prodname_dependabot %} usually responds quickly, some commands may take several minutes to complete if {% data variables.product.prodname_dependabot %} is busy processing other updates or commands.
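Because these commands are just pull request comments, they can also be posted programmatically. The following is a minimal sketch using `@octokit/rest` (the repository, pull request number, and token variable are placeholders, not values from this changeset):

```typescript
// Minimal sketch: post a Dependabot command as a pull request comment.
// Pull request comments are created through the issues comment endpoint.
import { Octokit } from '@octokit/rest'

const octokit = new Octokit({ auth: process.env.GITHUB_TOKEN })

await octokit.rest.issues.createComment({
  owner: 'octo-org', // placeholder
  repo: 'octo-repo', // placeholder
  issue_number: 42, // the Dependabot pull request number
  body: '@dependabot rebase',
})
```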
@@ -80,13 +82,15 @@ For more information, see [AUTOTITLE](/code-security/dependabot/working-with-dep
In {% data variables.product.prodname_dependabot %} pull requests for grouped version updates and security updates, you can use comment commands to ignore and un-ignore updates for specific dependencies and versions. You can use any of the following commands to manage ignore conditions for grouped updates.
* `@dependabot ignore DEPENDENCY_NAME` closes the pull request and prevents {% data variables.product.prodname_dependabot %} from updating this dependency.
* `@dependabot ignore DEPENDENCY_NAME major version` closes the pull request and prevents {% data variables.product.prodname_dependabot %} from updating this dependency's major version.
* `@dependabot ignore DEPENDENCY_NAME minor version` closes the pull request and prevents {% data variables.product.prodname_dependabot %} from updating this dependency's minor version.
* `@dependabot ignore DEPENDENCY_NAME patch version` closes the pull request and prevents {% data variables.product.prodname_dependabot %} from updating this dependency's patch version.
* `@dependabot unignore *` closes the current pull request, clears all `ignore` conditions stored for all dependencies in the group, then opens a new pull request.
* `@dependabot unignore DEPENDENCY_NAME` closes the current pull request, clears all `ignore` conditions stored for the dependency, then opens a new pull request that includes available updates for the specified dependency. For example, `@dependabot unignore lodash` would open a new pull request that includes updates for the Lodash dependency.
* `@dependabot unignore DEPENDENCY_NAME IGNORE_CONDITION` closes the current pull request, clears the stored `ignore` condition, then opens a new pull request that includes available updates for the specified ignore condition. For example, `@dependabot unignore express [< 1.9, > 1.8.0]` would open a new pull request that includes updates for Express between versions 1.8.0 and 1.9.0.
| Command | Description |
| --- | --- |
| `@dependabot ignore DEPENDENCY_NAME` | Closes the pull request and prevents {% data variables.product.prodname_dependabot %} from updating this dependency. |
| `@dependabot ignore DEPENDENCY_NAME major version` | Closes the pull request and prevents {% data variables.product.prodname_dependabot %} from updating this dependency's major version. |
| `@dependabot ignore DEPENDENCY_NAME minor version` | Closes the pull request and prevents {% data variables.product.prodname_dependabot %} from updating this dependency's minor version. |
| `@dependabot ignore DEPENDENCY_NAME patch version` | Closes the pull request and prevents {% data variables.product.prodname_dependabot %} from updating this dependency's patch version. |
| `@dependabot unignore *` | Closes the current pull request, clears all `ignore` conditions stored for all dependencies in the group, then opens a new pull request. |
| `@dependabot unignore DEPENDENCY_NAME` | Closes the current pull request, clears all `ignore` conditions stored for the dependency, then opens a new pull request that includes available updates for the specified dependency. For example, `@dependabot unignore lodash` would open a new pull request that includes updates for the Lodash dependency. |
| `@dependabot unignore DEPENDENCY_NAME IGNORE_CONDITION` | Closes the current pull request, clears the stored `ignore` condition, then opens a new pull request that includes available updates for the specified ignore condition. For example, `@dependabot unignore express [< 1.9, > 1.8.0]` would open a new pull request that includes updates for Express between versions 1.8.0 and 1.9.0. |
> [!TIP]
> When you want to un-ignore a specific ignore condition, use the `@dependabot show DEPENDENCY_NAME ignore conditions` command to quickly check what ignore conditions a dependency currently has.

View File

@@ -17,7 +17,7 @@ category:
Experience less rate limiting and reduce the mental load of choosing a model by letting {% data variables.copilot.copilot_auto_model_selection %} automatically choose the best available model on your behalf.
{% data variables.copilot.copilot_auto_model_selection %} is currently optimized for model availability, choosing from a list of models that may change over time. It currently chooses from {% data variables.copilot.copilot_gpt_41 %}, {% data variables.copilot.copilot_gpt_5_mini %}, {% data variables.copilot.copilot_gpt_5 %}, {% data variables.copilot.copilot_claude_haiku_45 %}, and {% data variables.copilot.copilot_claude_sonnet_45 %}, based on your subscription type.
{% data variables.copilot.copilot_auto_model_selection %} is currently optimized for model availability, choosing from a list of models that may change over time. It currently chooses from {% data variables.copilot.copilot_gpt_41 %}, {% data variables.copilot.copilot_gpt_5_mini %}, {% data variables.copilot.copilot_gpt_51_codex_max %}, {% data variables.copilot.copilot_claude_haiku_45 %}, {% data variables.copilot.copilot_claude_sonnet_45 %}, and {% data variables.copilot.copilot_gemini_3_pro %}, based on your subscription type.
With {% data variables.copilot.copilot_auto_model_selection %}, you benefit from:
* Reduced chances of rate limiting

View File

@@ -310,14 +310,21 @@ If you want to allow {% data variables.product.prodname_copilot_short %} to acce
"github-mcp-server": {
"type": "http",
// Remove "/readonly" to enable wider access to all tools.
// Then, use the "tools" key to specify the subset of tools you'd like to include.
// Then, use the "X-MCP-Toolsets" header to specify which toolsets you'd like to include.
// Use the "tools" field to select individual tools from the toolsets.
"url": "https://api.githubcopilot.com/mcp/readonly",
"tools": ["*"]
"tools": ["*"],
"headers": {
"X-MCP-Toolsets": "repos,issues,users,pull_requests,code_security,secret_protection,actions,web_search"
}
}
}
}
```
For more information on toolsets, refer to the [README](https://github.com/github/github-mcp-server?tab=readme-ov-file#available-toolsets) in the {% data variables.product.github %} Remote MCP Server documentation.
1. Click **Save**.
{% data reusables.actions.sidebar-environment %}
1. Click the `copilot` environment.

View File

@@ -27,6 +27,7 @@ Used for:
* {% data variables.copilot.copilot_gpt_51 %}
* {% data variables.copilot.copilot_gpt_51_codex %}
* {% data variables.copilot.copilot_gpt_51_codex_mini %}
* {% data variables.copilot.copilot_gpt_51_codex_max %}
These models are hosted by OpenAI and {% data variables.product.github %}'s Azure infrastructure.

View File

@@ -69,6 +69,10 @@
multiplier_paid: 0.33
multiplier_free: Not applicable
- name: GPT-5.1-Codex-Max
multiplier_paid: 1.0
multiplier_free: Not applicable
- name: Grok Code Fast 1
multiplier_paid: 0.25
multiplier_free: Not applicable

View File

@@ -67,6 +67,13 @@
ask_mode: true
edit_mode: true
- name: 'GPT-5.1-Codex-Max'
provider: 'OpenAI'
release_status: 'Public preview'
agent_mode: true
ask_mode: true
edit_mode: true
# Anthropic models
- name: 'Claude Haiku 4.5'
provider: 'Anthropic'

View File

@@ -125,6 +125,14 @@
xcode: true
jetbrains: true
- name: GPT-5.1-Codex-Max
dotcom: true
vscode: true
vs: false
eclipse: false
xcode: false
jetbrains: false
- name: Grok Code Fast 1
dotcom: true
vscode: true

View File

@@ -110,6 +110,13 @@
business: true
enterprise: true
- name: GPT-5.1-Codex-Max
free: false
pro: true
pro_plus: true
business: true
enterprise: true
- name: Grok Code Fast 1
free: false
pro: true

View File

@@ -158,6 +158,7 @@ copilot_gpt_5_mini: 'GPT-5 mini'
copilot_gpt_51: 'GPT-5.1'
copilot_gpt_51_codex: 'GPT-5.1-Codex'
copilot_gpt_51_codex_mini: 'GPT-5.1-Codex-Mini'
copilot_gpt_51_codex_max: 'GPT-5.1-Codex-Max'
# OpenAI 'o' series:
copilot_o3: 'o3'
copilot_o4_mini: 'o4-mini'

View File

@@ -13,12 +13,24 @@ Article API endpoints allow consumers to query GitHub Docs for listings of curre
The `/api/article/meta` endpoint powers hovercards, which provide a preview for internal links on <docs.github.com>.
The `/api/article/body` endpoint can serve markdown for both regular articles and autogenerated content (such as REST API documentation) using specialized transformers.
## How it works
The `/api/article` endpoints return information about a page by `pathname`.
The `/api/article/meta` response is returned in JSON format and is heavily cached.
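For example, a client can fetch an article's markdown body with a single GET request. A minimal TypeScript sketch (the pathname is illustrative; `apiVersion` is optional and defaults to the latest version):

```typescript
// Minimal sketch: request an article body as markdown from the Article API.
const params = new URLSearchParams({
  pathname: '/en/get-started/start-your-journey/about-github-and-git',
})
const res = await fetch(`https://docs.github.com/api/article/body?${params}`)
if (!res.ok) throw new Error(`Article API request failed: ${res.status}`)
const markdown = await res.text()
console.log(markdown.slice(0, 200))
```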
### Autogenerated Content Transformers
For autogenerated pages (REST, landing pages, audit logs, webhooks, GraphQL, etc.), the Article API uses specialized transformers to convert the rendered content into markdown format. These transformers are located in `src/article-api/transformers/` and use an extensible architecture.
To add a new transformer for other autogenerated content types (a minimal sketch follows this list):
1. Create a new transformer file implementing the `PageTransformer` interface
2. Register it in `transformers/index.ts`
3. Create a template in `templates/` to configure how the transformer will organize the autogenerated content
4. The transformer will automatically be used by `/api/article/body`
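A minimal sketch of such a transformer, assuming only the `PageTransformer` interface and registry defined in `src/article-api/transformers/types.ts` (the `WebhooksTransformer` name, the `page.autogenerated === 'webhooks'` check, and the output shape are illustrative, not an existing implementation):

```typescript
import type { Context, Page } from '@/types'
import type { PageTransformer } from './types'

// Hypothetical transformer for autogenerated webhook pages.
export class WebhooksTransformer implements PageTransformer {
  canTransform(page: Page): boolean {
    // Only handle pages flagged as autogenerated webhook content.
    return page.autogenerated === 'webhooks'
  }

  async transform(
    page: Page,
    pathname: string,
    context: Context,
    apiVersion?: string,
  ): Promise<string> {
    // A real transformer would render a template here, as the REST
    // transformer does; this sketch only emits the title and intro.
    return `# ${page.title}\n\n${page.intro ?? ''}`
  }
}
```

It would then be registered in `transformers/index.ts` with `transformerRegistry.register(new WebhooksTransformer())`, and `/api/article/body` picks it up automatically via `findTransformer()`.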
## How to get help
For internal folks, ask in the Docs Engineering Slack channel.
@@ -34,12 +46,13 @@ Get article metadata and content in a single object. Equivalent to calling `/art
**Parameters**:
- **pathname** (string) - Article path (e.g. '/en/get-started/article-name')
- **[apiVersion]** (string) - API version for REST pages (optional, defaults to latest)
**Returns**: (object) - JSON object with article metadata and content (`meta` and `body` keys)
**Throws**:
- (Error): 403 - If the article body cannot be retrieved. Reason is given in the error message.
- (Error): 400 - If pathname parameter is invalid.
- (Error): 400 - If pathname or apiVersion parameters are invalid.
- (Error): 404 - If the path is valid, but the page couldn't be resolved.
**Example**:
@@ -63,12 +76,13 @@ Get the contents of an article's body.
**Parameters**:
- **pathname** (string) - Article path (e.g. '/en/get-started/article-name')
- **[apiVersion]** (string) - API version (optional, defaults to latest)
**Returns**: (string) - Article body content in markdown format.
**Throws**:
- (Error): 403 - If the article body cannot be retrieved. Reason is given in the error message.
- (Error): 400 - If pathname parameter is invalid.
- (Error): 400 - If pathname or apiVersion parameters are invalid.
- (Error): 404 - If the path is valid, but the page couldn't be resolved.
**Example**:

View File

@@ -0,0 +1,16 @@
/**
* API Transformer Liquid Tags
*
* This module contains custom Liquid tags used by article-api transformers
* to render API documentation in a consistent format.
*/
import { restTags } from './rest-tags'
// Export all API transformer tags for registration
export const apiTransformerTags = {
...restTags,
}
// Re-export individual tag modules for direct access if needed
export { restTags } from './rest-tags'

View File

@@ -0,0 +1,230 @@
import type { TagToken, Context as LiquidContext } from 'liquidjs'
import { fastTextOnly } from '@/content-render/unified/text-only'
import { renderContent } from '@/content-render/index'
import type { Context } from '@/types'
import type { Parameter, BodyParameter, ChildParameter, StatusCode } from '@/rest/components/types'
import { createLogger } from '@/observability/logger'
const logger = createLogger('article-api/liquid-renderers/rest-tags')
/**
* Custom Liquid tag for rendering REST API parameters
* Usage: {% rest_parameter param %}
*/
export class RestParameter {
private paramName: string
constructor(
token: TagToken,
remainTokens: TagToken[],
liquid: { options: any; parser: any },
private liquidContext?: LiquidContext,
) {
// The tag receives the parameter object from the template context
this.paramName = token.args.trim()
}
async render(ctx: LiquidContext, emitter: any): Promise<void> {
const param = ctx.get([this.paramName]) as Parameter
const context = ctx.get(['context']) as Context
if (!param) {
emitter.write('')
return
}
const lines: string[] = []
const required = param.required ? ' (required)' : ''
const type = param.schema?.type || 'string'
lines.push(`- **\`${param.name}\`** (${type})${required}`)
if (param.description) {
const description = await htmlToMarkdown(param.description, context)
lines.push(` ${description}`)
}
if (param.schema?.default !== undefined) {
lines.push(` Default: \`${param.schema.default}\``)
}
if (param.schema?.enum && param.schema.enum.length > 0) {
lines.push(` Can be one of: ${param.schema.enum.map((v) => `\`${v}\``).join(', ')}`)
}
emitter.write(lines.join('\n'))
}
}
/**
* Custom Liquid tag for rendering REST API body parameters
* Usage: {% rest_body_parameter param indent %}
*/
export class RestBodyParameter {
constructor(
token: TagToken,
remainTokens: TagToken[],
liquid: { options: any; parser: any },
private liquidContext?: LiquidContext,
) {
// Parse arguments - param name and optional indent level
const args = token.args.trim().split(/\s+/)
this.param = args[0]
this.indent = args[1] ? parseInt(args[1]) : 0
}
private param: string
private indent: number
async render(ctx: LiquidContext, emitter: any): Promise<void> {
const param = ctx.get([this.param]) as BodyParameter
const context = ctx.get(['context']) as Context
const indent = this.indent
if (!param) {
emitter.write('')
return
}
const lines: string[] = []
const prefix = ' '.repeat(indent)
const required = param.isRequired ? ' (required)' : ''
const type = param.type || 'string'
lines.push(`${prefix}- **\`${param.name}\`** (${type})${required}`)
if (param.description) {
const description = await htmlToMarkdown(param.description, context)
lines.push(`${prefix} ${description}`)
}
if (param.default !== undefined) {
lines.push(`${prefix} Default: \`${param.default}\``)
}
if (param.enum && param.enum.length > 0) {
lines.push(`${prefix} Can be one of: ${param.enum.map((v) => `\`${v}\``).join(', ')}`)
}
// Handle nested parameters
if (param.childParamsGroups && param.childParamsGroups.length > 0) {
for (const childGroup of param.childParamsGroups) {
lines.push(await renderChildParameter(childGroup, context, indent + 1))
}
}
emitter.write(lines.join('\n'))
}
}
/**
* Custom Liquid tag for rendering REST API status codes
* Usage: {% rest_status_code statusCode %}
*/
export class RestStatusCode {
private statusCodeName: string
constructor(
token: TagToken,
remainTokens: TagToken[],
liquid: { options: any; parser: any },
private liquidContext?: LiquidContext,
) {
this.statusCodeName = token.args.trim()
}
async render(ctx: LiquidContext, emitter: any): Promise<void> {
const statusCode = ctx.get([this.statusCodeName]) as StatusCode
const context = ctx.get(['context']) as Context
if (!statusCode) {
emitter.write('')
return
}
const lines: string[] = []
if (statusCode.description) {
const description = await htmlToMarkdown(statusCode.description, context)
lines.push(`- **${statusCode.httpStatusCode}**`)
if (description.trim()) {
lines.push(` ${description.trim()}`)
}
} else if (statusCode.httpStatusMessage) {
lines.push(`- **${statusCode.httpStatusCode}** - ${statusCode.httpStatusMessage}`)
} else {
lines.push(`- **${statusCode.httpStatusCode}**`)
}
emitter.write(lines.join('\n'))
}
}
/**
* Helper function to render child parameters recursively
*/
async function renderChildParameter(
param: ChildParameter,
context: Context,
indent: number,
): Promise<string> {
const lines: string[] = []
const prefix = ' '.repeat(indent)
const required = param.isRequired ? ' (required)' : ''
const type = param.type || 'string'
lines.push(`${prefix}- **\`${param.name}\`** (${type})${required}`)
if (param.description) {
const description = await htmlToMarkdown(param.description, context)
lines.push(`${prefix} ${description}`)
}
if (param.default !== undefined) {
lines.push(`${prefix} Default: \`${param.default}\``)
}
if (param.enum && param.enum.length > 0) {
lines.push(`${prefix} Can be one of: ${param.enum.map((v: string) => `\`${v}\``).join(', ')}`)
}
// Recursively handle nested parameters
if (param.childParamsGroups && param.childParamsGroups.length > 0) {
for (const child of param.childParamsGroups) {
lines.push(await renderChildParameter(child, context, indent + 1))
}
}
return lines.join('\n')
}
/**
* Helper function to convert HTML to markdown
*/
async function htmlToMarkdown(html: string, context: Context): Promise<string> {
if (!html) return ''
try {
const rendered = await renderContent(html, context, { textOnly: false })
return fastTextOnly(rendered)
} catch (error) {
logger.error('Failed to render HTML content to markdown in REST tag', {
error,
html: html.substring(0, 100), // First 100 chars for context
contextInfo: context && context.page ? { page: context.page.relativePath } : undefined,
})
// In non-production, re-throw to aid debugging
if (process.env.NODE_ENV !== 'production') {
throw error
}
// Fallback to simple text extraction
return fastTextOnly(html)
}
}
// Export tag names for registration
export const restTags = {
rest_parameter: RestParameter,
rest_body_parameter: RestBodyParameter,
rest_status_code: RestStatusCode,
}

View File

@@ -3,20 +3,15 @@ import type { Response } from 'express'
import { Context } from '@/types'
import { ExtendedRequestWithPageInfo } from '@/article-api/types'
import contextualize from '@/frame/middleware/context/context'
import { transformerRegistry } from '@/article-api/transformers'
import { allVersions } from '@/versions/lib/all-versions'
import type { Page } from '@/types'
export async function getArticleBody(req: ExtendedRequestWithPageInfo) {
// req.pageinfo is set from pageValidationMiddleware and pathValidationMiddleware
// and is in the ExtendedRequestWithPageInfo
const { page, pathname, archived } = req.pageinfo
if (archived?.isArchived)
throw new Error(`Page ${pathname} is archived and can't be rendered in markdown.`)
// for anything that's not an article (like index pages), don't try to render and
// tell the user what's going on
if (page.documentType !== 'article') {
throw new Error(`Page ${pathname} isn't yet available in markdown.`)
}
// these parts allow us to render the page
/**
* Creates a mocked rendering request and contextualizes it.
* This is used to prepare a request for rendering pages in markdown format.
*/
async function createContextualizedRenderingRequest(pathname: string, page: Page) {
const mockedContext: Context = {}
const renderingReq = {
path: pathname,
@@ -29,9 +24,51 @@ export async function getArticleBody(req: ExtendedRequestWithPageInfo) {
},
}
// contextualize and render the page
// contextualize the request to get proper version info
await contextualize(renderingReq as ExtendedRequestWithPageInfo, {} as Response, () => {})
renderingReq.context.page = page
return renderingReq
}
export async function getArticleBody(req: ExtendedRequestWithPageInfo) {
// req.pageinfo is set from pageValidationMiddleware and pathValidationMiddleware
// and is in the ExtendedRequestWithPageInfo
const { page, pathname, archived } = req.pageinfo
if (archived?.isArchived)
throw new Error(`Page ${pathname} is archived and can't be rendered in markdown.`)
// Extract apiVersion from query params if provided
const apiVersion = req.query.apiVersion as string | undefined
// Check if there's a transformer for this page type (e.g., REST, webhooks, etc.)
const transformer = transformerRegistry.findTransformer(page)
if (transformer) {
// Use the transformer for autogenerated pages
const renderingReq = await createContextualizedRenderingRequest(pathname, page)
// Determine the API version to use (provided or latest)
// Validation is handled by apiVersionValidationMiddleware
const currentVersion = renderingReq.context.currentVersion
let effectiveApiVersion = apiVersion
// Use latest version if not provided
if (!effectiveApiVersion && currentVersion && allVersions[currentVersion]) {
effectiveApiVersion = allVersions[currentVersion].latestApiVersion || undefined
}
return await transformer.transform(page, pathname, renderingReq.context, effectiveApiVersion)
}
// For regular articles (non-autogenerated)
if (page.documentType !== 'article') {
throw new Error(`Page ${pathname} isn't yet available in markdown.`)
}
// these parts allow us to render the page
const renderingReq = await createContextualizedRenderingRequest(pathname, page)
renderingReq.context.markdownRequested = true
return await page.render(renderingReq.context)
}

View File

@@ -4,7 +4,11 @@ import express from 'express'
import { defaultCacheControl } from '@/frame/middleware/cache-control'
import catchMiddlewareError from '@/observability/middleware/catch-middleware-error'
import { ExtendedRequestWithPageInfo } from '../types'
import { pageValidationMiddleware, pathValidationMiddleware } from './validation'
import {
pageValidationMiddleware,
pathValidationMiddleware,
apiVersionValidationMiddleware,
} from './validation'
import { getArticleBody } from './article-body'
import { getMetadata } from './article-pageinfo'
import {
@@ -24,9 +28,10 @@ const router = express.Router()
* Get article metadata and content in a single object. Equivalent to calling `/article/meta` concatenated with `/article/body`.
* @route GET /api/article
* @param {string} pathname - Article path (e.g. '/en/get-started/article-name')
* @param {string} [apiVersion] - API version for REST pages (optional, defaults to latest)
* @returns {object} JSON object with article metadata and content (`meta` and `body` keys)
* @throws {Error} 403 - If the article body cannot be retrieved. Reason is given in the error message.
* @throws {Error} 400 - If pathname parameter is invalid.
* @throws {Error} 400 - If pathname or apiVersion parameters are invalid.
* @throws {Error} 404 - If the path is valid, but the page couldn't be resolved.
* @example
* curl -s "https://docs.github.com/api/article?pathname=/en/get-started/start-your-journey/about-github-and-git"
@@ -43,6 +48,7 @@ router.get(
'/',
pathValidationMiddleware as RequestHandler,
pageValidationMiddleware as RequestHandler,
apiVersionValidationMiddleware as RequestHandler,
catchMiddlewareError(async function (req: ExtendedRequestWithPageInfo, res: Response) {
const { meta, cacheInfo } = await getMetadata(req)
let bodyContent
@@ -66,9 +72,10 @@ router.get(
* Get the contents of an article's body.
* @route GET /api/article/body
* @param {string} pathname - Article path (e.g. '/en/get-started/article-name')
* @param {string} [apiVersion] - API version (optional, defaults to latest)
* @returns {string} Article body content in markdown format.
* @throws {Error} 403 - If the article body cannot be retrieved. Reason is given in the error message.
* @throws {Error} 400 - If pathname parameter is invalid.
* @throws {Error} 400 - If pathname or apiVersion parameters are invalid.
* @throws {Error} 404 - If the path is valid, but the page couldn't be resolved.
* @example
* curl -s https://docs.github.com/api/article/body\?pathname=/en/get-started/start-your-journey/about-github-and-git
@@ -83,6 +90,7 @@ router.get(
'/body',
pathValidationMiddleware as RequestHandler,
pageValidationMiddleware as RequestHandler,
apiVersionValidationMiddleware as RequestHandler,
catchMiddlewareError(async function (req: ExtendedRequestWithPageInfo, res: Response) {
let bodyContent
try {

View File

@@ -6,6 +6,7 @@ import { isArchivedVersionByPath } from '@/archives/lib/is-archived-version'
import getRedirect from '@/redirects/lib/get-redirect'
import { getVersionStringFromPath, getLangFromPath } from '@/frame/lib/path-utils'
import nonEnterpriseDefaultVersion from '@/versions/lib/non-enterprise-default-version'
import { allVersions } from '@/versions/lib/all-versions'
// validates the path for pagelist endpoint
// specifically, defaults to `/en/free-pro-team@latest` when those values are missing
@@ -123,3 +124,47 @@ export const pageValidationMiddleware = (
return next()
}
export const apiVersionValidationMiddleware = (
req: ExtendedRequestWithPageInfo,
res: Response,
next: NextFunction,
) => {
const apiVersion = req.query.apiVersion as string | string[] | undefined
// If no apiVersion is provided, continue (it will default to latest)
if (!apiVersion) {
return next()
}
// Validate apiVersion is a single string, not an array
if (Array.isArray(apiVersion)) {
return res.status(400).json({ error: "Multiple 'apiVersion' keys" })
}
// Get the version from the pathname query parameter
const pathname = req.pageinfo?.pathname || (req.query.pathname as string)
if (!pathname) {
// This should not happen as pathValidationMiddleware runs first
throw new Error('pathname not available for apiVersion validation')
}
// Extract version from the pathname
const currentVersion = getVersionStringFromPath(pathname) || nonEnterpriseDefaultVersion
const versionInfo = allVersions[currentVersion]
if (!versionInfo) {
return res.status(400).json({ error: `Invalid version '${currentVersion}'` })
}
const validApiVersions = versionInfo.apiVersions || []
// If this version has API versioning, validate the provided version
if (validApiVersions.length > 0 && !validApiVersions.includes(apiVersion)) {
return res.status(400).json({
error: `Invalid apiVersion '${apiVersion}' for ${currentVersion}. Valid API versions are: ${validApiVersions.join(', ')}`,
})
}
return next()
}

View File

@@ -0,0 +1,100 @@
# {{ page.title }}
{{ page.intro }}
{{ manualContent }}
{% for operation in restOperations %}
## {{ operation.title }}
```
{{ operation.verb | upcase }} {{ operation.requestPath }}
```
{{ operation.description }}
{% if operation.hasParameters %}
### Parameters
{% if operation.showHeaders %}
#### Headers
{% if operation.needsContentTypeHeader %}
- **`content-type`** (string, required)
Setting to `application/json` is required.
{% endif %}
- **`accept`** (string)
Setting to `application/vnd.github+json` is recommended.
{% endif %}
{% if operation.parameters.size > 0 %}
#### Path and query parameters
{% for param in operation.parameters %}
{% rest_parameter param %}
{% endfor %}
{% endif %}
{% if operation.bodyParameters.size > 0 %}
#### Body parameters
{% for param in operation.bodyParameters %}
{% rest_body_parameter param %}
{% endfor %}
{% endif %}
{% endif %}
{% if operation.statusCodes.size > 0 %}
### HTTP response status codes
{% for statusCode in operation.statusCodes %}
- **{{ statusCode.httpStatusCode }}**{% if statusCode.description %} - {{ statusCode.description }}{% elsif statusCode.httpStatusMessage %} - {{ statusCode.httpStatusMessage }}{% endif %}
{% endfor %}
{% endif %}
{% if operation.codeExamples.size > 0 %}
### Code examples
{% for example in operation.codeExamples %}
{% if example.request.description %}
#### {{ example.request.description }}
{% endif %}
**Request:**
```curl
curl -L \
-X {{ operation.verb | upcase }} \
{{ example.request.url }} \
{%- if example.request.acceptHeader %}
-H "Accept: {{ example.request.acceptHeader }}" \
{%- endif %}
-H "Authorization: Bearer <YOUR-TOKEN>"{% if apiVersion %} \
-H "X-GitHub-Api-Version: {{ apiVersion }}"{% endif -%}
{%- if example.request.bodyParameters %} \
-d '{{ example.request.bodyParameters }}'{% endif %}
```
**Response schema:**
{% if example.response.schema %}
```json
Status: {{ example.response.statusCode }}
{{ example.response.schema }}
```
{% else %}
```
Status: {{ example.response.statusCode }}
```
{% endif %}
{% endfor %}
{% endif %}
{% endfor %}

View File

@@ -0,0 +1,309 @@
import { beforeAll, describe, expect, test } from 'vitest'
import { get } from '@/tests/helpers/e2etest'
const makeURL = (pathname: string, apiVersion?: string): string => {
const params = new URLSearchParams({ pathname })
if (apiVersion) {
params.set('apiVersion', apiVersion)
}
return `/api/article/body?${params}`
}
describe('REST transformer', () => {
beforeAll(() => {
if (!process.env.ROOT) {
console.warn(
'WARNING: The REST transformer tests require the ROOT environment variable to be set to the fixture root',
)
}
})
test('REST page renders with markdown structure', async () => {
const res = await get(makeURL('/en/rest/actions/artifacts'))
expect(res.statusCode).toBe(200)
expect(res.headers['content-type']).toContain('text/markdown')
// Check for the main heading
expect(res.body).toContain('# GitHub Actions Artifacts')
// Check for intro (using fixture's prodname_actions which is 'HubGit Actions')
expect(res.body).toContain('Use the REST API to interact with artifacts in HubGit Actions.')
// Check for manual content section heading
expect(res.body).toContain('## About artifacts in HubGit Actions')
})
test('REST operations are formatted correctly', async () => {
const res = await get(makeURL('/en/rest/actions/artifacts'))
expect(res.statusCode).toBe(200)
// Check for operation heading
expect(res.body).toContain('## List artifacts for a repository')
// Check for HTTP method and endpoint
expect(res.body).toContain('GET /repos/{owner}/{repo}/actions/artifacts')
// Check for operation description
expect(res.body).toContain('Lists all artifacts for a repository.')
})
test('Parameters section includes headers', async () => {
const res = await get(makeURL('/en/rest/actions/artifacts'))
expect(res.statusCode).toBe(200)
// Check for parameters heading
expect(res.body).toContain('### Parameters')
// Check for headers section
expect(res.body).toContain('#### Headers')
// Check for accept header
expect(res.body).toContain('**`accept`** (string)')
expect(res.body).toContain('Setting to `application/vnd.github+json` is recommended.')
})
test('Path and query parameters are listed', async () => {
const res = await get(makeURL('/en/rest/actions/artifacts'))
expect(res.statusCode).toBe(200)
// Check for path and query parameters section
expect(res.body).toContain('#### Path and query parameters')
// Check for specific parameters
expect(res.body).toContain('**`owner`** (string) (required)')
expect(res.body).toContain('The account owner of the repository.')
expect(res.body).toContain('**`repo`** (string) (required)')
expect(res.body).toContain('**`per_page`** (integer)')
expect(res.body).toContain('Default: `30`')
})
test('Status codes are formatted correctly', async () => {
const res = await get(makeURL('/en/rest/actions/artifacts'))
expect(res.statusCode).toBe(200)
// Check for status codes section
expect(res.body).toContain('### HTTP response status codes')
// Check for specific status code
expect(res.body).toContain('**200**')
expect(res.body).toContain('OK')
})
test('Code examples include curl with proper formatting', async () => {
const res = await get(makeURL('/en/rest/actions/artifacts'))
expect(res.statusCode).toBe(200)
// Check for code examples section
expect(res.body).toContain('### Code examples')
// Check for request/response labels
expect(res.body).toContain('**Request:**')
expect(res.body).toContain('**Response schema:**')
// Check for curl code block
expect(res.body).toContain('```curl')
expect(res.body).toContain('curl -L \\')
expect(res.body).toContain('-X GET \\')
expect(res.body).toContain('https://api.github.com/repos/OWNER/REPO/actions/artifacts \\')
expect(res.body).toContain('-H "Accept: application/vnd.github.v3+json" \\')
expect(res.body).toContain('-H "Authorization: Bearer <YOUR-TOKEN>"')
})
test('Code examples include X-GitHub-Api-Version header by default', async () => {
const res = await get(makeURL('/en/rest/actions/artifacts'))
expect(res.statusCode).toBe(200)
// Check for API version header in curl example
expect(res.body).toContain('-H "X-GitHub-Api-Version: 2022-11-28"')
})
test('Code examples include specified API version', async () => {
const res = await get(makeURL('/en/rest/actions/artifacts', '2022-11-28'))
expect(res.statusCode).toBe(200)
// Check for the specified API version header
expect(res.body).toContain('-H "X-GitHub-Api-Version: 2022-11-28"')
})
test('Liquid tags are rendered in intro', async () => {
const res = await get(makeURL('/en/rest/actions/artifacts'))
expect(res.statusCode).toBe(200)
// Liquid tags should be rendered, not shown as raw tags (fixture uses 'HubGit Actions')
expect(res.body).toContain('HubGit Actions')
expect(res.body).not.toContain('{% data variables.product.prodname_actions %}')
// Check in both the intro and the manual content section
expect(res.body).toMatch(/Use the REST API to interact with artifacts in HubGit Actions/)
expect(res.body).toMatch(/About artifacts in HubGit Actions/)
})
test('AUTOTITLE links are resolved', async () => {
const res = await get(makeURL('/en/rest/actions/artifacts'))
expect(res.statusCode).toBe(200)
// Check that AUTOTITLE has been resolved to actual link text
// The link should have the actual page title, not "AUTOTITLE"
expect(res.body).toContain('[Storing workflow data as artifacts]')
expect(res.body).toContain('(/en/actions/using-workflows/storing-workflow-data-as-artifacts)')
// Make sure the raw AUTOTITLE tag is not present
expect(res.body).not.toContain('[AUTOTITLE]')
// Verify the link appears in the manual content section
expect(res.body).toMatch(
/About artifacts in HubGit Actions[\s\S]*Storing workflow data as artifacts/,
)
})
test('Markdown links are preserved in descriptions', async () => {
const res = await get(makeURL('/en/rest/actions/artifacts'))
expect(res.statusCode).toBe(200)
// Check that markdown links are preserved
expect(res.body).toMatch(/\[.*?\]\(\/en\/.*?\)/)
})
test('Response schema is formatted correctly', async () => {
const res = await get(makeURL('/en/rest/actions/artifacts'))
expect(res.statusCode).toBe(200)
// Check for JSON code block with schema label
expect(res.body).toContain('**Response schema:**')
expect(res.body).toContain('```json')
expect(res.body).toContain('Status: 200')
// Verify schema structure is present (not an example)
expect(res.body).toContain('"type":')
expect(res.body).toContain('"properties":')
// Check for common schema keywords
const schemaMatch = res.body.match(/```json\s+Status: 200\s+([\s\S]*?)```/)
expect(schemaMatch).toBeTruthy()
if (schemaMatch) {
const schemaContent = schemaMatch[1]
const schema = JSON.parse(schemaContent)
// Verify it's a valid OpenAPI/JSON schema structure
expect(schema).toHaveProperty('type')
expect(schema.type).toBe('object')
expect(schema).toHaveProperty('properties')
// Verify it has expected properties for artifacts response
expect(schema.properties).toHaveProperty('total_count')
expect(schema.properties).toHaveProperty('artifacts')
}
})
test('Non-REST pages return appropriate error', async () => {
const res = await get(makeURL('/en/get-started/start-your-journey/hello-world'))
expect(res.statusCode).toBe(200)
// Regular article pages should still work, they just won't use the transformer
expect(res.body).toContain('## Introduction')
})
test('Invalid apiVersion returns 400 error', async () => {
// An invalid API version should return a validation error with 400 status
const res = await get(makeURL('/en/rest/actions/artifacts', 'invalid-version'))
// Returns 400 because the apiVersion is invalid (client error)
expect(res.statusCode).toBe(400)
const parsed = JSON.parse(res.body)
expect(parsed.error).toContain("Invalid apiVersion 'invalid-version'")
expect(parsed.error).toContain('Valid API versions are:')
expect(parsed.error).toContain('2022-11-28')
})
test('Multiple apiVersion query parameters returns 400 error', async () => {
// Multiple apiVersion parameters should be rejected
const res = await get(
'/api/article/body?pathname=/en/rest/actions/artifacts&apiVersion=2022-11-28&apiVersion=2023-01-01',
)
expect(res.statusCode).toBe(400)
const parsed = JSON.parse(res.body)
expect(parsed.error).toBe("Multiple 'apiVersion' keys")
})
test('Valid apiVersion passes validation', async () => {
// A valid API version should work
const res = await get(makeURL('/en/rest/actions/artifacts', '2022-11-28'))
expect(res.statusCode).toBe(200)
expect(res.body).toContain('-H "X-GitHub-Api-Version: 2022-11-28"')
})
test('Missing apiVersion defaults to latest', async () => {
// When no apiVersion is provided, it should default to the latest version
const res = await get(makeURL('/en/rest/actions/artifacts'))
expect(res.statusCode).toBe(200)
// Should include the default API version header
expect(res.body).toContain('-H "X-GitHub-Api-Version: 2022-11-28"')
})
test('Multiple operations on a page are all rendered', async () => {
const res = await get(makeURL('/en/rest/actions/artifacts'))
expect(res.statusCode).toBe(200)
// Check for multiple operation headings
expect(res.body).toContain('## List artifacts for a repository')
expect(res.body).toContain('## Get an artifact')
expect(res.body).toContain('## Delete an artifact')
})
test('Body parameters are formatted correctly for POST/PUT operations', async () => {
const res = await get(makeURL('/en/rest/actions/artifacts'))
expect(res.statusCode).toBe(200)
// For operations with body parameters, check formatting
// (artifacts endpoint is mostly GET/DELETE, but structure should be there)
// The transformer handles body parameters when present
})
test('Content-type header is included for operations that need it', async () => {
const res = await get(makeURL('/en/rest/actions/artifacts'))
expect(res.statusCode).toBe(200)
// Content-type header appears for operations that require it
// The REST transformer adds this based on the operation data
})
test('Non-English language paths work correctly', async () => {
// Note: This test may fail in dev mode with ENABLED_LANGUAGES=en
// but the transformer itself should handle any language path
const res = await get(makeURL('/ja/rest/actions/artifacts'))
expect(res.statusCode).toBe(200)
// The transformer should work regardless of language prefix
// because it looks for 'rest' in the path and gets the category/subcategory after it
// e.g. /ja/rest/actions/artifacts should work the same as /en/rest/actions/artifacts
// Verify the operation content is present (in English, since REST data is not translated)
expect(res.body).toContain('## List artifacts for a repository')
expect(res.body).toContain('GET /repos/{owner}/{repo}/actions/artifacts')
// Check what language is actually being served by examining the response
// If Japanese translations are loaded, the title will be in Japanese
// Otherwise, it falls back to English
const hasJapaneseTitle = res.body.includes('# GitHub Actions アーティファクト')
const hasEnglishTitle = res.body.includes('# GitHub Actions Artifacts')
// One of them must be present
expect(hasJapaneseTitle || hasEnglishTitle).toBe(true)
// Verify the appropriate content based on which language was served
if (hasJapaneseTitle) {
// If Japanese is loaded, expect Japanese intro text
expect(res.body).toContain('アーティファクト')
} else {
// If Japanese is not loaded, expect English fallback
expect(res.body).toContain('Use the REST API to interact with artifacts in HubGit Actions')
}
})
})

View File

@@ -0,0 +1,18 @@
import { TransformerRegistry } from './types'
import { RestTransformer } from './rest-transformer'
/**
* Global transformer registry
* Registers all available page-to-markdown transformers
*/
export const transformerRegistry = new TransformerRegistry()
// Register REST transformer
transformerRegistry.register(new RestTransformer())
// Future transformers can be registered here:
// transformerRegistry.register(new WebhooksTransformer())
// transformerRegistry.register(new GitHubAppsTransformer())
export { TransformerRegistry } from './types'
export type { PageTransformer } from './types'

View File

@@ -0,0 +1,210 @@
import type { Context, Page } from '@/types'
import type { PageTransformer } from './types'
import type { Operation } from '@/rest/components/types'
import { renderContent } from '@/content-render/index'
import matter from '@gr2m/gray-matter'
import { readFileSync } from 'fs'
import { join, dirname } from 'path'
import { fileURLToPath } from 'url'
import { fastTextOnly } from '@/content-render/unified/text-only'
const __filename = fileURLToPath(import.meta.url)
const __dirname = dirname(__filename)
/**
* Transformer for REST API pages
* Converts REST operations and their data into markdown format using a Liquid template
*/
export class RestTransformer implements PageTransformer {
canTransform(page: Page): boolean {
// Only transform REST pages that are not landing pages
// Landing pages (like /en/rest) will be handled by a separate transformer
return page.autogenerated === 'rest' && !page.relativePath.endsWith('index.md')
}
async transform(
page: Page,
pathname: string,
context: Context,
apiVersion?: string,
): Promise<string> {
// Import getRest dynamically to avoid circular dependencies
const { default: getRest } = await import('@/rest/lib/index')
// Extract version from context
const currentVersion = context.currentVersion!
// Use the provided apiVersion, or fall back to the latest from context
const effectiveApiVersion =
apiVersion ||
(context.currentVersionObj?.apiVersions?.length
? context.currentVersionObj.latestApiVersion
: undefined)
// Parse the category and subcategory from the page path
// e.g. /en/rest/actions/artifacts -> category: actions, subcategory: artifacts
const pathParts = pathname.split('/').filter(Boolean)
const restIndex = pathParts.indexOf('rest')
if (restIndex === -1 || restIndex >= pathParts.length - 1) {
throw new Error(`Invalid REST path: ${pathname}`)
}
const category = pathParts[restIndex + 1]
const subcategory = pathParts[restIndex + 2] // May be undefined for category-only pages
// Get the REST operations data
const restData = await getRest(currentVersion, effectiveApiVersion)
let operations: Operation[] = []
if (subcategory && restData[category]?.[subcategory]) {
operations = restData[category][subcategory]
} else if (category && restData[category]) {
// For categories without subcategories, operations are nested directly
const categoryData = restData[category]
// Flatten all operations from all subcategories
operations = Object.values(categoryData).flat()
}
// Prepare manual content
let manualContent = ''
if (page.markdown) {
const markerIndex = page.markdown.indexOf(
'<!-- Content after this section is automatically generated -->',
)
if (markerIndex > 0) {
const { content } = matter(page.markdown)
const manualContentMarkerIndex = content.indexOf(
'<!-- Content after this section is automatically generated -->',
)
if (manualContentMarkerIndex > 0) {
const rawManualContent = content.substring(0, manualContentMarkerIndex).trim()
if (rawManualContent) {
manualContent = await renderContent(rawManualContent, {
...context,
markdownRequested: true,
})
}
}
}
}
// Prepare data for template
const templateData = await this.prepareTemplateData(
page,
operations,
context,
manualContent,
effectiveApiVersion,
)
// Load and render template
const templatePath = join(__dirname, '../templates/rest-page.template.md')
const templateContent = readFileSync(templatePath, 'utf8')
// Render the template with Liquid
const rendered = await renderContent(templateContent, {
...context,
...templateData,
markdownRequested: true,
})
return rendered
}
/**
* Prepare data for the Liquid template
*/
private async prepareTemplateData(
page: Page,
operations: Operation[],
context: Context,
manualContent: string,
apiVersion?: string,
): Promise<Record<string, any>> {
// Prepare page intro
const intro = page.intro ? await page.renderProp('intro', context, { textOnly: true }) : ''
// Prepare operations for the template
const preparedOperations = await Promise.all(
operations.map(async (operation) => await this.prepareOperation(operation)),
)
return {
page: {
title: page.title,
intro,
},
manualContent,
restOperations: preparedOperations,
apiVersion,
}
}
/**
* Prepare a single operation for template rendering
*/
private async prepareOperation(operation: Operation): Promise<Record<string, any>> {
// Convert HTML description to text
const description = operation.descriptionHTML ? fastTextOnly(operation.descriptionHTML) : ''
// Determine header settings
const needsContentTypeHeader = operation.subcategory === 'inference'
const omitHeaders =
operation.subcategory === 'management-console' || operation.subcategory === 'manage-ghes'
const showHeaders = !omitHeaders
// Check if operation has parameters
const hasParameters =
(operation.parameters?.length || 0) > 0 || (operation.bodyParameters?.length || 0) > 0
// Process status codes to convert HTML descriptions to plain text
const statusCodes = operation.statusCodes?.map((statusCode) => ({
...statusCode,
description: statusCode.description ? fastTextOnly(statusCode.description) : undefined,
}))
// Prepare code examples with processed URLs
const codeExamples =
operation.codeExamples?.map((example) => {
let url = `${operation.serverUrl}${operation.requestPath}`
// Replace path parameters in URL
if (example.request?.parameters && Object.keys(example.request.parameters).length > 0) {
for (const [key, value] of Object.entries(example.request.parameters)) {
url = url.replace(`{${key}}`, String(value))
}
}
return {
request: {
description: example.request?.description
? fastTextOnly(example.request.description)
: '',
url,
acceptHeader: example.request?.acceptHeader,
bodyParameters: example.request?.bodyParameters
? JSON.stringify(example.request.bodyParameters, null, 2)
: null,
},
response: {
statusCode: example.response?.statusCode,
schema: (example.response as any)?.schema
? JSON.stringify((example.response as any).schema, null, 2)
: null,
},
}
}) || []
return {
...operation,
description,
hasParameters,
showHeaders,
needsContentTypeHeader,
statusCodes,
codeExamples,
}
}
}

View File

@@ -0,0 +1,103 @@
import type { Context, Page } from '@/types'
/**
* Base interface for page-to-markdown transformers
*
* Transformers convert autogenerated pages (REST, webhooks, etc.)
* into markdown format for the Article API
*/
export interface PageTransformer {
/**
* Check if this transformer can handle the given page
*/
canTransform(page: Page): boolean
/**
* Transform the page into markdown format
* @param page - The page to transform
* @param pathname - The pathname of the page
* @param context - The rendering context
* @param apiVersion - Optional API version (e.g., '2022-11-28' for REST API calendar versioning)
*/
transform(page: Page, pathname: string, context: Context, apiVersion?: string): Promise<string>
}
/**
* Registry of available transformers for converting pages to markdown
*
* The TransformerRegistry manages a collection of PageTransformer instances
* and provides a mechanism to find the appropriate transformer for a given page.
*
* Transformers are evaluated in registration order. The first transformer
* whose `canTransform()` method returns true will be selected.
*
* @example
* ```typescript
* const registry = new TransformerRegistry()
*
* // Register transformers in priority order
* registry.register(new RestTransformer())
* registry.register(new WebhookTransformer())
* registry.register(new GraphQLTransformer())
*
* // Find and use a transformer
* const transformer = registry.findTransformer(page)
* if (transformer) {
* const markdown = await transformer.transform(page, pathname, context)
* }
* ```
*
* @remarks
* This class is not thread-safe. In server environments with concurrent requests,
* register all transformers during initialization before handling requests.
*/
export class TransformerRegistry {
private transformers: PageTransformer[] = []
/**
* Register a new transformer
*
* Transformers are evaluated in registration order when finding a match.
* Register more specific transformers before more general ones.
*
* @param transformer - The transformer to register
*
* @example
* ```typescript
* const registry = new TransformerRegistry()
* registry.register(new RestTransformer())
* ```
*/
register(transformer: PageTransformer): void {
this.transformers.push(transformer)
}
/**
* Find a transformer that can handle the given page
*
* Iterates through registered transformers in registration order and returns
* the first transformer whose `canTransform()` method returns true.
*
* @param page - The page to find a transformer for
* @returns The first matching transformer, or null if:
* - The page is null/undefined
* - No registered transformer can handle the page
*
* @example
* ```typescript
* const transformer = registry.findTransformer(page)
* if (transformer) {
* const markdown = await transformer.transform(page, pathname, context)
* } else {
* // Handle case where no transformer is available
* console.warn('No transformer found for page:', page.relativePath)
* }
* ```
*/
findTransformer(page: Page): PageTransformer | null {
if (page == null) {
return null
}
return this.transformers.find((t) => t.canTransform(page)) || null
}
}

View File

@@ -1,20 +1,16 @@
import { Tokenizer, TokenKind } from 'liquidjs'
import type { TopLevelToken, TagToken } from 'liquidjs'
import { deprecated } from '@/versions/lib/enterprise-server-releases'
// Using `any` for the cache because TopLevelToken is a complex union type from liquidjs
// that includes TagToken, OutputToken, and HTMLToken with different properties.
// The cache is private to this module and we control all access to it.
const liquidTokenCache = new Map<string, any>()
// Cache for liquid tokens to improve performance
const liquidTokenCache = new Map<string, TopLevelToken[]>()
// Returns `any[]` instead of `TopLevelToken[]` because TopLevelToken is a union type
// (TagToken | OutputToken | HTMLToken) and consumers of this function access properties
// like `name` and `args` that only exist on TagToken. Using `any` here avoids complex
// type narrowing throughout the codebase.
// Returns TopLevelToken array from liquidjs which is a union of TagToken, OutputToken, and HTMLToken
export function getLiquidTokens(
content: string,
{ noCache = false }: { noCache?: boolean } = {},
): any[] {
): TopLevelToken[] {
if (!content) return []
if (noCache) {
@@ -23,13 +19,13 @@ export function getLiquidTokens(
}
if (liquidTokenCache.has(content)) {
return liquidTokenCache.get(content)
return liquidTokenCache.get(content)!
}
const tokenizer = new Tokenizer(content)
const tokens = tokenizer.readTopLevelTokens()
liquidTokenCache.set(content, tokens)
return liquidTokenCache.get(content)
return liquidTokenCache.get(content)!
}
export const OUTPUT_OPEN = '{%'
@@ -40,10 +36,9 @@ export const TAG_CLOSE = '}}'
export const conditionalTags = ['if', 'elseif', 'unless', 'case', 'ifversion']
const CONDITIONAL_TAG_NAMES = ['if', 'ifversion', 'elsif', 'else', 'endif']
// Token is `any` because it's used with different token types from liquidjs
// that all have `begin` and `end` properties but are part of complex union types.
// Token parameter uses TopLevelToken which has begin and end properties
export function getPositionData(
token: any,
token: TopLevelToken,
lines: string[],
): { lineNumber: number; column: number; length: number } {
// Liquid indexes are 0-based, but we want to
@@ -77,9 +72,9 @@ export function getPositionData(
* by Markdownlint:
* [ { lineNumber: 1, column: 1, deleteCount: 3, }]
*/
// Token is `any` because it's used with different token types from liquidjs.
// Token parameter uses TopLevelToken from liquidjs
export function getContentDeleteData(
token: any,
token: TopLevelToken,
tokenEnd: number,
lines: string[],
): Array<{ lineNumber: number; column: number; deleteCount: number }> {
@@ -123,15 +118,14 @@ export function getContentDeleteData(
// related elsif, else, and endif tags).
// Docs doesn't use the standard `if` tag for versioning, instead the
// `ifversion` tag is used.
// Returns `any[]` because the tokens need to be accessed as TagToken with `name` and `args` properties,
// but TopLevelToken union type would require complex type narrowing.
export function getLiquidIfVersionTokens(content: string): any[] {
// Returns TagToken array since we filter to only Tag tokens
export function getLiquidIfVersionTokens(content: string): TagToken[] {
const tokens = getLiquidTokens(content)
.filter((token) => token.kind === TokenKind.Tag)
.filter((token): token is TagToken => token.kind === TokenKind.Tag)
.filter((token) => CONDITIONAL_TAG_NAMES.includes(token.name))
let inIfStatement = false
const ifVersionTokens: any[] = []
const ifVersionTokens: TagToken[] = []
for (const token of tokens) {
if (token.name === 'if') {
inIfStatement = true

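For readers less familiar with the pattern, here is a small standalone sketch of the type-guard filtering that the updated `getLiquidIfVersionTokens` relies on; the sample template string is illustrative and the helper name is not from the repository.

```typescript
import { Tokenizer, TokenKind } from 'liquidjs'
import type { TagToken, TopLevelToken } from 'liquidjs'

// Narrow a TopLevelToken[] down to TagToken[] so `name` and `args` are typed.
function onlyTagTokens(tokens: TopLevelToken[]): TagToken[] {
  return tokens.filter((token): token is TagToken => token.kind === TokenKind.Tag)
}

// Illustrative template; `data` is the docs reusable tag, not a liquid built-in.
const tokens = new Tokenizer('{% data reusables.actions.example %}').readTopLevelTokens()
for (const tag of onlyTagTokens(tokens)) {
  console.log(tag.name, tag.args) // 'data' 'reusables.actions.example'
}
```
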
View File

@@ -11,8 +11,8 @@ export function addFixErrorDetail(
actual: string,
// Using flexible type to accommodate different range formats from various linting rules
range: [number, number] | number[] | null,
// Using any for fixInfo as markdownlint-rule-helpers accepts various fix info structures
fixInfo: any,
// Using unknown for fixInfo as markdownlint-rule-helpers accepts various fix info structures
fixInfo: unknown,
): void {
addError(onError, lineNumber, `Expected: ${expected}`, ` Actual: ${actual}`, range, fixInfo)
}
@@ -20,9 +20,11 @@ export function addFixErrorDetail(
export function forEachInlineChild(
params: RuleParams,
type: string,
// Using any for child and token types because different linting rules pass tokens with varying structures
// beyond the base MarkdownToken interface (e.g., ImageToken with additional properties)
handler: (child: any, token: any) => void,
// Handler uses `any` for function parameter variance reasons. TypeScript's contravariance rules for function
// parameters mean that a function accepting a specific type cannot be assigned to a parameter of type `unknown`.
// Therefore, `unknown` cannot be used here, as different linting rules pass tokens with varying structures
// beyond the base MarkdownToken interface, and some handlers are async.
handler: (child: any, token?: any) => void | Promise<void>,
): void {
filterTokens(params, 'inline', (token: MarkdownToken) => {
for (const child of token.children!.filter((c) => c.type === type)) {
@@ -146,8 +148,8 @@ export const docsDomains = ['docs.github.com', 'help.github.com', 'developer.git
// This is the format we get from Markdownlint.
// Returns null if the lines do not contain
// frontmatter properties.
// Returns frontmatter as a Record with any values since YAML can contain various types
export function getFrontmatter(lines: string[]): Record<string, any> | null {
// Returns frontmatter as a Record with unknown values since YAML can contain various types
export function getFrontmatter(lines: string[]): Record<string, unknown> | null {
const fmString = lines.join('\n')
const { data } = matter(fmString)
// If there is no frontmatter or the frontmatter contains

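The comment above about parameter variance deserves a concrete illustration. This standalone sketch (not repository code) shows why a handler typed with a specific token cannot be passed where `(child: unknown) => void` is expected, while `any` compiles:

```typescript
type MarkdownToken = { type: string; children?: MarkdownToken[] }

function forEachWithUnknown(tokens: MarkdownToken[], handler: (child: unknown) => void) {
  for (const token of tokens) handler(token)
}
function forEachWithAny(tokens: MarkdownToken[], handler: (child: any) => void) {
  for (const token of tokens) handler(token)
}

const logType = (child: MarkdownToken) => console.log(child.type)

// forEachWithUnknown([{ type: 'inline' }], logType)
// ^ error under strictFunctionTypes: (child: MarkdownToken) => void is not
//   assignable to (child: unknown) => void, because parameters are contravariant.
forEachWithAny([{ type: 'inline' }], logType) // compiles: `any` opts out of the check
```
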
View File

@@ -1,7 +1,7 @@
import { filterTokens } from 'markdownlint-rule-helpers'
import { addFixErrorDetail, getRange } from '../helpers/utils'
import type { RuleParams, RuleErrorCallback, Rule } from '../../types'
import type { RuleParams, RuleErrorCallback, Rule, MarkdownToken } from '../../types'
export const internalLinksSlash: Rule = {
names: ['GHD003', 'internal-links-slash'],
@@ -9,8 +9,8 @@ export const internalLinksSlash: Rule = {
tags: ['links', 'url'],
parser: 'markdownit',
function: (params: RuleParams, onError: RuleErrorCallback) => {
// Using 'any' type for token as markdownlint-rule-helpers doesn't provide TypeScript types
filterTokens(params, 'inline', (token: any) => {
filterTokens(params, 'inline', (token: MarkdownToken) => {
if (!token.children) return
for (const child of token.children) {
if (child.type !== 'link_open') continue
@@ -20,6 +20,7 @@ export const internalLinksSlash: Rule = {
// ['rel', 'canonical'],
// ]
// Attribute arrays are tuples of [attributeName, attributeValue] from markdownit parser
if (!child.attrs) continue
const hrefsMissingSlashes = child.attrs
// The attribute could also be `target` or `rel`
.filter((attr: [string, string]) => attr[0] === 'href')

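As the comment notes, markdown-it exposes link attributes as `[name, value]` tuples. A minimal sketch of the href extraction follows; the sample attributes are made up and the missing-slash condition is simplified relative to the rule itself.

```typescript
type Attr = [string, string]

// Example attribute list as markdown-it would attach it to a link_open token.
const attrs: Attr[] = [
  ['href', 'code-security/dependabot/dependabot-version-updates'],
  ['target', '_blank'],
]

const internalHrefsMissingSlash = attrs
  .filter((attr) => attr[0] === 'href')
  .map((attr) => attr[1])
  .filter((href) => !href.startsWith('/') && !href.startsWith('http'))

console.log(internalHrefsMissingSlash)
// ['code-security/dependabot/dependabot-version-updates']
```
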
View File

@@ -1,5 +1,6 @@
import { addError } from 'markdownlint-rule-helpers'
import { TokenKind } from 'liquidjs'
import type { TagToken } from 'liquidjs'
import { getDataByLanguage } from '@/data-directory/lib/get-data'
import {
@@ -23,10 +24,9 @@ export const liquidDataReferencesDefined = {
parser: 'markdownit',
function: (params: RuleParams, onError: RuleErrorCallback) => {
const content = params.lines.join('\n')
// Using any type because getLiquidTokens returns tokens from liquidjs library without complete type definitions
const tokens = getLiquidTokens(content)
.filter((token: any) => token.kind === TokenKind.Tag)
.filter((token: any) => token.name === 'data' || token.name === 'indented_data_reference')
.filter((token): token is TagToken => token.kind === TokenKind.Tag)
.filter((token) => token.name === 'data' || token.name === 'indented_data_reference')
if (!tokens.length) return
@@ -60,13 +60,11 @@ export const liquidDataTagFormat = {
function: (params: RuleParams, onError: RuleErrorCallback) => {
const CHECK_LIQUID_TAGS = [OUTPUT_OPEN, OUTPUT_CLOSE, '{', '}']
const content = params.lines.join('\n')
// Using any type because getLiquidTokens returns tokens from liquidjs library without complete type definitions
// Tokens have properties like 'kind', 'name', 'args', and 'content' that aren't fully typed
const tokenTags = getLiquidTokens(content).filter((token: any) => token.kind === TokenKind.Tag)
const dataTags = tokenTags.filter((token: any) => token.name === 'data')
const indentedDataTags = tokenTags.filter(
(token: any) => token.name === 'indented_data_reference',
const tokenTags = getLiquidTokens(content).filter(
(token): token is TagToken => token.kind === TokenKind.Tag,
)
const dataTags = tokenTags.filter((token) => token.name === 'data')
const indentedDataTags = tokenTags.filter((token) => token.name === 'indented_data_reference')
for (const token of dataTags) {
// A data tag has only one argument, the data directory path.

View File

@@ -1,4 +1,5 @@
import { addError } from 'markdownlint-rule-helpers'
import type { TopLevelToken } from 'liquidjs'
import {
getLiquidIfVersionTokens,
@@ -35,8 +36,11 @@ export const liquidIfversionVersions = {
const fileVersionsFm = params.name.startsWith('data')
? { ghec: '*', ghes: '*', fpt: '*' }
: fm
? fm.versions
: getFrontmatter(params.frontMatterLines)?.versions
? (fm.versions as string | Record<string, string> | undefined)
: (getFrontmatter(params.frontMatterLines)?.versions as
| string
| Record<string, string>
| undefined)
// This will only contain valid (non-deprecated) and future versions
const fileVersions = getApplicableVersions(fileVersionsFm, '', {
doNotThrow: true,
@@ -134,7 +138,7 @@ function setLiquidErrors(condTagItems: any[], onError: RuleErrorCallback, lines:
{
begin: item.begin,
end: item.end,
},
} as TopLevelToken,
lines,
)
const deleteCount = length - column + 1 === lines[lineNumber - 1].length ? -1 : length
@@ -159,7 +163,7 @@ function setLiquidErrors(condTagItems: any[], onError: RuleErrorCallback, lines:
{
begin: item.contentrange[0],
end: item.contentrange[1],
},
} as TopLevelToken,
lines,
)
const insertText = `${item.action.name || item.name} ${item.action.cond || item.cond}`

View File

@@ -1,4 +1,5 @@
import { TokenKind } from 'liquidjs'
import type { TagToken } from 'liquidjs'
import { addError } from 'markdownlint-rule-helpers'
import { getLiquidTokens, conditionalTags, getPositionData } from '../helpers/liquid-utils'
@@ -19,14 +20,12 @@ export const liquidQuotedConditionalArg: Rule = {
tags: ['liquid', 'format'],
function: (params: RuleParams, onError: RuleErrorCallback) => {
const content = params.lines.join('\n')
// Using 'any' type for tokens as getLiquidTokens returns tokens from liquid-utils.ts which lacks type definitions
const tokens = getLiquidTokens(content)
.filter((token: any) => token.kind === TokenKind.Tag)
.filter((token: any) => conditionalTags.includes(token.name))
.filter((token: any) => {
.filter((token): token is TagToken => token.kind === TokenKind.Tag)
.filter((token) => conditionalTags.includes(token.name))
.filter((token) => {
const tokensArray = token.args.split(/\s+/g)
// Using 'any' for args as they come from the untyped liquid token structure
if (tokensArray.some((arg: any) => isStringQuoted(arg))) return true
if (tokensArray.some((arg) => isStringQuoted(arg))) return true
return false
})

View File

@@ -33,6 +33,7 @@ export const frontmatterLiquidSyntax = {
for (const key of keysWithLiquid) {
const value = fm[key]
if (typeof value !== 'string') continue
try {
liquid.parse(value)
} catch (error) {

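For context, validating a frontmatter value amounts to attempting a parse and reporting whatever is thrown. A minimal sketch with an intentionally broken template (not the rule's exact error handling):

```typescript
import { Liquid } from 'liquidjs'

const liquid = new Liquid()
const value = '{% if foo %}unclosed conditional' // missing {% endif %}

try {
  liquid.parse(value)
} catch (error) {
  // liquidjs throws a ParseError describing the unterminated tag.
  console.warn('Invalid Liquid in frontmatter value:', (error as Error).message)
}
```
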
View File

@@ -1,4 +1,5 @@
import { TokenKind } from 'liquidjs'
import type { TopLevelToken } from 'liquidjs'
import { getLiquidTokens, getPositionData } from '../helpers/liquid-utils'
import { addFixErrorDetail } from '../helpers/utils'
@@ -36,7 +37,10 @@ export const liquidTagWhitespace: Rule = {
(token: LiquidToken) => token.kind === TokenKind.Tag,
)
for (const token of tokens) {
const { lineNumber, column, length } = getPositionData(token, params.lines)
const { lineNumber, column, length } = getPositionData(
token as unknown as TopLevelToken,
params.lines,
)
const range = [column, length]
const tag = params.lines[lineNumber - 1].slice(column - 1, column - 1 + length)

View File

@@ -1,5 +1,6 @@
import semver from 'semver'
import { TokenKind } from 'liquidjs'
import type { TagToken } from 'liquidjs'
import { addError } from 'markdownlint-rule-helpers'
import { getRange, addFixErrorDetail } from '../helpers/utils'
@@ -13,7 +14,7 @@ import type { RuleParams, RuleErrorCallback } from '@/content-linter/types'
interface Feature {
versions: Record<string, string>
[key: string]: any
[key: string]: unknown
}
type AllFeatures = Record<string, Feature>
@@ -60,12 +61,13 @@ export const liquidIfTags = {
function: (params: RuleParams, onError: RuleErrorCallback) => {
const content = params.lines.join('\n')
const tokens = getLiquidTokens(content).filter(
(token) =>
token.kind === TokenKind.Tag &&
token.name === 'if' &&
token.args.split(/\s+/).some((arg: string) => getAllPossibleVersionNames().has(arg)),
)
const tokens = getLiquidTokens(content)
.filter((token): token is TagToken => token.kind === TokenKind.Tag)
.filter(
(token) =>
token.name === 'if' &&
token.args.split(/\s+/).some((arg: string) => getAllPossibleVersionNames().has(arg)),
)
for (const token of tokens) {
const args = token.args
@@ -90,7 +92,7 @@ export const liquidIfVersionTags = {
function: (params: RuleParams, onError: RuleErrorCallback) => {
const content = params.lines.join('\n')
const tokens = getLiquidTokens(content)
.filter((token) => token.kind === TokenKind.Tag)
.filter((token): token is TagToken => token.kind === TokenKind.Tag)
.filter((token) => token.name === 'ifversion' || token.name === 'elsif')
for (const token of tokens) {

View File

@@ -1,5 +1,6 @@
import { addError } from 'markdownlint-rule-helpers'
import { TokenKind } from 'liquidjs'
import type { TopLevelToken } from 'liquidjs'
import path from 'path'
import { getFrontmatter } from '../helpers/utils'
@@ -45,7 +46,10 @@ export const raiReusableUsage: Rule = {
if (dataDirectoryReference.startsWith('reusables.rai')) continue
const lines = params.lines
const { lineNumber, column, length } = getPositionData(token, lines)
const { lineNumber, column, length } = getPositionData(
token as unknown as TopLevelToken,
lines,
)
addError(
onError,
lineNumber,

View File

@@ -22,7 +22,8 @@ import yaml from 'js-yaml'
import { program } from 'commander'
import { loadPages, loadUnversionedTree } from '@/frame/lib/page-data'
import { TokenizationError } from 'liquidjs'
import { TokenizationError, TokenKind } from 'liquidjs'
import type { TagToken } from 'liquidjs'
import readFrontmatter from '@/frame/lib/read-frontmatter'
import { getLiquidTokens } from '@/content-linter/lib/helpers/liquid-utils'
@@ -137,7 +138,10 @@ function getReusableFiles(root = 'data') {
function checkString(string: string, variables: Map<string, string>) {
try {
for (const token of getLiquidTokens(string)) {
const tokens = getLiquidTokens(string).filter(
(token): token is TagToken => token.kind === TokenKind.Tag,
)
for (const token of tokens) {
if (token.name === 'data') {
const { args } = token
variables.delete(args)

View File

@@ -10,6 +10,7 @@ import { Tool, tags as toolTags } from './tool'
import { Spotlight, tags as spotlightTags } from './spotlight'
import { Prompt } from './prompt'
import IndentedDataReference from './indented-data-reference'
import { apiTransformerTags } from '@/article-api/liquid-renderers'
// Type assertions for .js files without type definitions
// Copilot: Remove these assertions when the corresponding .js files are converted to TypeScript
@@ -40,6 +41,11 @@ for (const tag in spotlightTags) {
engine.registerTag('prompt', anyPrompt)
// Register API transformer tags
for (const [tagName, tagClass] of Object.entries(apiTransformerTags)) {
engine.registerTag(tagName, tagClass as any)
}
/**
* Like the `size` filter, but specifically for
* getting the number of keys in an object

View File

@@ -2,25 +2,25 @@
// Defines {% prompt %}…{% endprompt %} to wrap its content in <code> and append the Copilot icon.
import octicons from '@primer/octicons'
import type { TagToken, TopLevelToken } from 'liquidjs'
import { generatePromptId } from '../lib/prompt-id'
interface LiquidTag {
type: 'block'
templates?: any[] // Note: Using 'any' because liquidjs doesn't provide proper types for template objects
// Note: Using 'any' for liquid-related parameters because liquidjs doesn't provide comprehensive TypeScript definitions
parse(tagToken: any, remainTokens: any): void
render(scope: any): Generator<any, string, unknown>
templates?: unknown[]
parse(tagToken: TagToken, remainTokens: TopLevelToken[]): void
render(scope: unknown): Generator<unknown, string, unknown>
}
export const Prompt: LiquidTag = {
type: 'block',
// Collect everything until {% endprompt %}
parse(tagToken: any, remainTokens: any): void {
parse(tagToken: TagToken, remainTokens: TopLevelToken[]): void {
this.templates = []
const stream = this.liquid.parser.parseStream(remainTokens)
stream
.on('template', (tpl: any) => this.templates.push(tpl))
.on('template', (tpl: unknown) => this.templates.push(tpl))
.on('tag:endprompt', () => stream.stop())
.on('end', () => {
throw new Error(`{% prompt %} tag not closed`)
@@ -29,7 +29,7 @@ export const Prompt: LiquidTag = {
},
// Render the inner Markdown, wrap in <code>, then append the SVG
*render(scope: any): Generator<any, string, unknown> {
*render(scope: unknown): Generator<unknown, string, unknown> {
const content = yield this.liquid.renderer.renderTemplates(this.templates, scope)
const contentString = String(content)

View File

@@ -1,5 +1,7 @@
import fs from 'fs'
import path from 'path'
import { TokenKind } from 'liquidjs'
import type { TagToken } from 'liquidjs'
import { getLiquidTokens } from '@/content-linter/lib/helpers/liquid-utils'
import {
getAllContentFilePaths,
@@ -21,7 +23,9 @@ export function findUnused({ absolute }: { absolute: boolean }) {
for (let i = 0; i < totalFiles; i++) {
const filePath = allFilePaths[i]
const fileContents = fs.readFileSync(filePath, 'utf-8')
const liquidTokens = getLiquidTokens(fileContents)
const liquidTokens = getLiquidTokens(fileContents).filter(
(token): token is TagToken => token.kind === TokenKind.Tag,
)
for (const token of liquidTokens) {
const { args, name } = token
if (

View File

@@ -1,5 +1,7 @@
import fs from 'fs'
import path from 'path'
import { TokenKind } from 'liquidjs'
import type { TagToken } from 'liquidjs'
import { getLiquidTokens } from '@/content-linter/lib/helpers/liquid-utils'
import {
FilesWithLineNumbers,
@@ -51,7 +53,9 @@ export function findTopUsed(numberOfMostUsedToFind: number, { absolute }: { abso
const reusableCounts = new Map<string, number>()
for (const filePath of allFilePaths) {
const fileContents = fs.readFileSync(filePath, 'utf-8')
const liquidTokens = getLiquidTokens(fileContents)
const liquidTokens = getLiquidTokens(fileContents).filter(
(token): token is TagToken => token.kind === TokenKind.Tag,
)
for (const token of liquidTokens) {
const { args, name } = token
if (name === 'data' && args.startsWith('reusables.')) {

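The counting loop above pairs naturally with a sort to surface the most-used reusables. A standalone sketch with fabricated token data (only the `name`/`args` shape of a liquidjs `TagToken` is assumed):

```typescript
const tagTokens = [
  { name: 'data', args: 'reusables.actions.example' },
  { name: 'data', args: 'reusables.actions.example' },
  { name: 'data', args: 'variables.product.prodname_actions' },
]

const reusableCounts = new Map<string, number>()
for (const token of tagTokens) {
  if (token.name === 'data' && token.args.startsWith('reusables.')) {
    reusableCounts.set(token.args, (reusableCounts.get(token.args) ?? 0) + 1)
  }
}

// Most-used reusables first, capped at ten entries.
const topUsed = [...reusableCounts.entries()].sort((a, b) => b[1] - a[1]).slice(0, 10)
console.log(topUsed) // [['reusables.actions.example', 2]]
```
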
View File

@@ -1,6 +1,7 @@
import walk from 'walk-sync'
import path from 'path'
import { TokenizationError } from 'liquidjs'
import { TokenizationError, TokenKind } from 'liquidjs'
import type { TagToken } from 'liquidjs'
import { getLiquidTokens } from '@/content-linter/lib/helpers/liquid-utils'
const __dirname = path.dirname(new URL(import.meta.url).pathname)
@@ -56,7 +57,10 @@ export function getReusableLiquidString(reusablePath: string): string {
export function getIndicesOfLiquidVariable(liquidVariable: string, fileContents: string): number[] {
const indices: number[] = []
try {
for (const token of getLiquidTokens(fileContents)) {
const tokens = getLiquidTokens(fileContents).filter(
(token): token is TagToken => token.kind === TokenKind.Tag,
)
for (const token of tokens) {
if (token.name === 'data' && token.args.trim() === liquidVariable) {
indices.push(token.begin)
}

View File

@@ -11,13 +11,13 @@ export interface Context {
currentVersion?: string
currentProduct?: string
markdownRequested?: boolean
pages?: any
redirects?: any
pages?: Record<string, unknown>
redirects?: Record<string, string>
page?: {
fullPath: string
[key: string]: any
[key: string]: unknown
}
[key: string]: any
[key: string]: unknown
}
/**
@@ -27,20 +27,20 @@ export interface RenderOptions {
cache?: boolean | ((template: string, context: Context) => string | null)
filename?: string
textOnly?: boolean
[key: string]: any
[key: string]: unknown
}
/**
* Unified processor plugin function type
*/
export type UnifiedPlugin = (context?: Context) => any
export type UnifiedPlugin = (context?: Context) => unknown
/**
* VFile interface for unified processing
*/
export interface VFile {
toString(): string
[key: string]: any
[key: string]: unknown
}
/**
@@ -48,5 +48,5 @@ export interface VFile {
*/
export interface UnifiedProcessor {
process(content: string): Promise<VFile>
use(plugin: any, ...args: any[]): UnifiedProcessor
use(plugin: unknown, ...args: unknown[]): UnifiedProcessor
}

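Moving the index signatures from `any` to `unknown` means callers must narrow before use. A brief illustration with a locally defined, simplified `Context` (the shape is assumed for the sketch, not imported from the repository):

```typescript
interface Context {
  currentVersion?: string
  [key: string]: unknown
}

const context: Context = {
  currentVersion: 'free-pro-team@latest',
  pageTitle: 'About artifacts',
}

const title = context['pageTitle'] // typed as unknown, not any
if (typeof title === 'string') {
  console.log(title.toUpperCase()) // allowed only after the typeof narrowing
}
```
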
View File

@@ -40,21 +40,21 @@ export function createProcessor(context: Context): UnifiedProcessor {
.use(gfm)
// Markdown AST below vvv
.use(parseInfoString)
// Using 'as any' because rewriteLocalLinks is a factory function that takes context
// Using type assertion because rewriteLocalLinks is a factory function that takes context
// and returns a transformer, but TypeScript's unified plugin types don't handle this pattern
.use(rewriteLocalLinks as any, context)
.use(rewriteLocalLinks as unknown as (ctx: Context) => void, context)
.use(emoji)
// Markdown AST above ^^^
.use(remark2rehype, { allowDangerousHtml: true })
// HTML AST below vvv
.use(slug)
// useEnglishHeadings plugin requires context with englishHeadings property
.use(useEnglishHeadings as any, context || {})
.use(useEnglishHeadings as unknown as (ctx: Context) => void, context || {})
.use(headingLinks)
.use(codeHeader)
.use(annotate, context)
// Using 'as any' for highlight plugin due to complex type mismatch between unified and rehype-highlight
.use(highlight as any, {
// Using type assertion for highlight plugin due to complex type mismatch between unified and rehype-highlight
.use(highlight as unknown as (options: unknown) => void, {
languages: { ...common, graphql, dockerfile, http, groovy, erb, powershell },
subset: false,
aliases: {
@@ -82,9 +82,9 @@ export function createProcessor(context: Context): UnifiedProcessor {
.use(rewriteImgSources)
.use(rewriteAssetImgTags)
// alerts plugin requires context with alertTitles property
.use(alerts as any, context || {})
.use(alerts as unknown as (ctx: Context) => void, context || {})
// HTML AST above ^^^
.use(html) as UnifiedProcessor // String below vvv
.use(html) as unknown as UnifiedProcessor // String below vvv
)
}
@@ -93,10 +93,10 @@ export function createMarkdownOnlyProcessor(context: Context): UnifiedProcessor
unified()
.use(remarkParse)
.use(gfm)
// Using 'as any' because rewriteLocalLinks is a factory function that takes context
// Using type assertion because rewriteLocalLinks is a factory function that takes context
// and returns a transformer, but TypeScript's unified plugin types don't handle this pattern
.use(rewriteLocalLinks as any, context)
.use(remarkStringify) as UnifiedProcessor
.use(rewriteLocalLinks as unknown as (ctx: Context) => void, context)
.use(remarkStringify) as unknown as UnifiedProcessor
)
}
@@ -105,12 +105,12 @@ export function createMinimalProcessor(context: Context): UnifiedProcessor {
unified()
.use(remarkParse)
.use(gfm)
// Using 'as any' because rewriteLocalLinks is a factory function that takes context
// Using type assertion because rewriteLocalLinks is a factory function that takes context
// and returns a transformer, but TypeScript's unified plugin types don't handle this pattern
.use(rewriteLocalLinks as any, context)
.use(rewriteLocalLinks as unknown as (ctx: Context) => void, context)
.use(remark2rehype, { allowDangerousHtml: true })
.use(slug)
.use(raw)
.use(html) as UnifiedProcessor
.use(html) as unknown as UnifiedProcessor
)
}

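A stripped-down version of the processor pipeline above, without the docs-specific plugins, just to show the markdown-to-HTML flow the type assertions are layered onto; the options shown are the standard unified/remark ones, not the repository's configuration.

```typescript
import { unified } from 'unified'
import remarkParse from 'remark-parse'
import remarkGfm from 'remark-gfm'
import remarkRehype from 'remark-rehype'
import rehypeStringify from 'rehype-stringify'

const processor = unified()
  .use(remarkParse) // Markdown AST below vvv
  .use(remarkGfm)
  .use(remarkRehype, { allowDangerousHtml: true }) // HTML AST below vvv
  .use(rehypeStringify, { allowDangerousHtml: true }) // String below vvv

const file = await processor.process('# About artifacts\n\nSee **workflow** data.')
console.log(String(file))
// <h1>About artifacts</h1>\n<p>See <strong>workflow</strong> data.</p>
```
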
View File

@@ -2,14 +2,10 @@ import GithubSlugger from 'github-slugger'
import { encode } from 'html-entities'
import { toString } from 'hast-util-to-string'
import { visit } from 'unist-util-visit'
import type { Element, Root } from 'hast'
const slugger = new GithubSlugger()
// Note: Using 'any' for node because the unist/hast type system is complex and
// the visit function's type constraints don't easily allow for proper element typing
// without extensive type gymnastics. The runtime check ensures type safety.
const matcher = (node: any) => node.type === 'element' && ['h2', 'h3', 'h4'].includes(node.tagName)
interface UseEnglishHeadingsOptions {
englishHeadings?: Record<string, string>
}
@@ -17,12 +13,9 @@ interface UseEnglishHeadingsOptions {
// replace translated IDs and links in headings with English
export default function useEnglishHeadings({ englishHeadings }: UseEnglishHeadingsOptions) {
if (!englishHeadings) return
// Note: Using 'any' for tree because unified's AST types are complex and
// this function works with different tree types depending on the processor
return (tree: any) => {
// Note: Using 'any' for node because visit() callback typing is restrictive
// and doesn't easily allow for proper element typing without complex generics
visit(tree, matcher, (node: any) => {
return (tree: Root) => {
visit(tree, 'element', (node: Element) => {
if (!['h2', 'h3', 'h4'].includes(node.tagName)) return
slugger.reset()
// Get the plain text content of the heading node
const text: string = toString(node)

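The heading plugin above leans on github-slugger for its ids; a quick standalone illustration of the slug and reset behavior it depends on:

```typescript
import GithubSlugger from 'github-slugger'

const slugger = new GithubSlugger()

console.log(slugger.slug('Using workflows')) // 'using-workflows'
console.log(slugger.slug('Using workflows')) // 'using-workflows-1' (deduplicated)

slugger.reset()
console.log(slugger.slug('Using workflows')) // 'using-workflows' again after reset()
```
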
View File

@@ -32,7 +32,8 @@ import fs from 'fs'
import path from 'path'
import chalk from 'chalk'
import { TokenizationError } from 'liquidjs'
import { TokenizationError, TokenKind } from 'liquidjs'
import type { TagToken } from 'liquidjs'
import type { Page } from '@/types'
import warmServer from '@/frame/lib/warm-server'
@@ -246,7 +247,10 @@ function checkString(
// a LOT of different strings in and the cache will fill up rapidly
// when testing every possible string in every possible language for
// every page.
for (const token of getLiquidTokens(string, { noCache: true })) {
const tokens = getLiquidTokens(string, { noCache: true }).filter(
(token): token is TagToken => token.kind === TokenKind.Tag,
)
for (const token of tokens) {
if (token.name === 'ifversion' || token.name === 'elsif') {
for (const arg of token.args.split(/\s+/)) {
if (IGNORE_ARGS.has(arg)) continue

View File

@@ -11,4 +11,5 @@ versions:
ghec: '*'
children:
- /category
- /using-workflows
---

View File

@@ -0,0 +1,12 @@
---
title: Using workflows
intro: Learn how to use workflows in GitHub Actions.
versions:
fpt: '*'
ghec: '*'
ghes: '*'
children:
- /storing-workflow-data-as-artifacts
---
This is a fixture index page for testing.

View File

@@ -0,0 +1,10 @@
---
title: Storing workflow data as artifacts
intro: Artifacts allow you to share data between jobs in a workflow and store data once that workflow has completed.
versions:
fpt: '*'
ghec: '*'
ghes: '*'
---
This is a fixture file for testing links in the REST API artifacts documentation.

View File

@@ -16,4 +16,6 @@ autogenerated: rest
## About artifacts in {% data variables.product.prodname_actions %}
You can use the REST API to download, delete, and retrieve information about workflow artifacts in {% data variables.product.prodname_actions %}. Artifacts enable you to share data between jobs in a workflow and store data once that workflow has completed. For more information, see [AUTOTITLE](/actions/using-workflows/storing-workflow-data-as-artifacts).
<!-- Content after this section is automatically generated -->

View File

@@ -18,6 +18,7 @@ import {
import { useTheme } from '@/color-schemes/components/useTheme'
import { SharedUIContextProvider } from '@/frame/components/context/SharedUIContext'
import { CTAPopoverProvider } from '@/frame/components/context/CTAContext'
import type { ExtendedRequest } from '@/types'
type MyAppProps = AppProps & {
isDotComAuthenticated: boolean
@@ -158,7 +159,7 @@ MyApp.getInitialProps = async (appContext: AppContext) => {
const { ctx } = appContext
// calls page's `getInitialProps` and fills `appProps.pageProps`
const appProps = await App.getInitialProps(appContext)
const req: any = ctx.req
const req = ctx.req as unknown as ExtendedRequest
// Have to define the type manually here because `req.context.languages`
// comes from Node JS and is not type-aware.
@@ -188,11 +189,14 @@ MyApp.getInitialProps = async (appContext: AppContext) => {
}
}
}
const stagingName = req.headers['x-ong-external-url']?.match(/staging-(\w+)\./)?.[1]
const headerValue = req.headers['x-ong-external-url']
const stagingName = (typeof headerValue === 'string' ? headerValue : headerValue?.[0])?.match(
/staging-(\w+)\./,
)?.[1]
return {
...appProps,
languagesContext,
stagingName: stagingNames.has(stagingName) ? stagingName : undefined,
stagingName: stagingName && stagingNames.has(stagingName) ? stagingName : undefined,
}
}

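Node's `IncomingHttpHeaders` values are typed `string | string[] | undefined`, which is why the staging-name lookup above first normalizes the header. A small sketch with a made-up URL; the helper name is illustrative only.

```typescript
function firstHeaderValue(value: string | string[] | undefined): string | undefined {
  return typeof value === 'string' ? value : value?.[0]
}

const headerValue: string | string[] | undefined = 'https://staging-docs.example.com/en'
const stagingName = firstHeaderValue(headerValue)?.match(/staging-(\w+)\./)?.[1]

console.log(stagingName) // 'docs'
```
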
View File

@@ -190,7 +190,7 @@ async function createIssue(
body,
labels,
})
} catch (error: any) {
} catch (error: unknown) {
console.log(`#ERROR# ${error}\n🛑 There was an error creating the issue.`)
throw error
}
@@ -223,7 +223,7 @@ async function updateIssue(
body,
labels,
})
} catch (error: any) {
} catch (error: unknown) {
console.log(
`#ERROR# ${error}\n🛑 There was an error updating issue ${issueNumber} in ${fullRepo}.`,
)
@@ -244,8 +244,13 @@ async function addRepoLabels(fullRepo: string, labels: string[]) {
repo,
name,
})
} catch (error: any) {
if (error.status === 404) {
} catch (error: unknown) {
if (
typeof error === 'object' &&
error !== null &&
'status' in error &&
(error as { status: number }).status === 404
) {
labelsToAdd.push(name)
} else {
console.log(`#ERROR# ${error}\n🛑 There was an error getting the label ${name}.`)
@@ -260,7 +265,7 @@ async function addRepoLabels(fullRepo: string, labels: string[]) {
repo,
name,
})
} catch (error: any) {
} catch (error: unknown) {
console.log(`#ERROR# ${error}\n🛑 There was an error adding the label ${name}.`)
throw error
}

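The inline `status` check above can be captured in a small narrowing helper; this is a sketch, not a helper that exists in the script.

```typescript
// Narrow an unknown error down to "object with a status equal to `status`".
function hasStatus(error: unknown, status: number): boolean {
  return (
    typeof error === 'object' &&
    error !== null &&
    'status' in error &&
    (error as { status: unknown }).status === status
  )
}

try {
  // Simulate an Octokit-style "label not found" failure.
  throw Object.assign(new Error('Not Found'), { status: 404 })
} catch (error: unknown) {
  if (hasStatus(error, 404)) {
    console.log('Label does not exist yet; queueing it for creation.')
  } else {
    throw error
  }
}
```
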
View File

@@ -1,4 +1,5 @@
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'
import type { Request, Response } from 'express'
import { createLogger } from '@/observability/logger'
import { initLoggerContext, updateLoggerContext } from '@/observability/logger/lib/logger-context'
@@ -8,7 +9,7 @@ describe('logger integration tests', () => {
let originalConsoleError: typeof console.error
let originalEnv: typeof process.env
const consoleLogs: string[] = []
const consoleErrors: any[] = []
const consoleErrors: unknown[] = []
beforeEach(() => {
// Store original console methods and environment
@@ -20,7 +21,7 @@ describe('logger integration tests', () => {
console.log = vi.fn((message: string) => {
consoleLogs.push(message)
})
console.error = vi.fn((error: any) => {
console.error = vi.fn((error: unknown) => {
consoleErrors.push(error)
})
@@ -78,9 +79,9 @@ describe('logger integration tests', () => {
'accept-language': 'en-US,en;q=0.9',
},
query: { filter: 'active' },
} as any
} as unknown as Request
const mockRes = {} as any
const mockRes = {} as unknown as Response
// Use a Promise to handle the async local storage execution
const result = await new Promise<void>((resolve, reject) => {

View File

@@ -1,6 +1,8 @@
import { GetServerSideProps } from 'next'
import { Liquid } from 'liquidjs'
import pick from 'lodash/pick'
import get from 'lodash/get'
import type { Response } from 'express'
import {
MainContextT,
@@ -11,6 +13,7 @@ import {
import { DefaultLayout } from '@/frame/components/DefaultLayout'
import { GHESReleaseNotes } from '@/release-notes/components/GHESReleaseNotes'
import { GHESReleaseNotesContextT } from '@/release-notes/components/types'
import type { ExtendedRequest } from '@/types'
const liquid = new Liquid()
type Props = {
@@ -33,22 +36,30 @@ export default function ReleaseNotes({ mainContext, ghesContext }: Props) {
)
}
export const getServerSideProps: GetServerSideProps<Props> = async (context) => {
const req = context.req as any
const res = context.res as any
export const getServerSideProps: GetServerSideProps<Props> = async (
context,
): Promise<{ props: Props }> => {
const req = context.req as unknown as ExtendedRequest
const res = context.res as unknown as Response
// The `req.context.allVersions[X]` entries contain more keys (and values)
// than we need, so only pick out the keys that are actually needed
// explicitly in the components served from these props.
const currentVersion = pick(req.context.allVersions[req.context.currentVersion], [
const currentVersion = pick(req.context!.allVersions?.[req.context!.currentVersion!] || {}, [
'plan',
'planTitle',
'versionTitle',
'currentRelease',
'releases',
])
]) as {
plan?: string
planTitle?: string
versionTitle?: string
currentRelease?: string
releases?: string[]
}
const { latestPatch = '', latestRelease = '' } = req.context
const { latestPatch = '', latestRelease = '' } = req.context!
const mainContext = await getMainContext(req, res)
addUINamespaces(req, mainContext.data.ui, ['release_notes'])
@@ -58,28 +69,39 @@ export const getServerSideProps: GetServerSideProps<Props> = async (context) =>
mainContext,
ghesContext:
currentVersion.plan === 'enterprise-server'
? {
? ({
currentVersion,
latestPatch,
latestRelease,
releaseNotes: req.context.ghesReleaseNotes,
releases: req.context.ghesReleases,
releaseNotes: req.context!.ghesReleaseNotes || [],
releases: req.context!.ghesReleases || [],
message: {
ghes_release_notes_upgrade_patch_only: liquid.parseAndRenderSync(
req.context.site.data.ui.header.notices.ghes_release_notes_upgrade_patch_only,
get(
req.context,
'site.data.ui.header.notices.ghes_release_notes_upgrade_patch_only',
'',
) as string,
{ latestPatch, latestRelease },
),
ghes_release_notes_upgrade_release_only: liquid.parseAndRenderSync(
req.context.site.data.ui.header.notices.ghes_release_notes_upgrade_release_only,
get(
req.context,
'site.data.ui.header.notices.ghes_release_notes_upgrade_release_only',
'',
) as string,
{ latestPatch, latestRelease },
),
ghes_release_notes_upgrade_patch_and_release: liquid.parseAndRenderSync(
req.context.site.data.ui.header.notices
.ghes_release_notes_upgrade_patch_and_release,
get(
req.context,
'site.data.ui.header.notices.ghes_release_notes_upgrade_patch_and_release',
'',
) as string,
{ latestPatch, latestRelease },
),
},
}
} as unknown as GHESReleaseNotesContextT)
: null,
},
}

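The switch to `lodash/get` above trades direct property access for a safe dotted-path lookup with a default. A minimal sketch using placeholder data:

```typescript
import get from 'lodash/get'

// Placeholder context: the notices object is deliberately left empty.
const reqContext = { site: { data: { ui: { header: { notices: {} } } } } }

const notice = get(
  reqContext,
  'site.data.ui.header.notices.ghes_release_notes_upgrade_patch_only',
  '',
) as string

console.log(notice === '') // true: a missing path falls back to the '' default
```
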
View File

@@ -130,12 +130,15 @@ export default async function buildRecords(
})
.on('error', (err) => {
// Track the failure
const url = (err as any).url
const relativePath = (err as any).relativePath
const url = (err as unknown as { url?: string }).url
const relativePath = (err as unknown as { relativePath?: string }).relativePath
// Check for HTTPError by name since it may come from a different module
if ((err instanceof HTTPError || err?.name === 'HTTPError') && (err as any).response) {
const httpErr = err as any
if (
(err instanceof HTTPError || err?.name === 'HTTPError') &&
(err as unknown as HTTPError).response
) {
const httpErr = err as unknown as HTTPError
failedPages.push({
url: httpErr.request?.requestUrl?.pathname || url,
relativePath,
@@ -146,7 +149,7 @@ export default async function buildRecords(
if (!noMarkers) process.stdout.write(chalk.red('✗'))
} else if (err instanceof Error) {
// Enhanced error handling for timeout and network errors
const errorType = (err.cause as any)?.code || err.name
const errorType = (err.cause as unknown as { code?: string })?.code || err.name
const isTimeout =
errorType === 'UND_ERR_HEADERS_TIMEOUT' ||
errorType === 'UND_ERR_CONNECT_TIMEOUT' ||

View File

@@ -14,8 +14,14 @@ interface GetApplicableVersionsOptions {
includeNextVersion?: boolean
}
// Using any for feature data as it's dynamically loaded from YAML files
let featureData: any = null
interface FeatureData {
[featureName: string]: {
versions: VersionsObject
}
}
// Feature data is dynamically loaded from YAML files
let featureData: FeatureData | null = null
const allVersionKeys = Object.keys(allVersions)
@@ -55,13 +61,13 @@ function getApplicableVersions(
? {}
: reduce(
versionsObj,
(result: any, value, key) => {
(result: VersionsObject, value, key) => {
if (key === 'feature') {
if (typeof value === 'string') {
Object.assign(result, { ...featureData[value]?.versions })
Object.assign(result, { ...featureData?.[value]?.versions })
} else if (Array.isArray(value)) {
for (const str of value) {
Object.assign(result, { ...featureData[str].versions })
Object.assign(result, { ...featureData?.[str]?.versions })
}
}
delete result[key]

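To make the `feature` expansion above concrete, here is a standalone sketch with fabricated feature data; the feature name and version ranges are invented for illustration, and the loop is a simplified stand-in for the `reduce` call.

```typescript
type VersionsObject = Record<string, string>

const featureData: Record<string, { versions: VersionsObject }> = {
  'example-feature': { versions: { fpt: '*', ghec: '*' } },
}

// Frontmatter-style versions object mixing a concrete range with a feature key.
const versionsObj: Record<string, string | string[]> = {
  ghes: '>=3.12',
  feature: 'example-feature',
}

const result: VersionsObject = {}
for (const [key, value] of Object.entries(versionsObj)) {
  if (key === 'feature') {
    const names = Array.isArray(value) ? value : [value]
    for (const name of names) Object.assign(result, featureData[name]?.versions)
  } else if (typeof value === 'string') {
    result[key] = value
  }
}

console.log(result) // { ghes: '>=3.12', fpt: '*', ghec: '*' }
```
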
View File

@@ -1,17 +1,27 @@
import { getLiquidTokens } from '@/content-linter/lib/helpers/liquid-utils'
import type { TagToken } from 'liquidjs'
import { TokenKind } from 'liquidjs'
type Token = {
name?: string
args?: string
// Type guard to check if a token is a TagToken
function isTagToken(token: unknown): token is TagToken {
return (
token !== null &&
typeof token === 'object' &&
'kind' in token &&
token.kind === TokenKind.Tag &&
'name' in token &&
typeof token.name === 'string' &&
'args' in token
)
}
const parsedLiquidTokensCache = new Map<string, Token[]>()
const parsedLiquidTokensCache = new Map<string, TagToken[]>()
export function inLiquid(filePath: string, fileContents: string, needle: string) {
if (!parsedLiquidTokensCache.has(filePath)) {
parsedLiquidTokensCache.set(filePath, getLiquidTokens(fileContents))
parsedLiquidTokensCache.set(filePath, getLiquidTokens(fileContents).filter(isTagToken))
}
const tokens = parsedLiquidTokensCache.get(filePath) as Token[]
const tokens = parsedLiquidTokensCache.get(filePath)!
for (const token of tokens) {
if (token.name === 'data') {
const { args } = token