
Merge pull request #28282 from github/repo-sync

Repo sync
docs-bot authored 2023-09-15 10:58:47 -07:00, committed by GitHub
3 changed files with 53 additions and 12 deletions


@@ -30,22 +30,45 @@ export function isStringPunctuated(text) {
 // Filters a list of tokens by token type only when they match
 // a specific token type order.
 // For example, if a list of tokens contains:
-// [ { type: 'inline'}, { type: 'list_item_close'}, { type: 'list_item_open'},
-// { type: 'paragraph_open'}, { type: 'inline'}, { type: 'paragraph_close'}]
-// And if the tokenOrder being looked for is
-// `tokenOrder` defined ['list_item_open', 'paragraph_open', 'inline']
+//
+// [
+//   { type: 'inline'},
+//   { type: 'list_item_close'},
+//   { type: 'list_item_open'},
+//   { type: 'paragraph_open'},
+//   { type: 'inline'},
+//   { type: 'paragraph_close'},
+// ]
+//
+// And if the `tokenOrder` being looked for is:
+//
+// [
+//   'list_item_open',
+//   'paragraph_open',
+//   'inline'
+// ]
+//
 // Then the return value would be the items that match that sequence:
-// Index 2-4:
-// [{ type: 'list_item_open'}, { type: 'paragraph_open'}, { type: 'inline'}]
+//
+// [
+//   { type: 'inline'},           <-- Index 0 - NOT INCLUDED
+//   { type: 'list_item_close'},  <-- Index 1 - NOT INCLUDED
+//   { type: 'list_item_open'},   <-- Index 2 - INCLUDED
+//   { type: 'paragraph_open'},   <-- Index 3 - INCLUDED
+//   { type: 'inline'},           <-- Index 4 - INCLUDED
+//   { type: 'paragraph_close'},  <-- Index 5 - NOT INCLUDED
+// ]
+//
 export function filterTokensByOrder(tokens, tokenOrder) {
   const matches = []
   // Get a list of token indexes that match the
   // first token (root) in the tokenOrder array
   const tokenRootIndexes = []
+  const firstTokenOrderType = tokenOrder[0]
   tokens.forEach((token, index) => {
-    if (token.type === tokenOrder[0]) {
-      return tokenRootIndexes.push(index)
+    if (token.type === firstTokenOrderType) {
+      tokenRootIndexes.push(index)
     }
   })
@@ -54,7 +77,10 @@ export function filterTokensByOrder(tokens, tokenOrder) {
   for (const tokenRootIndex of tokenRootIndexes) {
     for (let i = 1; i < tokenOrder.length; i++) {
       if (tokens[tokenRootIndex + i].type !== tokenOrder[i]) {
-        return
+        // This tokenRootIndex was a possible start,
+        // but doesn't match the tokenOrder perfectly, so break out
+        // of the inner loop before it reaches the end.
+        break
       }
       if (i === tokenOrder.length - 1) {
         matches.push(...tokens.slice(tokenRootIndex, tokenRootIndex + i + 1))
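To make the `return` -> `break` fix easy to try, here is the patched function reassembled from the two hunks above as a self-contained sketch. The hunk is cut off before the closing braces, so the trailing `return matches` is an assumption based on the doc comment's description of the return value.

// Sketch: reassembled from this commit's two hunks; the final
// `return matches` is assumed (the hunks do not show the function end).
export function filterTokensByOrder(tokens, tokenOrder) {
  const matches = []
  // Get a list of token indexes that match the
  // first token (root) in the tokenOrder array
  const tokenRootIndexes = []
  const firstTokenOrderType = tokenOrder[0]
  tokens.forEach((token, index) => {
    if (token.type === firstTokenOrderType) {
      tokenRootIndexes.push(index)
    }
  })
  // Walk forward from each candidate root; reaching the end of
  // tokenOrder without a mismatch is a match. (Note: this assumes
  // tokenRootIndex + i stays in range; a root too close to the end
  // of tokens would throw here.)
  for (const tokenRootIndex of tokenRootIndexes) {
    for (let i = 1; i < tokenOrder.length; i++) {
      if (tokens[tokenRootIndex + i].type !== tokenOrder[i]) {
        break // this candidate failed partway; try the next root
      }
      if (i === tokenOrder.length - 1) {
        matches.push(...tokens.slice(tokenRootIndex, tokenRootIndex + i + 1))
      }
    }
  }
  return matches
}

// Running the doc comment's example returns the tokens at indexes 2-4:
const tokens = [
  { type: 'inline' },
  { type: 'list_item_close' },
  { type: 'list_item_open' },
  { type: 'paragraph_open' },
  { type: 'inline' },
  { type: 'paragraph_close' },
]
console.log(filterTokensByOrder(tokens, ['list_item_open', 'paragraph_open', 'inline']))
// With the old `return`, the first candidate that failed partway would have
// aborted the whole scan and dropped any later matches in the document.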


@@ -7,6 +7,11 @@ export const listFirstWordCapitalization = {
   tags: ['ul', 'ol'],
   information: new URL('https://github.com/github/docs/blob/main/src/content-linter/README.md'),
   function: function GHD011(params, onError) {
+    // We're going to look for a sequence of 3 tokens. If the markdown
+    // is a really small string, it might not even have that many tokens
+    // in it. Can bail early.
+    if (params.tokens.length < 3) return
+
     const inlineListItems = filterTokensByOrder(params.tokens, [
       'list_item_open',
       'paragraph_open',
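The guard works because the shortest sequence the rule searches for is three tokens long. At the time of this commit, markdownlint built `params.tokens` with markdown-it, so a standalone markdown-it parse (an illustration, not code from this commit) shows which inputs are small enough to trip the guard:

// Illustration: token counts for small markdown strings.
import MarkdownIt from 'markdown-it'

const md = new MarkdownIt()
for (const source of ['', 'Hi', '- One word']) {
  const tokens = md.parse(source, {})
  console.log(JSON.stringify(source), tokens.map((token) => token.type))
}
// ''           -> []  (0 tokens: the rule bails before filtering)
// 'Hi'         -> ['paragraph_open', 'inline', 'paragraph_close']
// '- One word' -> includes the 'list_item_open', 'paragraph_open',
//                 'inline' run that GHD011 filters for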


@@ -1,10 +1,6 @@
-import { jest } from '@jest/globals'
 import { runRule } from '../../lib/init-test.js'
 import { listFirstWordCapitalization } from '../../lib/linting-rules/list-first-word-capitalization.js'
-
-jest.setTimeout(30 * 1000)
-
 describe(listFirstWordCapitalization.names.join(' - '), () => {
   test('ensure multi-level lists catch incorrect capitalization errors', async () => {
     const markdown = [
@@ -59,4 +55,18 @@ describe(listFirstWordCapitalization.names.join(' - '), () => {
     const errors = result.markdown
     expect(errors.length).toBe(0)
   })
+
+  test("list items that aren't simple lists", async () => {
+    const markdown = ['- > Blockquote in a list', '- ### Heading in a list'].join('\n')
+    const result = await runRule(listFirstWordCapitalization, { markdown })
+    const errors = result.markdown
+    expect(errors.length).toBe(0)
+  })
+
+  test('works on markdown that has no lists at all, actually', async () => {
+    const markdown = '- \n'
+    const result = await runRule(listFirstWordCapitalization, { markdown })
+    const errors = result.markdown
+    expect(errors.length).toBe(0)
+  })
 })
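Why both new tests expect zero errors: in each document, `list_item_open` is not immediately followed by `paragraph_open` and `inline`, so `filterTokensByOrder` finds no match and the rule never reports. A markdown-it token dump (again an illustration, not commit code) makes that visible:

import MarkdownIt from 'markdown-it'

const md = new MarkdownIt()
console.log(md.parse('- > Blockquote in a list', {}).map((t) => t.type))
// [ 'bullet_list_open', 'list_item_open', 'blockquote_open',
//   'paragraph_open', 'inline', 'paragraph_close', 'blockquote_close',
//   'list_item_close', 'bullet_list_close' ]
console.log(md.parse('- ### Heading in a list', {}).map((t) => t.type))
// [ 'bullet_list_open', 'list_item_open', 'heading_open', 'inline',
//   'heading_close', 'list_item_close', 'bullet_list_close' ]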