
Merge branch 'main' into noviicee

This commit is contained in:
Anamika
2022-09-07 13:25:21 +05:30
committed by GitHub
1648 changed files with 69372 additions and 15374 deletions

View File

@@ -18,7 +18,7 @@ If you aren't comfortable going through the steps alone, sync up with a docs eng
```
script/update-enterprise-dates.js
```
- [ ] Create REST files based on previous version. Copy the latest GHES version of the dereferenced file from `lib/rest/static/dereferenced` to a new file in the same directory for the new GHES release. Ex, `cp lib/rest/static/dereferenced/ghes-3.4.deref.json lib/rest/static/dereferenced/ghes-3.5.deref.json`. Then run `script/rest/updated-files.js --decorate-only` and check in the resulting files.
- [ ] Create REST files based on previous version. Copy the latest GHES version of the dereferenced file from `lib/rest/static/dereferenced` to a new file in the same directory for the new GHES release. Ex, `cp lib/rest/static/dereferenced/ghes-3.4.deref.json lib/rest/static/dereferenced/ghes-3.5.deref.json`. Then run `script/rest/update-files.js --decorate-only` and check in the resulting files.
- [ ] Create GraphQL files based on previous version:

View File

@@ -52,13 +52,6 @@ jobs:
ISSUE_TITLE: ${{ github.event.issue.title }}
ISSUE_BODY: ${{ github.event.issue.body }}
- name: Comment on the new issue
run: gh issue comment $NEW_ISSUE --body "This issue was originally opened in the open source repo as $OLD_ISSUE"
env:
GITHUB_TOKEN: ${{secrets.DOCUBOT_READORG_REPO_WORKFLOW_SCOPES}}
NEW_ISSUE: ${{ env.NEW_ISSUE }}
OLD_ISSUE: ${{ github.event.issue.html_url }}
- name: Comment on the old issue
run: gh issue comment $OLD_ISSUE --body "Thank you for opening this issue! Updates to the REST/GraphQL API description must be made internally. I have copied your issue to an internal issue, so I will close this issue."
env:
@@ -70,3 +63,10 @@ jobs:
env:
GITHUB_TOKEN: ${{secrets.DOCUBOT_READORG_REPO_WORKFLOW_SCOPES}}
OLD_ISSUE: ${{ github.event.issue.html_url }}
- name: Comment on the new issue
run: gh issue comment $NEW_ISSUE --body "This issue was originally opened in the open source repo as $OLD_ISSUE"
env:
GITHUB_TOKEN: ${{secrets.DOCUBOT_READORG_REPO_WORKFLOW_SCOPES}}
NEW_ISSUE: ${{ env.NEW_ISSUE }}
OLD_ISSUE: ${{ github.event.issue.html_url }}

View File

@@ -53,6 +53,14 @@ jobs:
# Don't care about CDN caching image URLs
DISABLE_REWRITE_ASSET_URLS: true
run: |
# Note as of Aug 2022, we *don't* check external links
# on the pages you touched in the PR. We could enable that
# but it has the added risk of false positives blocking CI.
# We are using this script for the daily/nightly checker that
# checks external links too. Once we're confident it really works
# well, we can consider enabling it here on every content PR too.
./script/rendered-content-link-checker.js \
--language en \
--max 100 \

View File

@@ -9,6 +9,7 @@ on:
types: [labeled]
permissions:
pull-requests: write
contents: none
jobs:

View File

@@ -23,6 +23,9 @@ env:
# Setting this will activate the jest tests that depend on actually
# sending real search queries to Elasticsearch
ELASTICSEARCH_URL: http://localhost:9200/
# Hopefully the name is clear enough. By enabling this, we're testing
# the future code.
ENABLE_SEARCH_RESULTS_PAGE: true
jobs:
test:
@@ -52,7 +55,7 @@ jobs:
if: ${{ matrix.test-group == 'content' }}
uses: getong/elasticsearch-action@95b501ab0c83dee0aac7c39b7cea3723bef14954
with:
elasticsearch version: '7.17.5'
elasticsearch version: '7.11.1'
host port: 9200
container port: 9200
host node port: 9300

.gitignore vendored
View File

@@ -2,6 +2,7 @@
.DS_Store
.env
.vscode/settings.json
.idea/
/node_modules/
npm-debug.log
coverage/

Binary file not shown.

After

Width:  |  Height:  |  Size: 56 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 28 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 51 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 276 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 232 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 80 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 178 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 23 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 188 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 27 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 7.6 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 12 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 162 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 173 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 107 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 36 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 61 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 6.3 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 68 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 66 KiB

After

Width:  |  Height:  |  Size: 18 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 27 KiB

After

Width:  |  Height:  |  Size: 16 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 48 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 132 KiB

View File

@@ -5,7 +5,7 @@ import { sendEvent, EventType } from 'components/lib/events'
import { useRouter } from 'next/router'
import { useArticleContext } from 'components/context/ArticleContext'
import parseUserAgent from 'components/lib/user-agent'
import { parseUserAgent } from 'components/lib/user-agent'
const platforms = [
{ id: 'mac', label: 'Mac' },

View File

@@ -59,8 +59,9 @@ export const PlaygroundContextProvider = (props: { children: React.ReactNode })
const router = useRouter()
const [activeSectionIndex, setActiveSectionIndex] = useState(0)
const [scrollToSection, setScrollToSection] = useState<number>()
const path = router.asPath.split('?')[0].split('#')[0]
const path = router.asPath.includes('@latest')
? router.asPath.split('?')[0].split('#')[0].split('@latest')[1]
: router.asPath.split('?')[0].split('#')[0]
const relevantArticles = articles.filter(({ slug }) => slug === path)
const { langId } = router.query
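A quick sketch of the normalization above, with a hypothetical versioned URL (the `@latest` segment and article slug are illustrative):

```js
// '/free-pro-team@latest/codespaces/quickstart?langId=py#setup'
// → strip query and hash → '/free-pro-team@latest/codespaces/quickstart'
// → take everything after '@latest' → '/codespaces/quickstart'
const asPath = '/free-pro-team@latest/codespaces/quickstart?langId=py#setup'
const path = asPath.includes('@latest')
  ? asPath.split('?')[0].split('#')[0].split('@latest')[1]
  : asPath.split('?')[0].split('#')[0]
// path === '/codespaces/quickstart', which can now match an article slug
```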

View File

@@ -81,7 +81,8 @@ export function getCssTheme(cookieValue = ''): CssColorTheme {
darkTheme: filterTheme(dark_theme) || defaultCSSTheme.darkTheme,
}
} catch (err) {
console.warn("Unable to parse 'color_mode' cookie", err)
if (process.env.NODE_ENV === 'development')
console.warn("Unable to parse 'color_mode' cookie", err)
return defaultCSSTheme
}
}

View File

@@ -1,7 +1,7 @@
/* eslint-disable camelcase */
import { v4 as uuidv4 } from 'uuid'
import Cookies from 'js-cookie'
import parseUserAgent from './user-agent'
import { parseUserAgent } from './user-agent'
const COOKIE_NAME = '_docs-events'

View File

@@ -43,9 +43,14 @@ export function getShellExample(operation: Operation, codeSample: CodeSample) {
}
}
let authHeader = '-H "Authorization: Bearer <YOUR-TOKEN>"'
if (operation.subcategory === 'management-console') {
authHeader = '-u "api_key:your-password"'
}
const args = [
operation.verb !== 'get' && `-X ${operation.verb.toUpperCase()}`,
`-H "Accept: ${defaultAcceptHeader}" \\ \n -H "Authorization: token <TOKEN>"`,
`-H "Accept: ${defaultAcceptHeader}" \\ \n ${authHeader}`,
`${operation.serverUrl}${requestPath}`,
requestBodyParams,
].filter(Boolean)
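As a rough sketch, for a Management Console operation the arguments assembled above would now yield a cURL command along these lines (the endpoint and Accept value are illustrative; `your-password` is the placeholder the template emits):

```shell
curl \
  -X POST \
  -H "Accept: application/vnd.github+json" \
  -u "api_key:your-password" \
  https://HOSTNAME/setup/api/maintenance
```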
@@ -86,12 +91,12 @@ export function getGHExample(operation: Operation, codeSample: CodeSample) {
requestBodyParams = Object.keys(codeSample.request.bodyParameters)
.map((key) => {
if (typeof codeSample.request.bodyParameters[key] === 'string') {
return `-f ${key}='${codeSample.request.bodyParameters[key]}'\n`
return `-f ${key}='${codeSample.request.bodyParameters[key]}' `
} else {
return `-F ${key}=${codeSample.request.bodyParameters[key]}\n`
return `-F ${key}=${codeSample.request.bodyParameters[key]} `
}
})
.join(' ')
.join('\\\n ')
}
const args = [
operation.verb !== 'get' && `--method ${operation.verb.toUpperCase()}`,
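With the new join, a generated GitHub CLI sample with two string body parameters would render roughly like this sketch (operation and parameter names are hypothetical):

```shell
gh api \
  --method POST \
  -H "Accept: application/vnd.github+json" \
  /repos/octo-org/octo-repo/issues \
  -f title='Bug report' \
  -f body='Something is broken'
```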
@@ -141,11 +146,7 @@ export function getJSExample(operation: Operation, codeSample: CodeSample) {
}
}
const comment = `// Octokit.js\n// https://github.com/octokit/core.js#readme\n`
const require = `const octokit = new Octokit(${stringify(
{ auth: 'personal-access-token123' },
null,
2
)})\n\n`
const require = `const octokit = new Octokit(${stringify({ auth: 'YOUR-TOKEN' }, null, 2)})\n\n`
return `${comment}${require}await octokit.request('${operation.verb.toUpperCase()} ${
operation.requestPath
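Assembled, the generated JavaScript sample now reads roughly as follows for a hypothetical "create an issue" operation (all names illustrative):

```js
// Octokit.js
// https://github.com/octokit/core.js#readme
const octokit = new Octokit({
  auth: 'YOUR-TOKEN'
})

await octokit.request('POST /repos/{owner}/{repo}/issues', {
  owner: 'octo-org',
  repo: 'octo-repo',
  title: 'Bug report'
})
```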

View File

@@ -19,7 +19,7 @@ const BROWSER_REGEXPS = [
/ms(ie)\/([^\s)]+)/i,
]
export default function parseUserAgent(ua = navigator.userAgent) {
export function parseUserAgent(ua = navigator.userAgent) {
ua = ua.toLowerCase()
const osRe = OS_REGEXPS.find((re) => re.test(ua))
let [, os = 'other', os_version = '0'] = (osRe && ua.match(osRe)) || []
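Call sites switch from a default import to the named form; a minimal sketch (returned fields inferred from the destructuring above):

```js
import { parseUserAgent } from 'components/lib/user-agent'

const { os, os_version } = parseUserAgent(navigator.userAgent)
```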

View File

@@ -15,7 +15,7 @@ export const CodeLanguagePicker = () => {
<SubNav.Link
key={language.id}
as={Link}
href={`${routePath}?langId=${language.id}`}
href={`/${router.locale}${routePath}?langId=${language.id}`}
selected={language.id === currentLanguage.id}
>
{language.label}
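With the locale prefix added, a rendered link now targets, for example, `/en/playground/code?langId=python` rather than `/playground/code?langId=python` (the path and language id here are hypothetical).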

View File

@@ -53,18 +53,26 @@ const article: PlaygroundArticleT = {
To set up your repository to use a custom dev container, you will need to create one or more \`devcontainer.json\` files. You can add these either from a template, in Visual Studio Code, or you can write your own. For more information on dev container configurations, see "[Introduction to dev containers](/codespaces/setting-up-your-codespace/configuring-codespaces-for-your-project)".
1. Access the Command Palette (<kbd>Shift</kbd> + <kbd>Command</kbd> + <kbd>P</kbd> / <kbd>Ctrl</kbd> + <kbd>Shift</kbd> + <kbd>P</kbd>), then start typing "dev container". Select **Codespaces: Add Development Container Configuration Files...**.
![Codespaces: Add Development Container Configuration Files... in the command palette](/assets/images/help/codespaces/add-prebuilt-container-command.png)
1. Access the Command Palette (<kbd>Shift</kbd>+<kbd>Command</kbd>+<kbd>P</kbd> / <kbd>Ctrl</kbd>+<kbd>Shift</kbd>+<kbd>P</kbd>), then start typing "dev container". Select **Codespaces: Add Development Container Configuration Files...**.
![Codespaces: Add Development Container Configuration Files... in the Command Palette](/assets/images/help/codespaces/add-prebuilt-container-command.png)
2. For this example, click **C# (.NET)**. If you need additional features you can select any container that's specific to C# (.NET) or a combination of tools such as C# (.NET) and MS SQL.
![Select C# (.NET) option from the list](/assets/images/help/codespaces/add-dotnet-prebuilt-container.png)
![Select C# (.NET) option from the list](/assets/images/help/codespaces/add-dotnet-prebuilt-container.png)
3. Click the recommended version of .NET.
![.NET version selection](/assets/images/help/codespaces/add-dotnet-version.png)
![.NET version selection](/assets/images/help/codespaces/add-dotnet-version.png)
4. Accept the default option to add Node.js to your customization.
![Add Node.js selection](/assets/images/help/codespaces/dotnet-options.png)
![Add Node.js selection](/assets/images/help/codespaces/dotnet-options.png)
5. Select any additional features to install and click **OK**.
6. Access the command palette (<kbd>Shift</kbd> + <kbd>Command</kbd> + <kbd>P</kbd> / <kbd>Ctrl</kbd> + <kbd>Shift</kbd> + <kbd>P</kbd>), then start typing "rebuild". Select **Codespaces: Rebuild Container**.
![Rebuild container option](/assets/images/help/codespaces/codespaces-rebuild.png)
6. Access the Command Palette (<kbd>Shift</kbd>+<kbd>Command</kbd>+<kbd>P</kbd> / <kbd>Ctrl</kbd>+<kbd>Shift</kbd>+<kbd>P</kbd>), then start typing "rebuild". Select **Codespaces: Rebuild Container**.
![Rebuild container option](/assets/images/help/codespaces/codespaces-rebuild.png)
`,
},
{
@@ -197,13 +205,16 @@ const article: PlaygroundArticleT = {
"postCreateCommand": "dotnet restore",
\`\`\`
4. Access the command palette (\`Shift + Command + P\`/ \`Ctrl + Shift + P\`), then start typing "rebuild". Select **Codespaces: Rebuild Container**.
![Rebuild container option](/assets/images/help/codespaces/codespaces-rebuild.png)
Rebuilding inside your codespace ensures your changes work as expected before you commit the changes to the repository. If something does result in a failure, you'll be placed in a codespace with a recovery container that you can rebuild from to keep adjusting your container.
4. Access the Command Palette (<kbd>Shift</kbd>+<kbd>Command</kbd>+<kbd>P</kbd> / <kbd>Ctrl</kbd>+<kbd>Shift</kbd>+<kbd>P</kbd>), then start typing "rebuild". Select **Codespaces: Rebuild Container**.
![Rebuild container option](/assets/images/help/codespaces/codespaces-rebuild.png)
Rebuilding inside your codespace ensures your changes work as expected before you commit the changes to the repository. If something does result in a failure, you'll be placed in a codespace with a recovery container that you can rebuild from to keep adjusting your container.
5. Check your changes were successfully applied by verifying the "Code Spell Checker" extension was installed.
![Extensions list](/assets/images/help/codespaces/dotnet-extensions.png)
![Extensions list](/assets/images/help/codespaces/dotnet-extensions.png)
`,
},
{
@@ -217,9 +228,10 @@ const article: PlaygroundArticleT = {
1. Run your application by pressing \`F5\` or entering \`dotnet watch run\` in your terminal.
2. When your project starts, you should see a message in the bottom right corner with a prompt to connect to the port your project uses.
2. When your project starts, you should see a message in the bottom right corner with a prompt to connect to the port your project uses.
![Port forwarding toast](/assets/images/help/codespaces/python-port-forwarding.png)
![Port forwarding toast](/assets/images/help/codespaces/python-port-forwarding.png)
`,
},
{

View File

@@ -51,15 +51,23 @@ const article: PlaygroundArticleT = {
To set up your repository to use a custom dev container, you will need to create one or more \`devcontainer.json\` files. You can add these either from a template, in Visual Studio Code, or you can write your own. For more information on dev container configurations, see "[Introduction to dev containers](/codespaces/setting-up-your-codespace/configuring-codespaces-for-your-project)".
1. Access the Command Palette (\`Shift + Command + P\` / \`Ctrl + Shift + P\`), then start typing "dev container". Select **Codespaces: Add Development Container Configuration Files...**.
!["Codespaces: Add Development Container Configuration Files..." in the command palette](/assets/images/help/codespaces/add-prebuilt-container-command.png)
1. Access the Command Palette (<kbd>Shift</kbd>+<kbd>Command</kbd>+<kbd>P</kbd> / <kbd>Ctrl</kbd>+<kbd>Shift</kbd>+<kbd>P</kbd>), then start typing "dev container". Select **Codespaces: Add Development Container Configuration Files...**.
!["Codespaces: Add Development Container Configuration Files..." in the Command Palette](/assets/images/help/codespaces/add-prebuilt-container-command.png)
2. For this example, click **Java**. In practice, you could select any container that's specific to Java or a combination of tools such as Java and Azure Functions.
![Select Java option from the list](/assets/images/help/codespaces/add-java-prebuilt-container.png)
![Select Java option from the list](/assets/images/help/codespaces/add-java-prebuilt-container.png)
3. Click the recommended version of Java.
![Java version selection](/assets/images/help/codespaces/add-java-version.png)
![Java version selection](/assets/images/help/codespaces/add-java-version.png)
4. Select any additional features to install and click **OK**.
5. Access the command palette (\`Shift + Command + P\`/ \`Ctrl + Shift + P\`), then start typing "rebuild". Select **Codespaces: Rebuild Container**.
![Rebuild container option](/assets/images/help/codespaces/codespaces-rebuild.png)
5. Access the Command Palette (<kbd>Shift</kbd>+<kbd>Command</kbd>+<kbd>P</kbd> / <kbd>Ctrl</kbd>+<kbd>Shift</kbd>+<kbd>P</kbd>), then start typing "rebuild". Select **Codespaces: Rebuild Container**.
![Rebuild container option](/assets/images/help/codespaces/codespaces-rebuild.png)
`,
},
{
@@ -185,9 +193,11 @@ const article: PlaygroundArticleT = {
For more information about \`devcontainer.json\` properties, see the Visual Studio Code documentation: "[devcontainer.json reference](https://code.visualstudio.com/docs/remote/devcontainerjson-reference)."
4. Access the command palette (\`Shift + Command + P\`/ \`Ctrl + Shift + P\`), then start typing "rebuild". Select **Codespaces: Rebuild Container**.
![Rebuild container option](/assets/images/help/codespaces/codespaces-rebuild.png)
Rebuilding inside your codespace ensures your changes work as expected before you commit the changes to the repository. If something does result in a failure, you'll be placed in a codespace with a recovery container that you can rebuild from to keep adjusting your container.
4. Access the Command Palette (<kbd>Shift</kbd>+<kbd>Command</kbd>+<kbd>P</kbd> / <kbd>Ctrl</kbd>+<kbd>Shift</kbd>+<kbd>P</kbd>), then start typing "rebuild". Select **Codespaces: Rebuild Container**.
![Rebuild container option](/assets/images/help/codespaces/codespaces-rebuild.png)
Rebuilding inside your codespace ensures your changes work as expected before you commit the changes to the repository. If something does result in a failure, you'll be placed in a codespace with a recovery container that you can rebuild from to keep adjusting your container.
`,
},
{

View File

@@ -29,7 +29,7 @@ const article: PlaygroundArticleT = {
content: dedent`
1. Under the repository name, use the **Code** drop-down menu, and in the **Codespaces** tab, click **Create codespace on BRANCH**.
![New codespace button](/assets/images/help/codespaces/new-codespace-button.png)
![New codespace button](/assets/images/help/codespaces/new-codespace-button.png)
If you don't see this option, GitHub Codespaces isn't available for your project. See [Access to GitHub Codespaces](/codespaces/developing-in-codespaces/creating-a-codespace#access-to-codespaces) for more information.
@@ -51,15 +51,23 @@ const article: PlaygroundArticleT = {
To set up your repository to use a custom dev container, you will need to create one or more \`devcontainer.json\` files. You can add these either from a template, in Visual Studio Code, or you can write your own. For more information on dev container configurations, see "[Introduction to dev containers](/codespaces/setting-up-your-codespace/configuring-codespaces-for-your-project)".
1. Access the Command Palette (\`Shift + Command + P\` / \`Ctrl + Shift + P\`), then start typing "dev container". Select **Codespaces: Add Development Container Configuration Files...**.
!["Codespaces: Add Development Container Configuration Files..." in the command palette](/assets/images/help/codespaces/add-prebuilt-container-command.png)
1. Access the Command Palette (<kbd>Shift</kbd>+<kbd>Command</kbd>+<kbd>P</kbd> / <kbd>Ctrl</kbd>+<kbd>Shift</kbd>+<kbd>P</kbd>), then start typing "dev container". Select **Codespaces: Add Development Container Configuration Files...**.
!["Codespaces: Add Development Container Configuration Files..." in the Command Palette](/assets/images/help/codespaces/add-prebuilt-container-command.png)
2. For this example, click **Node.js**. If you need additional features you can select any container that's specific to Node or a combination of tools such as Node and MongoDB.
![Select Node option from the list](/assets/images/help/codespaces/add-node-prebuilt-container.png)
![Select Node option from the list](/assets/images/help/codespaces/add-node-prebuilt-container.png)
3. Click the recommended version of Node.js.
![Node.js version selection](/assets/images/help/codespaces/add-node-version.png)
![Node.js version selection](/assets/images/help/codespaces/add-node-version.png)
4. Select any additional features to install and click **OK**.
5. Access the command palette (\`Shift + Command + P\`/ \`Ctrl + Shift + P\`), then start typing "rebuild". Select **Codespaces: Rebuild Container**.
![Rebuild container option](/assets/images/help/codespaces/codespaces-rebuild.png)
5. Access the Command Palette (<kbd>Shift</kbd>+<kbd>Command</kbd>+<kbd>P</kbd> / <kbd>Ctrl</kbd>+<kbd>Shift</kbd>+<kbd>P</kbd>), then start typing "rebuild". Select **Codespaces: Rebuild Container**.
![Rebuild container option](/assets/images/help/codespaces/codespaces-rebuild.png)
`,
},
{
@@ -173,7 +181,9 @@ const article: PlaygroundArticleT = {
With your dev container configuration added and a basic understanding of what everything does, you can now make changes to customize your environment further. In this example, you'll add properties to install npm when your codespace launches and make a list of ports inside the container available locally.
1. In the Explorer, select the \`devcontainer.json\` file from the tree to open it. You might have to expand the \`.devcontainer\` folder to see it.
![devcontainer.json file in the Explorer](/assets/images/help/codespaces/devcontainers-options.png)
![devcontainer.json file in the Explorer](/assets/images/help/codespaces/devcontainers-options.png)
2. Add the following lines to your \`devcontainer.json\` file after \`extensions\`:
\`\`\`js{:copy}
@@ -182,10 +192,11 @@ const article: PlaygroundArticleT = {
\`\`\`
For more information about \`devcontainer.json\` properties, see the Visual Studio Code documentation: "[devcontainer.json reference](https://code.visualstudio.com/docs/remote/devcontainerjson-reference)."
1. Access the command palette (\`Shift + Command + P\`/ \`Ctrl + Shift + P\`), then start typing "rebuild". Select **Codespaces: Rebuild Container**.
![Rebuild container option](/assets/images/help/codespaces/codespaces-rebuild.png)
3. Access the Command Palette (<kbd>Shift</kbd>+<kbd>Command</kbd>+<kbd>P</kbd> / <kbd>Ctrl</kbd>+<kbd>Shift</kbd>+<kbd>P</kbd>), then start typing "rebuild". Select **Codespaces: Rebuild Container**.
Rebuilding inside your codespace ensures your changes work as expected before you commit the changes to the repository. If something does result in a failure, you'll be placed in a codespace with a recovery container that you can rebuild from to keep adjusting your container.
![Rebuild container option](/assets/images/help/codespaces/codespaces-rebuild.png)
Rebuilding inside your codespace ensures your changes work as expected before you commit the changes to the repository. If something does result in a failure, you'll be placed in a codespace with a recovery container that you can rebuild from to keep adjusting your container.
`,
},
{

View File

@@ -29,9 +29,9 @@ const article: PlaygroundArticleT = {
content: dedent`
1. Under the repository name, use the **Code** drop-down menu, and in the **Codespaces** tab, click **Create codespace on BRANCH**.
![New codespace button](/assets/images/help/codespaces/new-codespace-button.png)
![New codespace button](/assets/images/help/codespaces/new-codespace-button.png)
If you don't see this option, GitHub Codespaces isn't available for your project. See [Access to GitHub Codespaces](/codespaces/developing-in-codespaces/creating-a-codespace#access-to-codespaces) for more information.
If you don't see this option, GitHub Codespaces isn't available for your project. See [Access to GitHub Codespaces](/codespaces/developing-in-codespaces/creating-a-codespace#access-to-codespaces) for more information.
When you create a codespace, your project is created on a remote VM that is dedicated to you. By default, the container for your codespace has many languages and runtimes including Python, pip, and Miniconda. It also includes a common set of tools like git, wget, rsync, openssh, and nano.
@@ -53,17 +53,27 @@ const article: PlaygroundArticleT = {
To set up your repository to use a custom dev container, you will need to create one or more \`devcontainer.json\` files. You can add these either from a template, in Visual Studio Code, or you can write your own. For more information on dev container configurations, see "[Introduction to dev containers](/codespaces/setting-up-your-codespace/configuring-codespaces-for-your-project)".
1. Access the command palette (\`Shift + Command + P\` / \`Ctrl + Shift + P\`), then start typing "dev container". Select **Codespaces: Add Development Container Configuration Files...**.
!["Codespaces: Add Development Container Configuration Files..." in the command palette](/assets/images/help/codespaces/add-prebuilt-container-command.png)
1. Access the Command Palette (<kbd>Shift</kbd>+<kbd>Command</kbd>+<kbd>P</kbd> / <kbd>Ctrl</kbd>+<kbd>Shift</kbd>+<kbd>P</kbd>), then start typing "dev container". Select **Codespaces: Add Development Container Configuration Files...**.
!["Codespaces: Add Development Container Configuration Files..." in the Command Palette](/assets/images/help/codespaces/add-prebuilt-container-command.png)
2. For this example, click **Python 3**. If you need additional features you can select any container that's specific to Python or a combination of tools such as Python 3 and PostgreSQL.
![Select Python option from the list](/assets/images/help/codespaces/add-python-prebuilt-container.png)
![Select Python option from the list](/assets/images/help/codespaces/add-python-prebuilt-container.png)
3. Click the recommended version of Python.
![Python version selection](/assets/images/help/codespaces/add-python-version.png)
![Python version selection](/assets/images/help/codespaces/add-python-version.png)
4. Accept the default option to add Node.js to your customization.
![Add Node.js selection](/assets/images/help/codespaces/add-nodejs-selection.png)
![Add Node.js selection](/assets/images/help/codespaces/add-nodejs-selection.png)
5. Select any additional features to install and click **OK**.
6. Access the command palette (\`Shift + Command + P\` / \`Ctrl + Shift + P\`), then start typing "rebuild". Select **Codespaces: Rebuild Container**.
![Rebuild container option](/assets/images/help/codespaces/codespaces-rebuild.png)
6. Access the Command Palette (<kbd>Shift</kbd>+<kbd>Command</kbd>+<kbd>P</kbd> / <kbd>Ctrl</kbd>+<kbd>Shift</kbd>+<kbd>P</kbd>), then start typing "rebuild". Select **Codespaces: Rebuild Container**.
![Rebuild container option](/assets/images/help/codespaces/codespaces-rebuild.png)
`,
},
{
@@ -197,7 +207,7 @@ const article: PlaygroundArticleT = {
"postCreateCommand": "pip3 install --user -r requirements.txt",
\`\`\`
4. Access the command palette (\`Shift + Command + P\` / \`Ctrl + Shift + P\`), then start typing "rebuild". Select **Codespaces: Rebuild Container**.
4. Access the Command Palette (<kbd>Shift</kbd>+<kbd>Command</kbd>+<kbd>P</kbd> / <kbd>Ctrl</kbd>+<kbd>Shift</kbd>+<kbd>P</kbd>), then start typing "rebuild". Select **Codespaces: Rebuild Container**.
![Rebuild container option](/assets/images/help/codespaces/codespaces-rebuild.png)

View File

@@ -60,14 +60,17 @@ export function RestCodeSamples({ operation, slug }: Props) {
}))
// Menu options for the language selector
const languageSelectOptions: LanguageOptionT[] = [
{ key: CURLKEY, text: 'cURL' },
{ key: JSKEY, text: 'JavaScript' },
]
// Not all examples support the GH CLI language option. If any of
// the examples don't support it, we don't show GH CLI as an option.
if (!languageExamples.some((example) => example.ghcli === undefined)) {
languageSelectOptions.push({ key: GHCLIKEY, text: 'GitHub CLI' })
const languageSelectOptions: LanguageOptionT[] = [{ key: CURLKEY, text: 'cURL' }]
// Management Console operations are not supported by Octokit
if (operation.subcategory !== 'management-console') {
languageSelectOptions.push({ key: JSKEY, text: 'JavaScript' })
// Not all examples support the GH CLI language option. If any of
// the examples don't support it, we don't show GH CLI as an option.
if (!languageExamples.some((example) => example.ghcli === undefined)) {
languageSelectOptions.push({ key: GHCLIKEY, text: 'GitHub CLI' })
}
}
// Menu options for the example selector

View File

@@ -13,6 +13,6 @@
table-layout: fixed !important;
}
.codeBlock code {
.codeBlock code:not(td *) {
word-break: break-all;
}

View File

@@ -1,43 +1,42 @@
import { Heading, NavList } from '@primer/react'
import cx from 'classnames'
import { ActionList, Heading } from '@primer/react'
import type { MiniTocItem } from 'components/context/ArticleContext'
import { MiniTocItem } from 'components/context/ArticleContext'
import { Link } from 'components/Link'
import { useTranslation } from 'components/hooks/useTranslation'
import styles from './Minitocs.module.scss'
export type MiniTocsPropsT = {
pageTitle: string
miniTocItems: MiniTocItem[]
}
const renderTocItem = (item: MiniTocItem) => {
function RenderTocItem(item: MiniTocItem) {
return (
<ActionList.Item
as="li"
key={item.contents.href}
className={item.platform}
sx={{
listStyle: 'none',
padding: '2px',
':hover': {
bg: 'var(--color-canvas-inset) !important',
},
'ul > li': {
':hover': {
bg: 'var(--color-neutral-subtle) !important',
},
},
}}
>
<div className={cx('lh-condensed d-block width-full')}>
<a className="d-block width-auto" href={item.contents.href}>
{item.contents.title}
</a>
{item.items && item.items.length > 0 ? (
<ul className="ml-3">{item.items.map(renderTocItem)}</ul>
) : null}
</div>
</ActionList.Item>
<div className={cx(styles.nested, item.platform)}>
<NavList.Item
href={item.contents.href}
sx={{
padding: '4px 0 4px 0',
marginLeft: '7px',
}}
>
{item.contents.title}
</NavList.Item>
{item.items && item.items.length > 0 && (
<ul className={cx(styles.indentNested)}>
{item.items.map((toc) => (
<RenderTocItem
key={toc.contents.href}
contents={toc.contents}
items={toc.items}
platform={toc.platform}
/>
))}
</ul>
)}
</div>
)
}
@@ -46,17 +45,22 @@ export function MiniTocs({ pageTitle, miniTocItems }: MiniTocsPropsT) {
return (
<>
<Heading as="h2" id="in-this-article" className="mb-1" sx={{ fontSize: 1 }}>
<Heading as="h2" id="in-this-article" className="mb-1 ml-3" sx={{ fontSize: 1 }}>
<Link href="#in-this-article">{t('miniToc')}</Link>
</Heading>
<ActionList variant="full" className="my-2" key={pageTitle} as="div">
<div>
{miniTocItems.map((items, i) => {
return <ul key={pageTitle + i}>{renderTocItem(items)}</ul>
})}
</div>
</ActionList>
<NavList className="my-2" key={pageTitle}>
{miniTocItems.map((items, i) => {
return (
<RenderTocItem
key={items.contents.href + i}
contents={items.contents}
items={items.items}
platform={items.platform}
/>
)
})}
</NavList>
</>
)
}

View File

@@ -0,0 +1,12 @@
.indentNested {
padding-inline-start: 0;
}
.nested {
div ul div li {
padding-left: 4em;
}
div li {
padding-left: 2em;
}
}

View File

@@ -227,7 +227,7 @@ defaultPlatform: linux
### `defaultTool`
- Purpose: Override the initial tool selection for a page, where tool refers to the application the reader is using to work with GitHub (such as GitHub.com's web UI, the GitHub CLI, or GitHub Desktop) or the GitHub APIs (such as cURL or the GitHub CLI). For more information about the tool selector, see [Markup reference for GitHub Docs](../contributing/content-markup-reference.md#tool-tags). If this frontmatter is omitted, then the tool-specific content matching the GitHub web UI is shown by default. If a user has indicated a tool preference (by clicking on a tool tab), then the user's preference will be applied instead of the default value.
- Purpose: Override the initial tool selection for a page, where the tool refers to the application the reader is using to work with GitHub (such as GitHub.com's web UI, the GitHub CLI, or GitHub Desktop) or the GitHub APIs (such as cURL or the GitHub CLI). For more information about the tool selector, see [Markup reference for GitHub Docs](../contributing/content-markup-reference.md#tool-tags). If this frontmatter is omitted, then the tool-specific content matching the GitHub web UI is shown by default. If a user has indicated a tool preference (by clicking on a tool tab), then the user's preference will be applied instead of the default value.
- Type: `String`, one of: `webui`, `cli`, `desktop`, `curl`, `codespaces`, `vscode`, `importer_cli`, `graphql`, `powershell`, `bash`, `javascript`.
- Optional.
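For instance, a CLI-focused page could opt in with the following frontmatter (value taken from the allowed list above):

```yaml
defaultTool: cli
```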
@@ -263,9 +263,9 @@ includeGuides:
- Optional.
### `topics`
- Purpose: Indicate the topics covered by the article. The topics are used to filter guides on some landing pages. For example, the guides at the bottom of [this page](https://docs.github.com/en/actions/guides) can be filtered by topics and the topics are listed under the guide intro. Topics are also added to all search records that get created for each page. The search records contain a `topics` property that is used to filter search results by topics. For more information, see the [Search](/contributing/search.md) contributing guide. Refer to the content models for more details around adding topics. A full list of existing topics is located in the [allowed topics file](/data/allowed-topics.js). If topics in article frontmatter and the allow-topics list become out of sync, the [topics CI test](/tests/unit/search/topics.js) will fail.
- Purpose: Indicate the topics covered by the article. The topics are used to filter guides on some landing pages. For example, the guides at the bottom of [this page](https://docs.github.com/en/actions/guides) can be filtered by topics, and the topics are listed under the guide intro. Topics are also added to all search records that get created for each page. The search records contain a `topics` property that is used to filter search results by topics. For more information, see the [Search](/contributing/search.md) contributing guide. Refer to the content models for more details about adding topics. A full list of existing topics is located in the [allowed topics file](/data/allowed-topics.js). If topics in article frontmatter and the allow-topics list become out of sync, the [topics CI test](/tests/unit/search/topics.js) will fail.
- Type: Array of `String`s
- Optional: Topics are preferred for each article, but, there may be cases where existing articles don't yet have topics or adding a topic to a new article may not add value.
- Optional: Topics are preferred for each article, but there may be cases where existing articles don't yet have topics, or adding a topic to a new article may not add value.
### `contributor`
- Purpose: Indicate an article is contributed and maintained by a third-party organization, typically a GitHub Technology Partner.
@@ -294,7 +294,7 @@ contributor:
If you see two single quotes in a row (`''`) in YML frontmatter where you might expect to see one (`'`), this is the YML-preferred way to escape a single quote. From [the YAML spec](https://yaml.org/spec/history/2001-12-10.html):
> In single quoted leaves, a single quote character needs to be escaped. This is done by repeating the character.
> In single-quoted leaves, a single quote character needs to be escaped. This is done by repeating the character.
As an alternative, you can change the single quotes surrounding the frontmatter field to double quotes and leave interior single quotes unescaped.
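A minimal illustration of both options (the field and value are arbitrary):

```yaml
# Single quotes: escape the interior quote by doubling it
title: 'Managing your codespace''s secrets'

# Double quotes: interior single quote needs no escaping
title: "Managing your codespace's secrets"
```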
@@ -314,7 +314,7 @@ Make sure not to add hardcoded "In this article" sections in the Markdown source
A content file can have **two** types of versioning:
* [`versions`](#versions) frontmatter (**required**)
* Determines in which the versions the page is available. See [contributing/permalinks](../contributing/permalinks.md) for more info.
* Determines in which versions the page is available. See [contributing/permalinks](../contributing/permalinks.md) for more info.
* Liquid statements in content (**optional**)
* Conditionally render content depending on the current version being viewed. See [contributing/liquid-helpers](../contributing/liquid-helpers.md) for more info. Note Liquid conditionals can also appear in `data` and `include` files.
@@ -358,7 +358,7 @@ and when viewed on GitHub Enterprise Server docs, the version is included as wel
### Preventing transformations
Sometimes you want to link to a Dotcom-only article in Enterprise content and you don't want the link to be Enterprise-ified. To prevent the transformation, include the preferred version in the path.
Sometimes you want to link to a Dotcom-only article in Enterprise content and you don't want the link to be Enterprise-ified. To prevent the transformation, you should include the preferred version in the path.
```markdown
"[GitHub's Terms of Service](/free-pro-team@latest/github/site-policy/github-terms-of-service)"
@@ -388,8 +388,8 @@ The homepage is the main Table of Contents file for the docs site. The homepage
To create a product guides page (e.g. [Actions' Guide page](https://docs.github.com/en/actions/guides)), create or modify an existing markdown file with these specific frontmatter values:
1. Use the product guides page template by referencing it `layout: product-guides`
2. (optional) Include the learning tracks in [`learningTracks`](#learningTracks)
1. Use the product guides page template by referencing `layout: product-guides`.
2. (optional) Include the learning tracks in [`learningTracks`](#learningTracks).
3. (optional) Define which articles to include with [`includeGuides`](#includeGuides).
If using learning tracks, they need to be defined in [`data/learning-tracks/*.yml`](../data/learning-tracks/README.md).
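Putting those steps together, a product guides page's frontmatter might look like this sketch (track and guide paths are hypothetical):

```yaml
layout: product-guides
learningTracks:
  - getting_started
  - continuous_integration
includeGuides:
  - /actions/guides/about-continuous-integration
```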

View File

@@ -25,6 +25,7 @@ children:
- /managing-the-default-branch-name-for-your-repositories
- /managing-security-and-analysis-settings-for-your-personal-account
- /managing-access-to-your-personal-accounts-project-boards
- /managing-your-cookie-preferences-for-githubs-enterprise-marketing-pages
- /integrating-jira-with-your-personal-projects
- /what-does-the-available-for-hire-checkbox-do
shortTitle: Personal account settings

View File

@@ -0,0 +1,33 @@
---
title: Managing your cookie preferences for GitHub's enterprise marketing pages
intro: "You can control how {% data variables.product.company_short %} uses information from non-essential tracking cookies for enterprise marketing pages."
versions:
fpt: '*'
ghes: '*'
ghae: '*'
ghec: '*'
topics:
- Accounts
shortTitle: Manage cookie preferences
---
## About cookie preferences on enterprise marketing pages
{% data variables.product.company_short %} may use non-essential cookies on some enterprise marketing pages. You can customize how these cookies behave. For more information about how {% data variables.product.company_short %} uses cookies, see "[{% data variables.product.company_short %} Privacy Statement](/free-pro-team@latest/site-policy/privacy-policies/github-privacy-statement)."
## Changing your cookie preferences
You can customize how non-essential cookies behave on any {% data variables.product.company_short %} enterprise marketing page.
1. Navigate to the {% data variables.product.company_short %} enterprise marketing page where you'd like to change your cookie preferences. For example, navigate to [{% data variables.product.company_short %} Resources](https://resources.github.com/).
1. Scroll to the bottom of the page, then click **Manage Cookies**.
![Screenshot of button to manage cookie settings.](/assets/images/help/settings/cookie-settings-manage.png)
1. Under "Manage cookie preferences," to accept or reject each non-essential cookie, click **Accept** or **Reject**.
![Screenshot of radio buttons to choose "Accept" or "Reject" for non-essential cookies.](/assets/images/help/settings/cookie-settings-accept-or-reject.png)
1. Click **Save changes**.
![Screenshot of button to save changes.](/assets/images/help/settings/cookie-settings-save.png)

View File

@@ -145,11 +145,11 @@ With OIDC, a {% data variables.product.prodname_actions %} workflow requires a t
Audience and Subject claims are typically used in combination when setting conditions on the cloud role/resources to scope their access to GitHub workflows.
- **Audience**: By default, this value uses the URL of the organization or repository owner. This can be used to set a condition that only the workflows in the specific organization can access the cloud role.
- **Subject**: Has a predefined format and is a concatenation of some of the key metadata about the workflow, such as the {% data variables.product.prodname_dotcom %} organization, repository, branch, or associated [`job`](/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idenvironment) environment. See "[Example subject claims](#example-subject-claims)" to see how the subject claim is assembled from concatenated metadata.
- **Subject**: By default, has a predefined format and is a concatenation of some of the key metadata about the workflow, such as the {% data variables.product.prodname_dotcom %} organization, repository, branch, or associated [`job`](/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idenvironment) environment. See "[Example subject claims](#example-subject-claims)" to see how the subject claim is assembled from concatenated metadata.
There are also many additional claims supported in the OIDC token that can also be used for setting these conditions.
If you need more granular trust conditions, you can customize the issuer (`iss`) and subject (`sub`) claims that are included with the JWT. For more information, see "[Customizing the token claims](#customizing-the-token-claims)".
In addition, your cloud provider could allow you to assign a role to the access tokens, letting you specify even more granular permissions.
There are also many additional claims supported in the OIDC token that can be used for setting these conditions. In addition, your cloud provider could allow you to assign a role to the access tokens, letting you specify even more granular permissions.
{% note %}
@@ -243,9 +243,13 @@ curl -H "Authorization: bearer $ACTIONS_ID_TOKEN_REQUEST_TOKEN" "$ACTIONS_ID_TOK
{% ifversion actions-oidc-hardening-config %}
## Customizing the token claims
You can security harden your OIDC configuration by customizing the claims that are included with the JWT. This allows your cloud provider to apply more granular trust conditions when determining whether to grant access to its resources. For example, {% ifversion ghec %}you can customize the issuer (`iss`) claim to only allow access from a specific enterprise URL, and {% endif %}you can customize the subject (`sub`) value to require that requests originate from a specific repository, reusable workflow, or other source.
You can security harden your OIDC configuration by customizing the claims that are included with the JWT. These customizations allow you to define more granular trust conditions on your cloud roles when allowing your workflows to access resources hosted in the cloud:
To configure the claim conditions on {% data variables.product.prodname_dotcom %}, you can use the REST API endpoints described in the following sections.
{% ifversion ghec %} - For an additional layer of security, you can append your enterprise slug to the `issuer` URL. This lets you set conditions on the issuer (`iss`) claim, configuring it to only accept JWT tokens from a unique `issuer` URL that must include your enterprise slug.{% endif %}
- You can standardize your OIDC configuration by setting conditions on the subject (`sub`) claim that require JWT tokens to originate from a specific repository, reusable workflow, or other source.
- You can define granular OIDC policies by using additional OIDC token claims, such as `repository_id` and `repo_visibility`. For more information, see "[Understanding the OIDC token](/actions/deployment/security-hardening-your-deployments/about-security-hardening-with-openid-connect#understanding-the-oidc-token)".
To customize these claim formats, organization and repository admins can use the REST API endpoints described in the following sections.
{% ifversion ghec %}
@@ -280,19 +284,21 @@ After this setting is applied, the JWT will contain the updated `iss` value. In
To configure organization-wide security, compliance, and standardization, you can customize the standard claims to suit your required access conditions. If your cloud provider supports conditions on subject claims, you can create a condition that checks whether the `sub` value matches the path of the reusable workflow, such as `"job_workflow_ref: "octo-org/octo-automation/.github/workflows/oidc.yml@refs/heads/main""`. The exact format will vary depending on your cloud provider's OIDC configuration. To configure the matching condition on {% data variables.product.prodname_dotcom %}, you can use the REST API to require that the `sub` claim must always include a specific custom claim, such as `job_workflow_ref`. For more information, see "[Set the customization template for an OIDC subject claim for an organization](/rest/actions/oidc#set-the-customization-template-for-an-oidc-subject-claim-for-an-organization)."
Customizing the claims results in a new format for the entire `sub` claim, which replaces the default predefined `sub` format in the token described in "[Example subject claims](/actions/deployment/security-hardening-your-deployments/about-security-hardening-with-openid-connect#example-subject-claims)."
The following example templates demonstrate various ways to customize the subject claim. To configure these settings on {% data variables.product.prodname_dotcom %}, organization admins use the REST API to specify a list of claims that must be included in the subject (`sub`) claim. {% data reusables.actions.use-request-body-api %}
To customize your subject claims, you should first create a matching condition in your cloud provider's OIDC configuration, before adding the configuration using the REST API. Once the configuration is completed, each time a new job runs, the OIDC token generated during that job will follow the new customization template. If the matching condition doesn't exist in the cloud provider's OIDC configuration before the job runs, the generated token might not be accepted by the cloud provider, since the cloud conditions may not be synchronized.
To customize your subject claims, you should first create a matching condition in your cloud provider's OIDC configuration, before customizing the configuration using the REST API. Once the configuration is completed, each time a new job runs, the OIDC token generated during that job will follow the new customization template. If the matching condition doesn't exist in the cloud provider's OIDC configuration before the job runs, the generated token might not be accepted by the cloud provider, since the cloud conditions may not be synchronized.
{% note %}
**Note**: When the organization template is applied, it will not affect any existing repositories that already use OIDC. For new repositories that are created after the template has been applied, the repository owner will need to opt-in to receive this configuration. For more information, see "[Set the opt-in flag of an OIDC subject claim customization for a repository](/rest/actions/oidc#set-the-opt-in-flag-of-an-oidc-subject-claim-customization-for-a-repository)."
**Note**: When the organization template is applied, it will not affect any existing repositories that already use OIDC. For existing repositories, as well as any new repositories that are created after the template has been applied, the repository owner will need to opt in to receive this configuration. For more information, see "[Set the opt-in flag of an OIDC subject claim customization for a repository](/rest/actions/oidc#set-the-opt-in-flag-of-an-oidc-subject-claim-customization-for-a-repository)."
{% endnote %}
#### Example: Allowing repository based on visibility and owner
This example template enables cloud access based on repository visibility and owner, letting you restrict cloud role access to only private repositories within an organization or enterprise. {% data reusables.actions.use-request-body-api %}
This example template allows the `sub` claim to have a new format, using `repository_owner` and `repository_visibility`:
```json
{
@@ -303,11 +309,11 @@ This example template enables cloud access based on repository visibility and ow
}
```
In your cloud provider's OIDC configuration, configure the `sub` condition to require that claims must include specific values for `repository_owner` and `repository_visibility`. For example: `"repository_owner: "monalisa":repository_visibility:private"`.
In your cloud provider's OIDC configuration, configure the `sub` condition to require that claims must include specific values for `repository_owner` and `repository_visibility`. For example: `"repository_owner: "monalisa":repository_visibility:private"`. This approach lets you restrict cloud role access to only private repositories within an organization or enterprise.
#### Example: Allowing access to all repositories with a specific owner
This example template grants access to all repositories with a specified `repository_owner`. {% data reusables.actions.use-request-body-api %}
This example template enables the `sub` claim to have a new format with only the value of `repository_owner`. {% data reusables.actions.use-request-body-api %}
```json
{
@@ -322,7 +328,9 @@ In your cloud provider's OIDC configuration, configure the `sub` condition to re
#### Example: Requiring a reusable workflow
This example template requires a specific reusable workflow in a claim, letting an enterprise enforce consistent deployments across its enterprise, organizations, and repositories. {% data reusables.actions.use-request-body-api %}
This example template allows the `sub` claim to have a new format that contains the value of the `job_workflow_ref` claim. This enables an enterprise to use [reusable workflows](/actions/deployment/security-hardening-your-deployments/about-security-hardening-with-openid-connect#example-subject-claims) to enforce consistent deployments across its organizations and repositories.
{% data reusables.actions.use-request-body-api %}
```json
{
@@ -336,7 +344,9 @@ In your cloud provider's OIDC configuration, configure the `sub` condition to re
#### Example: Requiring a reusable workflow and other claims
This example template combines the requirement of a specific reusable workflow with additional claims. {% data reusables.actions.use-request-body-api %}
The following example template combines the requirement of a specific reusable workflow with additional claims. {% data reusables.actions.use-request-body-api %}
This example also demonstrates how to use `"context"` to define your conditions. This is the part that follows the repository in the [default `sub` format](/actions/deployment/security-hardening-your-deployments/about-security-hardening-with-openid-connect#example-subject-claims). For example, when the job references an environment, the context contains: `environment:<environmentName>`.
```json
{
@@ -350,6 +360,9 @@ This example template combines the requirement of a specific reusable workflow w
In your cloud provider's OIDC configuration, configure the `sub` condition to require that claims must include specific values for `repo`, `context`, and `job_workflow_ref`.
This customization template requires that the `sub` uses the following format: `repo:<orgName/repoName>:environment:<environmentName>:job_workflow_ref:<reusableWorkflowPath>`.
For example: `"sub": "repo:octo-org/octo-repo:environment:prod:job_workflow_ref:octo-org/octo-automation/.github/workflows/oidc.yml@refs/heads/main"`
#### Example: Granting access to a specific repository
This example template lets you grant cloud access to all the workflows in a specific repository, across all branches/tags and environments. To help improve security, combine this template with the custom issuer URL described in "[Customizing the token URL for an enterprise](/actions/deployment/security-hardening-your-deployments/about-security-hardening-with-openid-connect#customizing-the-token-url-for-an-enterprise)."

View File

@@ -23,11 +23,18 @@ topics:
Rather than copying and pasting deployment jobs from one workflow to another, you can create a reusable workflow that performs the deployment steps. A reusable workflow can be used by another workflow if it meets one of the access requirements described in "[Reusing workflows](/actions/learn-github-actions/reusing-workflows#access-to-reusable-workflows)."
When combined with OpenID Connect (OIDC), reusable workflows let you enforce consistent deployments across your repository, organization, or enterprise. You can do this by defining trust conditions on cloud roles based on reusable workflows.
You should be familiar with the concepts described in "[Reusing workflows](/actions/learn-github-actions/reusing-workflows)" and "[About security hardening with OpenID Connect](/actions/deployment/security-hardening-your-deployments/about-security-hardening-with-openid-connect)."
In order to create trust conditions based on reusable workflows, your cloud provider must support custom claims for `job_workflow_ref`. This allows your cloud provider to identify which repository the job originally came from. If your cloud provider only supports the standard claims (_audience_ and _subject_), it will not be able to determine that the job originated from the reusable workflow repository. Cloud providers that support `job_workflow_ref` include Google Cloud Platform and HashiCorp Vault.
## Defining the trust conditions
Before proceeding, you should be familiar with the concepts of [reusable workflows](/actions/learn-github-actions/reusing-workflows) and [OpenID Connect](/actions/deployment/security-hardening-your-deployments/about-security-hardening-with-openid-connect).
When combined with OpenID Connect (OIDC), reusable workflows let you enforce consistent deployments across your repository, organization, or enterprise. You can do this by defining trust conditions on cloud roles based on reusable workflows. The available options will vary depending on your cloud provider:
- **Using `job_workflow_ref`**:
- To create trust conditions based on reusable workflows, your cloud provider must support custom claims for `job_workflow_ref`. This allows your cloud provider to identify which repository the job originally came from.
- For clouds that only support the standard claims (audience (`aud`) and subject (`sub`)), you can use the API to customize the `sub` claim to include `job_workflow_ref`. For more information, see "[Customizing the token claims](/actions/deployment/security-hardening-your-deployments/about-security-hardening-with-openid-connect#customizing-the-token-claims)". Support for custom claims is currently available for Google Cloud Platform and HashiCorp Vault.
- **Customizing the token claims**:
- You can configure more granular trust conditions by customizing the issuer (`iss`) and subject (`sub`) claims included with the JWT. For more information, see "[Customizing the token claims](/actions/deployment/security-hardening-your-deployments/about-security-hardening-with-openid-connect#customizing-the-token-claims)".
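For illustration, a subject-claim customization that captures the reusable workflow could send a request body like this sketch to the REST endpoints referenced above (claim keys taken from the examples in this document):

```json
{
  "include_claim_keys": [
    "repo",
    "context",
    "job_workflow_ref"
  ]
}
```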
## How the token works with reusable workflows

View File

@@ -71,7 +71,7 @@ You can add self-hosted runners at the organization level, where they can be use
{% data reusables.organizations.navigate-to-org %}
{% data reusables.organizations.org_settings %}
{% data reusables.organizations.settings-sidebar-actions-runners %}
1. Click **New runner**.
{% ifversion actions-hosted-runners %}1. Click **New runner**, then click **New self-hosted runner**.{% else %}1. Click **New runner**.{% endif %}
{% data reusables.actions.self-hosted-runner-configure %}
{% elsif ghae or ghes < 3.4 %}
{% data reusables.organizations.navigate-to-org %}

View File

@@ -18,15 +18,24 @@ shortTitle: Run runner app on startup
{% capture service_first_step %}1. Stop the self-hosted runner application if it is currently running.{% endcapture %}
{% capture service_non_windows_intro_shell %}On the runner machine, open a shell in the directory where you installed the self-hosted runner application. Use the commands below to install and manage the self-hosted runner service.{% endcapture %}
{% capture service_nonwindows_intro %}You must add a runner to {% data variables.product.product_name %} before you can configure the self-hosted runner application as a service. For more information, see "[Adding self-hosted runners](/github/automating-your-workflow-with-github-actions/adding-self-hosted-runners)."{% endcapture %}
{% capture service_win_name %}actions.runner.*{% endcapture %}
{% capture service_nonwindows_intro %}
{% note %}
**Note:** You must add a runner to {% data variables.product.product_name %} before you can configure the self-hosted runner application as a service.
For more information, see "[Adding self-hosted runners](/github/automating-your-workflow-with-github-actions/adding-self-hosted-runners)."
{% endnote %}
{% endcapture %}
{% capture service_win_name %}actions.runner.*{% endcapture %}
{% linux %}
{{ service_nonwindows_intro }}
For Linux systems that use `systemd`, you can use the `svc.sh` script distributed with the self-hosted runner application to install and manage using the application as a service.
For Linux systems that use `systemd`, you can use the `svc.sh` script, which is created after the runner is successfully added, to install and manage the application as a service.
{{ service_non_windows_intro_shell }}
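As a minimal sketch, the typical service lifecycle on a `systemd`-based Linux machine looks like this, run from the runner's installation directory:

```sh
# Install the service (optionally pass the user account to run as)
sudo ./svc.sh install

# Start the service and check that it is running
sudo ./svc.sh start
sudo ./svc.sh status

# Stop the service, or uninstall it when the runner is retired
sudo ./svc.sh stop
sudo ./svc.sh uninstall
```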

View File

@@ -9,216 +9,65 @@ versions:
ghae: '*'
ghec: '*'
type: tutorial
shortTitle: Manage access to runners
shortTitle: Using runner groups
---
{% data reusables.actions.enterprise-beta %}
{% data reusables.actions.enterprise-github-hosted-runners %}
## About self-hosted runner groups
## About runner groups
{% ifversion fpt %}
{% note %}
**Note:** All organizations have a single default self-hosted runner group. Only enterprise accounts and organizations owned by enterprise accounts can create and manage additional self-hosted runner groups.
{% endnote %}
Self-hosted runner groups are used to control access to self-hosted runners. Organization admins can configure access policies that control which repositories in an organization have access to the runner group.
If you use {% data variables.product.prodname_ghe_cloud %}, you can create additional runner groups; enterprise admins can configure access policies that control which organizations in an enterprise have access to the runner group; and organization admins can assign additional granular repository access policies to the enterprise runner group. For more information, see the [{% data variables.product.prodname_ghe_cloud %} documentation](/enterprise-cloud@latest/actions/hosting-your-own-runners/managing-access-to-self-hosted-runners-using-groups).
{% endif %}
{% data reusables.actions.about-runner-groups %} {% ifversion fpt %}For more information, see the [{% data variables.product.prodname_ghe_cloud %} documentation](/enterprise-cloud@latest/actions/hosting-your-own-runners/managing-access-to-self-hosted-runners-using-groups).{% endif %}
{% ifversion ghec or ghes or ghae %}
Self-hosted runner groups are used to control access to self-hosted runners at the organization and enterprise level. Enterprise owners can configure access policies that control which organizations {% ifversion restrict-groups-to-workflows %}and workflows {% endif %}in an enterprise have access to the runner group. Organization owners can configure access policies that control which repositories{% ifversion restrict-groups-to-workflows %} and workflows{% endif %} in an organization have access to the runner group.
When an enterprise owner grants an organization access to a runner group, organization owners can see the runner group listed in the organization's self-hosted runner settings. The organization owners can then assign additional granular repository{% ifversion restrict-groups-to-workflows %} and workflow{% endif %} access policies to the enterprise runner group.
When new runners are created, they are automatically assigned to the default group. Runners can only be in one group at a time. You can move runners from the default group to another group. For more information, see "[Moving a self-hosted runner to a group](#moving-a-self-hosted-runner-to-a-group)."
## Creating a self-hosted runner group for an organization
All organizations have a single default self-hosted runner group. Organizations within an enterprise account can create additional self-hosted runner groups. Organization admins can allow individual repositories access to a runner group. For information about how to create a self-hosted runner group with the REST API, see "[Self-hosted runner groups](/rest/reference/actions#self-hosted-runner-groups)."
{%- ifversion ghec or ghes %}
Self-hosted runners are automatically assigned to the default group when created, and can only be members of one group at a time. You can move a runner from the default group to any group you create.
{% data reusables.actions.self-hosted-runner-security-admonition %}
When creating a group, you must choose a policy that defines which repositories{% ifversion restrict-groups-to-workflows %} and workflows{% endif %} have access to the runner group.
{%- endif %}
{% ifversion ghec or ghes > 3.3 or ghae-issue-5091 %}
{% data reusables.organizations.navigate-to-org %}
{% data reusables.organizations.org_settings %}
{% data reusables.organizations.settings-sidebar-actions-runner-groups %}
1. In the "Runner groups" section, click **New runner group**.
1. Enter a name for your runner group.
{% data reusables.actions.runner-group-assign-policy-repo %}
{% warning %}
**Warning**: {% indented_data_reference reusables.actions.self-hosted-runner-security spaces=3 %}
For more information, see "[About self-hosted runners](/actions/hosting-your-own-runners/about-self-hosted-runners#self-hosted-runner-security-with-public-repositories)."
{% endwarning %}
{% data reusables.actions.runner-group-assign-policy-workflow %}{%- ifversion restrict-groups-to-workflows %} Organization-owned runner groups cannot access workflows from a different organization in the enterprise; instead, you must create an enterprise-owned runner group.{% endif %}
{% data reusables.actions.self-hosted-runner-create-group %}
{% elsif ghae or ghes < 3.4 %}
{% data reusables.organizations.navigate-to-org %}
{% data reusables.organizations.org_settings %}
{% data reusables.organizations.settings-sidebar-actions-runner-groups %}
1. Under {% ifversion ghes or ghae %}"Runners"{% endif %}, click **Add new**, and then **New group**.
![Add runner group](/assets/images/help/settings/actions-org-add-runner-group.png)
1. Enter a name for your runner group, and assign a policy for repository access.
You can configure a runner group to be accessible to a specific list of repositories, or to all repositories in the organization.{% ifversion ghec or ghes %} By default, only private repositories can access runners in a runner group, but you can override this. This setting can't be overridden if configuring an organization's runner group that was shared by an enterprise.{% endif %}
{%- ifversion ghes %}
{% warning %}
**Warning**:
{% indented_data_reference reusables.actions.self-hosted-runner-security spaces=3 %}
For more information, see "[About self-hosted runners](/actions/hosting-your-own-runners/about-self-hosted-runners#self-hosted-runner-security-with-public-repositories)."
{% endwarning %}
{%- endif %}
![Add runner group options](/assets/images/help/settings/actions-org-add-runner-group-options.png)
1. Click **Save group** to create the group and apply the policy.
{% endif %}
{% data reusables.actions.creating-a-runner-group-for-an-organization %}
## Creating a self-hosted runner group for an enterprise
Enterprises can add their self-hosted runners to groups for access management. Enterprises can create groups of self-hosted runners that are accessible to specific organizations in the enterprise account{% ifversion restrict-groups-to-workflows %} or to specific workflows{% endif %}. Organization owners can then assign additional granular repository{% ifversion restrict-groups-to-workflows %} or workflow{% endif %} access policies to the enterprise runner groups. For information about how to create a self-hosted runner group with the REST API, see the enterprise endpoints in the [{% data variables.product.prodname_actions %} REST API](/rest/reference/actions#self-hosted-runner-groups).
{%- ifversion ghec or ghes %}
Self-hosted runners are automatically assigned to the default group when created, and can only be members of one group at a time. You can assign the runner to a specific group during the registration process, or you can later move the runner from the default group to a custom group.
{% data reusables.actions.self-hosted-runner-security-admonition %}
When creating a group, you must choose a policy that defines which organizations have access to the runner group.
{%- endif %}
{% data reusables.actions.self-hosted-runner-groups-add-to-enterprise-first-steps %}
1. To choose a policy for organization access, select the **Organization access** drop-down, and click a policy. You can configure a runner group to be accessible to a specific list of organizations, or all organizations in the enterprise.{% ifversion ghes %} By default, only private repositories can access runners in a runner group, but you can override this.{% endif %}
{%- ifversion ghec or ghes %}
{% warning %}
**Warning**:
{% indented_data_reference reusables.actions.self-hosted-runner-security spaces=3 %}
For more information, see "[About self-hosted runners](/actions/hosting-your-own-runners/about-self-hosted-runners#self-hosted-runner-security-with-public-repositories)."
{% endwarning %}
{%- endif %}
{%- ifversion ghec or ghes %}
![Add runner group options](/assets/images/help/settings/actions-enterprise-account-add-runner-group-options.png)
{%- elsif ghae %}
![Add runner group options](/assets/images/help/settings/actions-enterprise-account-add-runner-group-options-ae.png)
{%- endif %}
{% data reusables.actions.runner-group-assign-policy-workflow %}
1. Click **Save group** to create the group and apply the policy.
{% data reusables.actions.creating-a-runner-group-for-an-enterprise %}
{% endif %}
## Changing the access policy of a self-hosted runner group
For runner groups in an enterprise, you can change what organizations in the enterprise can access a runner group{% ifversion restrict-groups-to-workflows %} or restrict what workflows a runner group can run{% endif %}. For runner groups in an organization, you can change what repositories in the organization can access a runner group{% ifversion restrict-groups-to-workflows %} or restrict what workflows a runner group can run{% endif %}.
{%- ifversion fpt or ghec or ghes %}
### Changing what organizations or repositories can access a runner group
{% data reusables.actions.self-hosted-runner-security-admonition %}
{% ifversion fpt or ghec or ghes > 3.3 or ghae-issue-5091 %}
{% data reusables.actions.self-hosted-runner-groups-navigate-to-repo-org-enterprise %}
{% data reusables.actions.settings-sidebar-actions-runner-groups-selection %}
1. For runner groups in an enterprise, under **Organization access**, modify what organizations can access the runner group. For runner groups in an organization, under **Repository access**, modify what repositories can access the runner group.
{%- endif %}
{%- ifversion fpt or ghec or ghes %}
{% warning %}
**Warning**:
{% indented_data_reference reusables.actions.self-hosted-runner-security spaces=3 %}
For more information, see "[About self-hosted runners](/actions/hosting-your-own-runners/about-self-hosted-runners#self-hosted-runner-security-with-public-repositories)."
{% endwarning %}
{%- endif %}
{% elsif ghae or ghes < 3.4 %}
{% data reusables.actions.self-hosted-runner-configure-runner-group-access %}
{% endif %}
{% ifversion restrict-groups-to-workflows %}
### Changing what workflows can access a runner group
You can configure a self-hosted runner group to run either selected workflows or all workflows. For example, you might use this setting to protect secrets that are stored on self-hosted runners or to standardize deployment workflows by restricting a runner group to run only a specific reusable workflow. This setting cannot be overridden if you are configuring an organization's runner group that was shared by an enterprise.
{% data reusables.actions.self-hosted-runner-groups-navigate-to-repo-org-enterprise %}
{% data reusables.actions.settings-sidebar-actions-runner-groups-selection %}
1. Under **Workflow access**, select the dropdown menu and click **Selected workflows**.
1. Click {% octicon "gear" aria-label="the gear icon" %}.
1. Enter a comma separated list of the workflows that can access the runner group. Use the full path, including the repository name and owner. Pin the workflow to a branch, tag, or full SHA. For example: `octo-org/octo-repo/.github/workflows/build.yml@v2, octo-org/octo-repo/.github/workflows/deploy.yml@d6dc6c96df4f32fa27b039f2084f576ed2c5c2a5, monalisa/octo-test/.github/workflows/test.yml@main`.
Only jobs directly defined within the selected workflows will have access to the runner group.
Organization-owned runner groups cannot access workflows from a different organization in the enterprise; instead, you must create an enterprise-owned runner group.
1. Click **Save**.
{% endif %}
{% data reusables.actions.changing-the-access-policy-of-a-runner-group %}
## Changing the name of a runner group
{% ifversion fpt or ghec or ghes > 3.3 or ghae-issue-5091 %}
{% data reusables.actions.self-hosted-runner-groups-navigate-to-repo-org-enterprise %}
{% data reusables.actions.settings-sidebar-actions-runner-groups-selection %}
1. Change the runner group name.
{% elsif ghae or ghes < 3.4 %}
{% data reusables.actions.self-hosted-runner-configure-runner-group %}
1. Change the runner group name.
{% endif %}
{% data reusables.actions.changing-the-name-of-a-runner-group %}
{% ifversion ghec or ghes or ghae %}
## Automatically adding a self-hosted runner to a group
You can use the configuration script to automatically add a new self-hosted runner to a group. For example, this command registers a new self-hosted runner and uses the `--runnergroup` parameter to add it to a group named `rg-runnergroup`.
```sh
./config.sh --url $org_or_enterprise_url --token $token --runnergroup rg-runnergroup
```
The command will fail if the runner group doesn't exist:
```
Could not find any self-hosted runner group named "rg-runnergroup".
```
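As a concrete sketch with hypothetical values filled in, registering an organization-level runner into the group might look like this:

```sh
# The URL and registration token below are placeholders
./config.sh --url https://github.com/octo-org \
    --token AREGISTRATIONTOKENEXAMPLE \
    --runnergroup rg-runnergroup
```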
{% data reusables.actions.automatically-adding-a-runner-to-a-group %}
## Moving a self-hosted runner to a group
If you don't specify a runner group during the registration process, your new self-hosted runners are automatically assigned to the default group, and can then be moved to another group.
{% data reusables.actions.self-hosted-runner-navigate-to-org-enterprise %}
{% ifversion ghec or ghes > 3.3 or ghae-issue-5091 %}
1. In the "Runners" list, click the runner that you want to configure.
2. Select the **Runner group** drop-down.
3. In "Move runner to group", choose a destination group for the runner.
{% elsif ghae or ghes < 3.4 %}
1. In the {% ifversion ghes or ghae %}"Runner groups"{% endif %} section of the settings page, locate the current group of the runner you want to move and expand the list of group members.
![View runner group members](/assets/images/help/settings/actions-org-runner-group-members.png)
2. Select the checkbox next to the self-hosted runner, and then click **Move to group** to see the available destinations.
![Runner group member move](/assets/images/help/settings/actions-org-runner-group-member-move.png)
3. To move the runner, click on the destination group.
![Runner group member move](/assets/images/help/settings/actions-org-runner-group-member-move-destination.png)
{% endif %}
{% data reusables.actions.moving-a-runner-to-a-group %}
## Removing a self-hosted runner group
Self-hosted runners are automatically returned to the default group when their group is removed.
{% ifversion ghes or ghae or ghec %}
{% data reusables.actions.self-hosted-runner-groups-navigate-to-repo-org-enterprise %}
1. In the list of groups, to the right of the group you want to delete, click {% octicon "kebab-horizontal" aria-label="The horizontal kebab icon" %}.
2. To remove the group, click **Remove group**.
3. Review the confirmation prompts, and click **Remove this runner group**.
{% data reusables.actions.removing-a-runner-group %}
{% endif %}
{% endif %}

View File

@@ -425,11 +425,11 @@ This example `jobs` context contains the result and outputs of a job from a reus
```json
{
example_job: {
result: success,
outputs: {
output1: hello,
output2: world
"example_job": {
"result": "success",
"outputs": {
"output1": "hello",
"output2": "world"
}
}
}
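In a reusable workflow, the `jobs` context is typically used to map job outputs to the outputs of the `workflow_call` trigger. A minimal sketch, assuming `example_job` defines the two outputs shown above:

{% raw %}
```yaml
on:
  workflow_call:
    # Map the job outputs to workflow outputs for the caller
    outputs:
      output1:
        value: ${{ jobs.example_job.outputs.output1 }}
      output2:
        value: ${{ jobs.example_job.outputs.output2 }}
```
{% endraw %}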
@@ -488,7 +488,7 @@ The `steps` context contains information about the steps in the current job that
This example `steps` context shows two previous steps that had an [`id`](/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idstepsid) specified. The first step had the `id` named `checkout`, the second `generate_number`. The `generate_number` step had an output named `random_number`.
```yaml
```json
{
"checkout": {
"outputs": {},
@@ -549,7 +549,7 @@ The `runner.workspace` property is purposefully not documented. It is an early A
The following example context is from a Linux {% data variables.product.prodname_dotcom %}-hosted runner.
```yaml
```json
{
"os": "Linux",
"arch": "X64",
@@ -606,7 +606,7 @@ The `secrets` context contains the names and values of secrets that are availabl
The following example contents of the `secrets` context shows the automatic `GITHUB_TOKEN`, as well as two other secrets available to the workflow run.
```yaml
```json
{
"github_token": "***",
"NPM_TOKEN": "***",
@@ -634,7 +634,7 @@ For workflows with a matrix, the `strategy` context contains information about t
The following example contents of the `strategy` context is from a matrix with four jobs, and is taken from the final job. Note the difference between the zero-based `job-index` number, and `job-total` which is not zero-based.
```yaml
```json
{
"fail-fast": true,
"job-index": 3,
@@ -683,7 +683,7 @@ There are no standard properties in the `matrix` context, only those which are d
The following example contents of the `matrix` context is from a job in a matrix that has the `os` and `node` matrix properties defined in the workflow. The job is executing the matrix combination of an `ubuntu-latest` OS and Node.js version `16`.
```yaml
```json
{
"os": "ubuntu-latest",
"node": 16
@@ -732,7 +732,7 @@ The `needs` context contains outputs from all jobs that are defined as a depende
The following example contents of the `needs` context shows information for two jobs that the current job depends on.
```yaml
```json
{
"build": {
"result": "success",
@@ -800,7 +800,7 @@ There are no standard properties in the `inputs` context, only those which are d
The following example contents of the `inputs` context is from a workflow that has defined the `build_id`, `deploy_target`, and `perform_deploy` inputs.
```yaml
```json
{
"build_id": 123456768,
"deploy_target": "deployment_sys_1a",

View File

@@ -2,6 +2,7 @@
title: Understanding GitHub Actions
shortTitle: Understanding GitHub Actions
intro: 'Learn the basics of {% data variables.product.prodname_actions %}, including core concepts and essential terminology.'
miniTocMaxHeadingLevel: 3
redirect_from:
- /github/automating-your-workflow-with-github-actions/core-concepts-for-github-actions
- /actions/automating-your-workflow-with-github-actions/core-concepts-for-github-actions
@@ -82,7 +83,7 @@ For more information, see "[Creating actions](/actions/creating-actions)."
### Runners
{% data reusables.actions.about-runners %} Each runner can run a single job at a time. {% ifversion ghes or ghae %} You must host your own runners for {% data variables.product.product_name %}. {% elsif fpt or ghec %}{% data variables.product.company_short %} provides Ubuntu Linux, Microsoft Windows, and macOS runners to run your workflows; each workflow run executes in a fresh, newly-provisioned virtual machine. If you need a different operating system or require a specific hardware configuration, you can host your own runners.{% endif %} For more information{% ifversion fpt or ghec %} about self-hosted runners{% endif %}, see "[Hosting your own runners](/actions/hosting-your-own-runners)."
{% data reusables.actions.about-runners %} Each runner can run a single job at a time. {% ifversion ghes or ghae %} You must host your own runners for {% data variables.product.product_name %}. {% elsif fpt or ghec %}{% data variables.product.company_short %} provides Ubuntu Linux, Microsoft Windows, and macOS runners to run your workflows; each workflow run executes in a fresh, newly-provisioned virtual machine. {% ifversion actions-hosted-runners %} {% data variables.product.prodname_dotcom %} also offers {% data variables.actions.hosted_runner %}s, which are available in larger configurations. For more information, see "[Using {% data variables.actions.hosted_runner %}s](/actions/using-github-hosted-runners/using-larger-runners)." {% endif %}If you need a different operating system or require a specific hardware configuration, you can host your own runners.{% endif %} For more information{% ifversion fpt or ghec %} about self-hosted runners{% endif %}, see "[Hosting your own runners](/actions/hosting-your-own-runners)."
{% data reusables.actions.workflow-basic-example-and-explanation %}

View File

@@ -98,9 +98,9 @@ Each time you create a new release, you can trigger a workflow to publish your p
### Configuring the destination repository
If you don't provide the `repository` key in your *package.json* file, then {% data variables.product.prodname_registry %} publishes a package in the {% data variables.product.prodname_dotcom %} repository you specify in the `name` field of the *package.json* file. For example, a package named `@my-org/test` is published to the `my-org/test` {% data variables.product.prodname_dotcom %} repository.
Linking your package to {% data variables.product.prodname_registry %} using the `repository` key is optional. If you choose not to provide the `repository` key in your *package.json* file, then {% data variables.product.prodname_registry %} publishes a package in the {% data variables.product.prodname_dotcom %} repository you specify in the `name` field of the *package.json* file. For example, a package named `@my-org/test` is published to the `my-org/test` {% data variables.product.prodname_dotcom %} repository. If the `url` specified in the `repository` key is invalid, your package may still be published; however, it won't be linked to the repository source as intended.
However, if you do provide the `repository` key, then the repository in that key is used as the destination npm registry for {% data variables.product.prodname_registry %}. For example, publishing the below *package.json* results in a package named `my-amazing-package` published to the `octocat/my-other-repo` {% data variables.product.prodname_dotcom %} repository.
If you do provide the `repository` key in your *package.json* file, then the repository in that key is used as the destination npm registry for {% data variables.product.prodname_registry %}. For example, publishing the below *package.json* results in a package named `my-amazing-package` published to the `octocat/my-other-repo` {% data variables.product.prodname_dotcom %} repository. Once published, only the repository source is updated, and the package doesn't inherit any permissions from the destination repository.
```json
{

View File

@@ -86,6 +86,15 @@ While the job runs, the logs and output can be viewed in the {% data variables.p
## Supported runners and hardware resources
{% ifversion actions-hosted-runners %}
{% note %}
**Note**: {% data variables.product.prodname_dotcom %} also offers {% data variables.actions.hosted_runner %}s, which are available in larger configurations. For more information, see "[Using {% data variables.actions.hosted_runner %}s](/actions/using-github-hosted-runners/using-larger-runners)."
{% endnote %}
{% endif %}
Hardware specification for Windows and Linux virtual machines:
- 2-core CPU (x86_64)
- 7 GB of RAM

View File

@@ -0,0 +1,50 @@
---
title: Controlling access to larger runners
intro: You can use policies to limit access to {% data variables.actions.hosted_runner %}s that have been added to an organization or enterprise.
product: '{% data reusables.gated-features.hosted-runners %}'
versions:
feature: 'actions-hosted-runners'
type: tutorial
shortTitle: Controlling access to {% data variables.actions.hosted_runner %}s
---
{% data reusables.actions.enterprise-beta %}
{% data reusables.actions.enterprise-github-hosted-runners %}
## About runner groups
{% data reusables.actions.about-runner-groups %} {% ifversion fpt %}For more information, see the [{% data variables.product.prodname_ghe_cloud %} documentation](/enterprise-cloud@latest/actions/using-github-hosted-runners/controlling-access-to-larger-runners).{% endif %}
{% ifversion ghec or ghes or ghae %}
## Creating a runner group for an organization
{% data reusables.actions.hosted-runner-security-admonition %}
{% data reusables.actions.creating-a-runner-group-for-an-organization %}
## Creating a runner group for an enterprise
{% data reusables.actions.hosted-runner-security-admonition %}
{% data reusables.actions.creating-a-runner-group-for-an-enterprise %}
{% endif %}
## Changing the access policy of a runner group
{% data reusables.actions.hosted-runner-security-admonition %}
{% data reusables.actions.changing-the-access-policy-of-a-runner-group %}
## Changing the name of a runner group
{% data reusables.actions.changing-the-name-of-a-runner-group %}
{% ifversion ghec or ghes or ghae %}
## Moving a runner to a group
{% data reusables.actions.moving-a-runner-to-a-group %}
## Removing a runner group
{% data reusables.actions.removing-a-runner-group %}
{% endif %}

View File

@@ -7,6 +7,8 @@ versions:
ghes: '*'
children:
- /about-github-hosted-runners
- /using-larger-runners
- /controlling-access-to-larger-runners
- /monitoring-your-current-jobs
- /customizing-github-hosted-runners
- /connecting-to-a-private-network

View File

@@ -0,0 +1,135 @@
---
title: Using larger runners
intro: '{% data variables.product.prodname_dotcom %} offers larger runners with more RAM and CPU.'
miniTocMaxHeadingLevel: 3
product: '{% data reusables.gated-features.hosted-runners %}'
versions:
feature: 'actions-hosted-runners'
shortTitle: Using {% data variables.actions.hosted_runner %}s
---
## Overview of {% data variables.actions.hosted_runner %}s
In addition to the [standard {% data variables.product.prodname_dotcom %}-hosted runners](/actions/using-github-hosted-runners/about-github-hosted-runners#supported-runners-and-hardware-resources), {% data variables.product.prodname_dotcom %} also offers customers on {% data variables.product.prodname_team %} and {% data variables.product.prodname_ghe_cloud %} plans a range of {% data variables.actions.hosted_runner %}s with more RAM and CPU. These runners are hosted by {% data variables.product.prodname_dotcom %} and have the runner application and other tools preinstalled.
When you add a {% data variables.actions.hosted_runner %} to an organization, you are defining a type of machine from a selection of available hardware specifications and operating system images. {% data variables.product.prodname_dotcom %} will then create multiple instances of this runner that scale up and down to match the job demands of your organization, based on the autoscaling limits you define.
## Architectural overview of {% data variables.actions.hosted_runner %}s
The {% data variables.actions.hosted_runner %}s are managed at the organization level, where they are arranged into groups that can contain multiple instances of the runner. They can also be created at the enterprise level and shared with organizations in the hierarchy. Once you've created a group, you can then add a runner to the group and update your workflows to target the label assigned to the {% data variables.actions.hosted_runner %}. You can also control which repositories are permitted to send jobs to the group for processing. For more information about groups, see "[Controlling access to {% data variables.actions.hosted_runner %}s](/actions/using-github-hosted-runners/controlling-access-to-larger-runners)."
In the following diagram, a class of hosted runner named `ubuntu-20.04-16core` has been defined with customized hardware and operating system configuration.
![Diagram explaining {% data variables.actions.hosted_runner %}](/assets/images/hosted-runner.png)
1. Instances of this runner are automatically created and added to a group called `ubuntu-20.04-16core`.
2. The runners have been assigned the label `ubuntu-20.04-16core`.
3. Workflow jobs use the `ubuntu-20.04-16core` label in their `runs-on` key to indicate the type of runner they need to execute the job.
4. {% data variables.product.prodname_actions %} checks the runner group to see if your repository is authorized to send jobs to the runner.
5. The job runs on the next available instance of the `ubuntu-20.04-16core` runner.
## Autoscaling {% data variables.actions.hosted_runner %}s
Your {% data variables.actions.hosted_runner %}s can be configured to automatically scale to suit your needs. When jobs are submitted for processing, more machines can be automatically provisioned to run the jobs, until reaching a pre-defined maximum limit. Each machine only handles one job at a time, so these settings effectively determine the number of jobs that can be run concurrently.
During the runner deployment process, you can configure the _Max_ option, which allows you to control your costs by setting the maximum number of machines that can run in parallel in this set. A higher value here can help avoid workflows being blocked by this parallelism limit.
## Networking for {% data variables.actions.hosted_runner %}s
By default, {% data variables.actions.hosted_runner %}s receive a dynamic IP address that changes for each job run. Optionally, {% data variables.product.prodname_ghe_cloud %} customers can configure their {% data variables.actions.hosted_runner %}s to receive a static IP address from {% data variables.product.prodname_dotcom %}'s IP address pool. When enabled, instances of the {% data variables.actions.hosted_runner %} will receive an address from a range that is unique to the runner, allowing you to use this range to configure a firewall allowlist. You can use up to 10 static IP address ranges in total across all your {% data variables.actions.hosted_runner %}s.
{% note %}
**Note**: If runners are unused for more than 30 days, their IP address ranges are automatically removed and cannot be recovered.
{% endnote %}
## Planning for {% data variables.actions.hosted_runner %}s
### Create a runner group
Runner groups are used to collect sets of virtual machines and create a security boundary around them. You can then decide which organizations or repositories are permitted to run jobs on those sets of machines. During the {% data variables.actions.hosted_runner %} deployment process, the runner can be added to an existing group; otherwise, it will join a default group. You can create a group by following the steps in "[Controlling access to {% data variables.actions.hosted_runner %}s](/actions/using-github-hosted-runners/controlling-access-to-larger-runners)."
### Understanding billing
Compared to standard {% data variables.product.prodname_dotcom %}-hosted runners, {% data variables.actions.hosted_runner %}s are billed differently. For more information, see "[Per-minute rates](/billing/managing-billing-for-github-actions/about-billing-for-github-actions#per-minute-rates)".
## Adding a {% data variables.actions.hosted_runner %} to an enterprise
You can add {% data variables.actions.hosted_runner %}s to an enterprise, where they can be assigned to multiple organizations. The organization admins can then control which repositories can use the runners. To add a {% data variables.actions.hosted_runner %} to an enterprise, you must be an enterprise owner.
{% data reusables.actions.add-hosted-runner-overview %}
{% data reusables.enterprise-accounts.access-enterprise %}
{% data reusables.enterprise-accounts.policies-tab %}
{% data reusables.enterprise-accounts.actions-tab %}
{% data reusables.enterprise-accounts.actions-runners-tab %}
{% data reusables.actions.add-hosted-runner %}
1. To allow organizations to access your {% data variables.actions.hosted_runner %}s, you specify the list of organizations that can use it. For more information, see "[Managing access to your runners](#managing-access-to-your-runners)."
## Adding a {% data variables.actions.hosted_runner %} to an organization
You can add a {% data variables.actions.hosted_runner %} to an organization, where the organization admins can control which repositories can use it.
{% data reusables.actions.add-hosted-runner-overview %}
{% data reusables.organizations.navigate-to-org %}
{% data reusables.organizations.org_settings %}
{% data reusables.organizations.settings-sidebar-actions-runners %}
{% data reusables.actions.add-hosted-runner %}
1. To allow repositories to access your {% data variables.actions.hosted_runner %}s, add them to the list of repositories that can use it. For more information, see "[Managing access to your runners](#managing-access-to-your-runners)."
## Running jobs on your runner
Once your runner type has been defined, you can update your workflows to send jobs to the runner instances for processing. In this example, a runner group is populated with Ubuntu 16-core runners, which have been assigned the label `ubuntu-20.04-16core`. If you have a runner matching this label, the `check-bats-version` job uses the `runs-on` key to target that runner whenever the job is run:
```yaml
name: learn-github-actions
on: [push]
jobs:
check-bats-version:
runs-on: ubuntu-20.04-16core
steps:
- uses: {% data reusables.actions.action-checkout %}
- uses: {% data reusables.actions.action-setup-node %}
with:
node-version: '14'
- run: npm install -g bats
- run: bats -v
```
## Managing access to your runners
{% note %}
**Note**: Before your workflows can send jobs to {% data variables.actions.hosted_runner %}s, you must first configure permissions for the runner group. See the following sections for more information.
{% endnote %}
Runner groups are used to control which repositories can run jobs on your {% data variables.actions.hosted_runner %}s. You must grant access to the group from each level of the management hierarchy, depending on where you've defined the {% data variables.actions.hosted_runner %}:
- **Runners at the enterprise level**: Configure the runner group to grant access to all the required organizations. In addition, for each organization, you must configure the group to specify which repositories are allowed access.
- **Runners at the organization level**: Configure the runner group by specifying which repositories are allowed access.
For example, the following diagram has a runner group named `grp-ubuntu-20.04-16core` at the enterprise level. Before the repository named `octo-repo` can use the runners in the group, you must first configure the group at the enterprise level to allow access from the `octo-org` organization; you must then configure the group at the organization level to allow access from `octo-repo`:
![Diagram explaining {% data variables.actions.hosted_runner %} groups](/assets/images/hosted-runner-mgmt.png)
### Allowing repositories to access a runner group
This procedure demonstrates how to configure group permissions at the enterprise and organization levels:
{% data reusables.actions.runner-groups-navigate-to-repo-org-enterprise %}
{% data reusables.actions.settings-sidebar-actions-runner-groups-selection %}
- For runner groups in an enterprise: under **Organization access**, modify which organizations can access the runner group.
- For runner groups in an organization: under **Repository access**, modify which repositories can access the runner group.
{% warning %}
**Warning**:
{% data reusables.actions.hosted-runner-security %}
For more information, see "[Controlling access to {% data variables.actions.hosted_runner %}s](/actions/using-github-hosted-runners/controlling-access-to-larger-runners)."
{% endwarning %}

View File

@@ -51,7 +51,7 @@ For more information on workflow run artifacts, see "[Persisting workflow data u
A workflow can access and restore a cache created in the current branch, the base branch (including base branches of forked repositories), or the default branch (usually `main`). For example, a cache created on the default branch would be accessible from any pull request. Also, if the branch `feature-b` has the base branch `feature-a`, a workflow triggered on `feature-b` would have access to caches created in the default branch (`main`), `feature-a`, and `feature-b`.
Access restrictions provide cache isolation and security by creating a logical boundary between different branches. For example, a cache created for the branch `feature-a` (with the base `main`) would not be accessible to a pull request for the branch `feature-c` (with the base `main`).
Access restrictions provide cache isolation and security by creating a logical boundary between different branches or tags. For example, a cache created for the branch `feature-a` (with the base `main`) would not be accessible to a pull request for the branch `feature-c` (with the base `main`). Similarly, a cache created for the tag `release-a` (with the base `main`) would not be accessible to a workflow triggered for the tag `release-b` (with the base `main`).
Multiple workflows within a repository share cache entries. A cache created for a branch within a workflow can be accessed and restored from another workflow for the same repository and branch.
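For example, a minimal caching step using the `actions/cache` action might look like the following sketch (the path and key names are illustrative); any cache it restores is subject to the branch and tag isolation rules described above:

{% raw %}
```yaml
- name: Cache npm dependencies
  uses: actions/cache@v3
  with:
    path: ~/.npm
    # The key includes a hash of the lockfile, so it changes when dependencies change
    key: ${{ runner.os }}-npm-${{ hashFiles('**/package-lock.json') }}
    restore-keys: |
      ${{ runner.os }}-npm-
```
{% endraw %}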

View File

@@ -23,7 +23,7 @@ topics:
Rather than copying and pasting from one workflow to another, you can make workflows reusable. You and anyone with access to the reusable workflow can then call the reusable workflow from another workflow.
Reusing workflows avoids duplication. This makes workflows easier to maintain and allows you to create new workflows more quickly by building on the work of others, just as you do with actions. Workflow reuse also promotes best practice by helping you to use workflows that are well designed, have already been tested, and have been proved to be effective. Your organization can build up a library of reusable workflows that can be centrally maintained.
Reusing workflows avoids duplication. This makes workflows easier to maintain and allows you to create new workflows more quickly by building on the work of others, just as you do with actions. Workflow reuse also promotes best practice by helping you to use workflows that are well designed, have already been tested, and have been proven to be effective. Your organization can build up a library of reusable workflows that can be centrally maintained.
The diagram below shows three build jobs on the left of the diagram. After each of these jobs completes successfully a dependent job called "Deploy" runs. This job calls a reusable workflow that contains three jobs: "Staging", "Review", and "Production." The "Production" deployment job only runs after the "Staging" job has completed successfully. Using a reusable workflow to run deployment jobs allows you to run those jobs for each build without duplicating code in workflows.
@@ -127,7 +127,7 @@ You can define inputs and secrets, which can be passed from the caller workflow
runs-on: ubuntu-latest
environment: production
steps:
- uses: ./.github/workflows/my-action
- uses: octo-org/my-action@v1
with:
username: ${{ inputs.username }}
token: ${{ secrets.envPAT }}
@@ -168,12 +168,13 @@ jobs:
name: Pass input and secrets to my-action
runs-on: ubuntu-latest
steps:
- uses: ./.github/workflows/my-action
- uses: octo-org/my-action@v1
with:
username: ${{ inputs.username }}
token: ${{ secrets.token }}
```
{% endraw %}
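For reference, a caller workflow invokes a reusable workflow with the `uses` key at the job level, passing inputs and secrets explicitly. A minimal sketch (the repository, workflow, and secret names are hypothetical):

{% raw %}
```yaml
name: Call a reusable workflow

on: [push]

jobs:
  call-workflow:
    # Reference the reusable workflow by owner, repository, path, and ref
    uses: octo-org/example-repo/.github/workflows/reusable-workflow.yml@v1
    with:
      username: mona
    secrets:
      token: ${{ secrets.ENV_PAT }}
```
{% endraw %}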
{% ifversion actions-reusable-workflow-matrix %}
## Using a matrix strategy with a reusable workflow

View File

@@ -334,6 +334,12 @@ Write-Output "::add-mask::Mona The Octocat"
{% endpowershell %}
{% warning %}
**Warning:** Make sure you register the secret with 'add-mask' before outputting it in the build logs or using it in any other workflow commands.
{% endwarning %}
### Example: Masking an environment variable
When you print the variable `MY_NAME` or the value `"Mona The Octocat"` in the log, you'll see `"***"` instead of `"Mona The Octocat"`.
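A minimal sketch of this, assuming a workflow-level environment variable named `MY_NAME`:

```yaml
jobs:
  mask-example:
    runs-on: ubuntu-latest
    env:
      MY_NAME: "Mona The Octocat"
    steps:
      # Register the value as a mask before anything prints it
      - run: echo "::add-mask::$MY_NAME"
      # The log shows "***" instead of the masked value
      - run: echo "$MY_NAME"
```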

View File

@@ -0,0 +1,52 @@
---
title: Configuring dependency review for your appliance
shortTitle: Configuring dependency review
intro: 'To help users understand dependency changes when reviewing pull requests, you can enable, configure, and disable dependency review for {% data variables.product.product_location %}.'
product: '{% data reusables.gated-features.dependency-review %}'
miniTocMaxHeadingLevel: 3
versions:
feature: dependency-review-action-ghes
type: how_to
topics:
- Advanced Security
- Enterprise
- Dependency review
- Security
---
{% data reusables.dependency-review.beta %}
## About dependency review
{% data reusables.dependency-review.feature-overview %}
Some additional features, such as license checks, blocking of pull requests, and CI/CD integration, are available with the [dependency review action](https://github.com/actions/dependency-review-action).
## Checking whether your license includes {% data variables.product.prodname_GH_advanced_security %}
{% data reusables.advanced-security.check-for-ghas-license %}
## Prerequisites for dependency review
- A license for {% data variables.product.prodname_GH_advanced_security %}{% ifversion ghes %} (see "[About billing for {% data variables.product.prodname_GH_advanced_security %}](/billing/managing-billing-for-github-advanced-security/about-billing-for-github-advanced-security)").{% endif %}
- The dependency graph enabled for the instance. Site administrators can enable the dependency graph via the management console or the administrative shell (see "[Enabling the dependency graph for your enterprise](/admin/code-security/managing-supply-chain-security-for-your-enterprise/enabling-the-dependency-graph-for-your-enterprise)").
- {% data variables.product.prodname_github_connect %} enabled to download and synchronize vulnerabilities from the {% data variables.product.prodname_advisory_database %}. This is usually configured as part of setting up {% data variables.product.prodname_dependabot %} (see "[Enabling Dependabot for your enterprise](/admin/configuration/configuring-github-connect/enabling-dependabot-for-your-enterprise)").
## Enabling and disabling dependency review
To enable or disable dependency review, you need to enable or disable the dependency graph for your instance.
For more information, see "[Enabling the dependency graph for your enterprise](/admin/code-security/managing-supply-chain-security-for-your-enterprise/enabling-the-dependency-graph-for-your-enterprise)."
## Running dependency review using {% data variables.product.prodname_actions %}
{% data reusables.dependency-review.dependency-review-action-beta-note %}
The dependency review action is included in your installation of {% data variables.product.prodname_ghe_server %}. It is available for all repositories that have {% data variables.product.prodname_GH_advanced_security %} and dependency graph enabled.
{% data reusables.dependency-review.dependency-review-action-overview %}
Users run the dependency review action using a {% data variables.product.prodname_actions %} workflow. If you have not already set up runners for {% data variables.product.prodname_actions %}, you must do this to enable users to run workflows. You can provision self-hosted runners at the repository, organization, or enterprise account level. For information, see "[About self-hosted runners](/actions/hosting-your-own-runners/about-self-hosted-runners)" and "[Adding self-hosted runners](/actions/hosting-your-own-runners/adding-self-hosted-runners)."
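A minimal workflow that runs the dependency review action on pull requests might look like the following sketch (the action versions shown are illustrative):

```yaml
name: Dependency review
on: [pull_request]

permissions:
  contents: read

jobs:
  dependency-review:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v3
      - name: Dependency review
        uses: actions/dependency-review-action@v2
```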

View File

@@ -14,6 +14,7 @@ topics:
children:
- /enabling-github-advanced-security-for-your-enterprise
- /configuring-code-scanning-for-your-appliance
- /configuring-dependency-review-for-your-appliance
- /configuring-secret-scanning-for-your-appliance
---

View File

@@ -23,7 +23,7 @@ You can enable the dependency graph via the {% data variables.enterprise.managem
## Enabling the dependency graph via the {% data variables.enterprise.management_console %}
If your {% data variables.product.product_location %} uses clustering, you cannot enable the dependency graph with the {% data variables.enterprise.management_console %} and must use the administrative shell instead. For more information, see "[Enabling the dependency graph via the administrative shell](#enabling-the-dependency-graph-via-the-administrative-shell)."
If {% data variables.product.product_location %} uses clustering, you cannot enable the dependency graph with the {% data variables.enterprise.management_console %} and must use the administrative shell instead. For more information, see "[Enabling the dependency graph via the administrative shell](#enabling-the-dependency-graph-via-the-administrative-shell)."
{% data reusables.enterprise_site_admin_settings.sign-in %}
{% data reusables.enterprise_site_admin_settings.access-settings %}

View File

@@ -16,7 +16,9 @@ topics:
---
If you configure a hostname instead of a hard-coded IP address, you will be able to change the physical hardware that {% data variables.product.product_location %} runs on without affecting users or client software.
The hostname setting in the {% data variables.enterprise.management_console %} should be set to an appropriate fully qualified domain name (FQDN) which is resolvable on the internet or within your internal network. For example, your hostname setting could be `github.companyname.com`. Web and API requests will automatically redirect to the hostname configured in the {% data variables.enterprise.management_console %}. Note that `localhost` is not a valid hostname setting.
Hostnames must be less than 63 characters in length per [Section 2.3.4 of the Domain Names Specification RFC](https://datatracker.ietf.org/doc/html/rfc1035#section-2.3.4).
After you configure a hostname, you can enable subdomain isolation to further increase the security of {% data variables.product.product_location %}. For more information, see "[Enabling subdomain isolation](/enterprise/admin/guides/installation/enabling-subdomain-isolation/)."

View File

@@ -90,6 +90,14 @@ settings to allow incoming emails](#configuring-dns-and-firewall-settings-to-all
You can enforce TLS encryption for all incoming SMTP connections, which can help satisfy an ISO-27017 certification requirement.
{%- ifversion ghes = 3.6 %}
{% note %}
**Note**: Enforcement of TLS for SMTP connections is unavailable in {% data variables.product.product_name %} 3.6.0. The feature will be available in an upcoming release.
{% endnote %}
{%- endif %}
{% data reusables.enterprise_site_admin_settings.email-settings %}
1. Under "Authentication," select **Enforce TLS auth (recommended)**.

View File

@@ -131,46 +131,49 @@ Key | Description
{% ifversion ghes %}
## Indexing
GitHub's [code search][] features are powered by [ElasticSearch][]. This section of the site admin dashboard shows you the current status of your ElasticSearch cluster and provides you with several tools to control the behavior of searching and indexing. These tools are split into the following three categories.
GitHub's search features are powered by Elasticsearch. This section of the site admin dashboard shows you the current status of your Elasticsearch cluster and provides you with several tools to control search and index behavior.
[Code Search]: https://github.com/blog/1381-a-whole-new-code-search
[ElasticSearch]: http://www.elasticsearch.org/
For more information about code search, see "[Searching for information on {% data variables.product.prodname_dotcom %}](/search-github)." For more information about Elasticsearch, see the [Elasticsearch website](https://elastic.co).
{% note %}
**Note**: In normal use, site administrators do not need to create new indices or schedule repair jobs. For troubleshooting or other support purposes, {% data variables.contact.github_support %} may instruct you to run a repair job.
{% endnote %}
### Index management
{% data variables.product.product_name %} automatically and regularly reconciles the state of the search index with the following data on the instance.
- Issues, pull requests, repositories, and users in the database
- Git repositories (source code) on disk
Your instance uses repair jobs to reconcile the data, and schedules a repair job in the background when the following events occur.
- A new search index is created.
- Missing data needs to be backfilled.
- Old search data needs to be updated.
You can create a new index, or you can click on an existing index in the list to manage the index. You can perform the following operations on an index.
- Make the index searchable.
- Make the index writable.
- Update the index.
- Delete the index.
- Reset the index repair state.
- Start a new index repair job.
- Enable or disable index repair jobs.
A progress bar shows the current status of a repair job across background workers. The bar represents the percentage difference between the repair offset and the highest record ID in the database. You can ignore the value shown in the progress bar after a repair job has completed: because the bar reflects the gap between the repair offset and the highest record ID, it will decrease as more repositories are added to {% data variables.product.product_location %}, even though those repositories are actually indexed.
To minimize the effects on I/O performance and reduce the chances of operations timing out, run the repair job during off-peak hours. As the job reconciles the search index with database and Git repository data, one CPU will be used. Monitor your system's load averages and CPU usage with a utility like `top`. If you don't notice any significant increase in resource consumption, it should also be safe to run an index repair job during peak hours.
Repair jobs use a "repair offset" for parallelization. This is an offset into the database table for the record being reconciled. Multiple background jobs can synchronize work based on this offset.
### Code search
This allows you to enable or disable both search and index operations on source code.
### Code search index repair
This controls how the code search index is repaired. You can
- enable or disable index repair jobs
- start a new index repair job
- reset all index repair state
{% data variables.product.prodname_enterprise %} uses repair jobs to reconcile the state of the search index with data stored in a database (issues, pull requests, repositories, and users) and data stored in Git repositories (source code). This happens when
- a new search index is created;
- missing data needs to be backfilled; or
- old search data needs to be updated.
In other words, repair jobs are started as needed and run in the background—they are not scheduled by site admins in any way.
Furthermore, repair jobs use a "repair offset" for parallelization. This is an offset into the database table for the record being reconciled. Multiple background jobs can synchronize work based on this offset.
A progress bar shows the current status of a repair job across all of its background workers. It is the percentage difference of the repair offset with the highest record ID in the database. Don't worry about the value shown in the progress bar after a repair job has completed: because it shows the difference between the repair offset and the highest record ID in the database, it will decrease as more repositories are added to {% data variables.product.product_location %} even though those repositories are actually indexed.
You can start a new code-search index repair job at any time. It will use a single CPU as it reconciles the search index with database and Git repository data. To minimize the effects this will have on I/O performance and reduce the chances of operations timing out, try to run a repair job during off-peak hours first. Monitor your system's load averages and CPU usage with a utility like `top`; if you don't notice any significant changes, it should be safe to run an index repair job during peak hours, as well.
### Issues index repair
This controls how the [Issues][] index is repaired. You can
[Issues]: https://github.com/blog/831-issues-2-0-the-next-generation
- enable or disable index repair jobs
- start a new index repair job
- reset all index repair state
{% endif %}
## Reserved logins

View File

@@ -14,7 +14,7 @@ topics:
- Infrastructure
shortTitle: Initiate failover to appliance
---
The time required to failover depends on how long it takes to manually promote the replica and redirect traffic. The average time ranges between 2-10 minutes.
The time required to failover depends on how long it takes to manually promote the replica and redirect traffic. The average time ranges between 20-30 minutes.
{% data reusables.enterprise_installation.promoting-a-replica %}

View File

@@ -83,7 +83,7 @@ You can create a runner group to manage access to the runner that you added to y
{% data variables.product.product_name %} adds all new runners to a group. Runners can be in one group at a time. By default, {% data variables.product.product_name %} adds new runners to the "Default" group.
{% data reusables.actions.self-hosted-runner-groups-add-to-enterprise-first-steps %}
{% data reusables.actions.runner-groups-add-to-enterprise-first-steps %}
1. To choose a policy for organization access, under "Organization access", select the **Organization access** drop-down, and click **Selected organizations**.
1. To the right of the drop-down with the organization access policy, click {% octicon "gear" aria-label="The Gear icon" %}.
1. Select the organizations you'd like to grant access to the runner group.
@@ -100,7 +100,7 @@ You can create a runner group to manage access to the runner that you added to y
{% endwarning %}
{%- endif %}
{% data reusables.actions.self-hosted-runner-create-group %}
{% data reusables.actions.create-runner-group %}
{%- ifversion ghec or ghes > 3.3 or ghae-issue-5091 %}
1. Click the "Runners" tab.
1. In the list of runners, click the runner that you deployed in the previous section.

View File

@@ -45,6 +45,7 @@ includeGuides:
- /admin/configuration/configuring-built-in-firewall-rules
- /admin/configuration/configuring-code-scanning-for-your-appliance
- /admin/configuration/configuring-data-encryption-for-your-enterprise
- /admin/code-security/managing-github-advanced-security-for-your-enterprise/configuring-dependency-review-for-your-appliance
- /admin/configuration/configuring-dns-nameservers
- /admin/configuration/configuring-rate-limits
- /admin/configuration/configuring-secret-scanning-for-your-appliance
@@ -98,7 +99,7 @@ includeGuides:
- /admin/policies/enforcing-policies-for-your-enterprise/enforcing-policies-for-dependency-insights-in-your-enterprise
- /admin/policies/enforcing-policies-for-your-enterprise/enforcing-policies-for-github-actions-in-your-enterprise
- /admin/policies/enforcing-policies-for-your-enterprise/enforcing-policies-for-security-settings-in-your-enterprise
- /admin/policies/enforcing-policies-for-your-enterprise/enforcing-project-board-policies-in-your-enterprise
- /admin/policies/enforcing-policies-for-your-enterprise/enforcing-policies-for-projects-in-your-enterprise
- /admin/policies/enforcing-policies-for-your-enterprise/enforcing-repository-management-policies-in-your-enterprise
- /admin/policies/enforcing-policies-for-your-enterprise/enforcing-team-policies-in-your-enterprise
- /admin/policies/enforcing-policies-for-your-enterprise/restricting-email-notifications-for-your-enterprise

View File

@@ -146,6 +146,9 @@ The scope of the events that appear in your enterprise's audit log depend on whe
{%- ifversion ghec or ghes %}
| `business.set_fork_pr_workflows_policy` | The policy for workflows on private repository forks was changed. For more information, see "{% ifversion ghec %}[Enforcing policies for {% data variables.product.prodname_actions %} in an enterprise](/admin/policies/enforcing-policies-for-your-enterprise/enforcing-policies-for-github-actions-in-your-enterprise#enforcing-a-policy-for-fork-pull-requests-in-private-repositories){% else ifversion ghes > 2.22 %}[Enabling workflows for private repository forks](/admin/github-actions/enabling-github-actions-for-github-enterprise-server/enforcing-github-actions-policies-for-your-enterprise#enabling-workflows-for-private-repository-forks){% endif %}."
{%- endif %}
{%- ifversion audit-log-sso-response-events %}
|`business.sso_response` | A SAML single sign-on (SSO) response was generated when a member attempted to authenticate with your enterprise. This event is only available via audit log streaming and the REST API.
{%- endif %}
{%- ifversion ghes %}
| `business.update_actions_settings` | An enterprise owner or site administrator updated {% data variables.product.prodname_actions %} policy settings for an enterprise. For more information, see "[Enforcing policies for GitHub Actions in your enterprise](/admin/policies/enforcing-policies-for-your-enterprise/enforcing-policies-for-github-actions-in-your-enterprise)."
{%- endif %}
@@ -730,8 +733,8 @@ Before you'll see `git` category actions, you must enable Git events in the audi
{%- ifversion fpt or ghec or ghes %}
| `org.set_fork_pr_workflows_policy` | The policy for workflows on private repository forks was changed. For more information, see "[Enabling workflows for private repository forks](/organizations/managing-organization-settings/disabling-or-limiting-github-actions-for-your-organization#enabling-workflows-for-private-repository-forks)."
{%- endif %}
{%- ifversion ghes %}
| `org.sso_response` | A SAML single sign-on response was generated when a member attempted to authenticate with an organization.
{%- ifversion ghes or audit-log-sso-response-events %}
| `org.sso_response` | A SAML single sign-on (SSO) response was generated when a member attempted to authenticate with your organization. This event is only available via audit log streaming and the REST API.
{%- endif %}
{%- ifversion not ghae %}
| `org.transform` | A user account was converted into an organization. For more information, see "[Converting a user into an organization](/github/setting-up-and-managing-your-github-user-account/converting-a-user-into-an-organization)."
@@ -793,9 +796,9 @@ Before you'll see `git` category actions, you must enable Git events in the audi
| Action | Description
|--------|-------------
| `organization_projects_change.clear` | An enterprise owner{% ifversion ghes %} or site administrator{% endif %} cleared the policy setting for organization-wide project boards in an enterprise. For more information, see "[Enforcing a policy for organization-wide project boards](/admin/policies/enforcing-policies-for-your-enterprise/enforcing-project-board-policies-in-your-enterprise#enforcing-a-policy-for-organization-wide-project-boards)."
| `organization_projects_change.disable` | Organization projects were disabled for all organizations in an enterprise. For more information, see "[Enforcing a policy for organization-wide project boards](/admin/policies/enforcing-policies-for-your-enterprise/enforcing-project-board-policies-in-your-enterprise#enforcing-a-policy-for-organization-wide-project-boards)."
| `organization_projects_change.enable` | Organization projects were enabled for all organizations in an enterprise. For more information, see "[Enforcing a policy for organization-wide project boards](/admin/policies/enforcing-policies-for-your-enterprise/enforcing-project-board-policies-in-your-enterprise#enforcing-a-policy-for-organization-wide-project-boards)."
| `organization_projects_change.clear` | An enterprise owner{% ifversion ghes %} or site administrator{% endif %} cleared the policy setting for organization-wide project boards in an enterprise. For more information, see "[Enforcing policies for projects in your enterprise](/admin/policies/enforcing-policies-for-your-enterprise/enforcing-policies-for-projects-in-your-enterprise#enforcing-a-policy-for-organization-wide-project-boards)."
| `organization_projects_change.disable` | Organization projects were disabled for all organizations in an enterprise. For more information, see "[Enforcing policies for projects in your enterprise](/admin/policies/enforcing-policies-for-your-enterprise/enforcing-policies-for-projects-in-your-enterprise#enforcing-a-policy-for-organization-wide-project-boards)."
| `organization_projects_change.enable` | Organization projects were enabled for all organizations in an enterprise. For more information, see "[Enforcing policies for projects in your enterprise](/admin/policies/enforcing-policies-for-your-enterprise/enforcing-policies-for-projects-in-your-enterprise#enforcing-a-policy-for-organization-wide-project-boards)."
{%- endif %}
## `packages` category actions
@@ -1110,7 +1113,7 @@ Before you'll see `git` category actions, you must enable Git events in the audi
| Action | Description
|--------|-------------
| `repository_projects_change.clear` | The repository projects policy was removed for an organization, or all organizations in the enterprise. Organization admins can now control their repository projects settings. For more information, see "[Enforcing project board policies in your enterprise](/admin/policies/enforcing-policies-for-your-enterprise/enforcing-project-board-policies-in-your-enterprise)."
| `repository_projects_change.clear` | The repository projects policy was removed for an organization, or all organizations in the enterprise. Organization admins can now control their repository projects settings. For more information, see "[Enforcing policies for projects in your enterprise](/admin/policies/enforcing-policies-for-your-enterprise/enforcing-policies-for-projects-in-your-enterprise)."
| `repository_projects_change.disable` | Repository projects were disabled for a repository, all repositories in an organization, or all organizations in an enterprise.
| `repository_projects_change.enable` | Repository projects were enabled for a repository, all repositories in an organization, or all organizations in an enterprise.

View File

@@ -44,7 +44,8 @@ You set up the audit log stream on {% data variables.product.product_name %} by
- [Amazon S3](#setting-up-streaming-to-amazon-s3)
- [Azure Blob Storage](#setting-up-streaming-to-azure-blob-storage)
- [Azure Event Hubs](#setting-up-streaming-to-azure-event-hubs)
- [Azure Event Hubs](#setting-up-streaming-to-azure-event-hubs){% ifversion streaming-datadog %}
- [Datadog](#setting-up-streaming-to-datadog){% endif %}
- [Google Cloud Storage](#setting-up-streaming-to-google-cloud-storage)
- [Splunk](#setting-up-streaming-to-splunk)
@@ -60,7 +61,7 @@ You can set up streaming to S3 with access keys or, to avoid storing long-lived
#### Setting up streaming to S3 with access keys
{% endif %}
To stream audit logs to Amazon's S3 endpoint, you must have a bucket and access keys. For more information, see [Creating, configuring, and working with Amazon S3 buckets](https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html) in the the AWS documentation. Make sure to block public access to the bucket to protect your audit log information.
To stream audit logs to Amazon's S3 endpoint, you must have a bucket and access keys. For more information, see [Creating, configuring, and working with Amazon S3 buckets](https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html) in the AWS documentation. Make sure to block public access to the bucket to protect your audit log information.
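If you manage your bucket with the AWS CLI, a minimal sketch like the following blocks all public access (the bucket name is a placeholder):

```shell
# A sketch, assuming the AWS CLI is installed and configured.
# "example-audit-log-bucket" is a hypothetical bucket name.
aws s3api put-public-access-block \
  --bucket example-audit-log-bucket \
  --public-access-block-configuration \
  BlockPublicAcls=true,IgnorePublicAcls=true,BlockPublicPolicy=true,RestrictPublicBuckets=true
```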
To set up audit log streaming from {% data variables.product.prodname_dotcom %} you will need:
* The name of your Amazon S3 bucket
@@ -231,6 +232,32 @@ You need two pieces of information about your event hub: its instance name and t
{% data reusables.enterprise.verify-audit-log-streaming-endpoint %}
{% ifversion streaming-datadog %}
### Setting up streaming to Datadog
To set up streaming to Datadog, you must create a client token or an API key in Datadog, then configure audit log streaming in {% data variables.product.product_name %} using the token for authentication. You do not need to create a bucket or other storage container in Datadog.
After you set up streaming to Datadog, you can see your audit log data by filtering by "github.audit.streaming." For more information, see [Log Management](https://docs.datadoghq.com/logs/).
1. If you don't already have a Datadog account, create one.
1. In Datadog, generate a client token or an API key, then click **Copy key**. For more information, see [API and Application Keys](https://docs.datadoghq.com/account_management/api-app-keys/) in Datadog Docs.
{% data reusables.enterprise.navigate-to-log-streaming-tab %}
1. Select the **Configure stream** dropdown menu and click **Datadog**.
![Screenshot of the "Configure stream" dropdown menu with "Datadog" highlighted](/assets/images/help/enterprises/audit-stream-choice-datadog.png)
1. Under "Token", paste the token you copied earlier.
![Screenshot of the "Token" field](/assets/images/help/enterprises/audit-stream-datadog-token.png)
1. Select the "Site" dropdown menu and click your Datadog site. To determine your Datadog site, compare your Datadog URL to the table in [Datadog sites](https://docs.datadoghq.com/getting_started/site/) in Datadog Docs.
![Screenshot of the "Site" dropdown menu](/assets/images/help/enterprises/audit-stream-datadog-site.png)
1. To verify that {% data variables.product.prodname_dotcom %} can connect and write to the Datadog endpoint, click **Check endpoint**.
![Check the endpoint](/assets/images/help/enterprises/audit-stream-check.png)
{% data reusables.enterprise.verify-audit-log-streaming-endpoint %}
1. After a few minutes, confirm that audit log data is appearing on the **Logs** tab in Datadog. If audit log data is not appearing, confirm that your token and site are correct in {% data variables.product.prodname_dotcom %}.
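If you used an API key, you can optionally sanity-check it from the command line before configuring the stream. Datadog provides a key-validation endpoint; the sketch below assumes the default `datadoghq.com` site, so substitute your site's domain if it differs:

```shell
# A sketch: returns {"valid":true} when the API key is accepted.
curl -H "DD-API-KEY: <em>YOUR_API_KEY</em>" \
  "https://api.datadoghq.com/api/v1/validate"
```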
{% endif %}
### Setting up streaming to Google Cloud Storage
To set up streaming to Google Cloud Storage, you must create a service account in Google Cloud with the appropriate credentials and permissions, then configure audit log streaming in {% data variables.product.product_name %} using the service account's credentials for authentication.
@@ -292,6 +319,10 @@ To stream audit logs to Splunk's HTTP Event Collector (HEC) endpoint you must ma
Pausing the stream allows you to perform maintenance on the receiving application without losing audit data. Audit logs are stored for up to seven days on {% data variables.product.product_location %} and are then exported when you unpause the stream.
{% ifversion streaming-datadog %}
Datadog only accepts logs from up to 18 hours in the past. If you pause a stream to a Datadog endpoint for more than 18 hours, you risk losing logs that Datadog won't accept after you resume streaming.
{% endif %}
{% data reusables.enterprise.navigate-to-log-streaming-tab %}
1. Click **Pause stream**.

View File

@@ -121,7 +121,7 @@ For more information about the audit log REST API, see "[Enterprise administrati
The query below searches for audit log events created on Jan 1st, 2022 in the `avocado-corp` enterprise, and returns the first page with a maximum of 100 items per page, using [REST API pagination](/rest/overview/resources-in-the-rest-api#pagination):
```shell
curl -H "Authorization: token <em>TOKEN</em>" \
curl -H "Authorization: Bearer <em>TOKEN</em>" \
--request GET \
"https://api.github.com/enterprises/avocado-corp/audit-log?phrase=created:2022-01-01&page=1&per_page=100"
```
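To retrieve the next page of results, increment the `page` parameter. The sketch below follows the same pattern as the query above:

```shell
curl -H "Authorization: Bearer <em>TOKEN</em>" \
--request GET \
"https://api.github.com/enterprises/avocado-corp/audit-log?phrase=created:2022-01-01&page=2&per_page=100"
```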
@@ -133,7 +133,7 @@ You can specify multiple search phrases, such as `created` and `actor`, by separ
The query below searches for audit log events for pull requests, where the event occurred on or after Jan 1st, 2022 in the `avocado-corp` enterprise, and the action was performed by the `octocat` user:
```shell
curl -H "Authorization: token <em>TOKEN</em>" \
curl -H "Authorization: Bearer <em>TOKEN</em>" \
--request GET \
"https://api.github.com/enterprises/avocado-corp/audit-log?phrase=action:pull_request+created:>=2022-01-01+actor:octocat"
```

View File

@@ -0,0 +1,73 @@
---
title: Enforcing policies for projects in your enterprise
intro: 'You can enforce policies for {% data variables.projects.projects_v2_and_v1 %} within your enterprise''s organizations, or allow policies to be set in each organization.'
permissions: Enterprise owners can enforce policies for projects in an enterprise.
redirect_from:
- /articles/enforcing-project-board-settings-for-organizations-in-your-business-account
- /articles/enforcing-project-board-policies-for-organizations-in-your-enterprise-account
- /articles/enforcing-project-board-policies-in-your-enterprise-account
- /github/setting-up-and-managing-your-enterprise-account/enforcing-project-board-policies-in-your-enterprise-account
- /github/setting-up-and-managing-your-enterprise/enforcing-project-board-policies-in-your-enterprise-account
- /github/setting-up-and-managing-your-enterprise/setting-policies-for-organizations-in-your-enterprise-account/enforcing-project-board-policies-in-your-enterprise-account
- /admin/policies/enforcing-policies-for-your-enterprise/enforcing-project-board-policies-in-your-enterprise
versions:
ghec: '*'
ghes: '*'
ghae: '*'
type: how_to
topics:
- Enterprise
- Policies
- Projects
shortTitle: Project board policies
---
## About policies for projects in your enterprise
You can enforce policies to control how enterprise members manage {% data variables.projects.projects_v2_and_v1 %}, or you can allow organization owners to manage policies for {% data variables.projects.projects_v2_and_v1 %} at the organization level.{% ifversion project-visibility-policy %}
Some policies apply to both {% data variables.product.prodname_projects_v2 %}, the new projects experience, and {% data variables.product.prodname_projects_v1 %}, the previous experience, while some apply only to {% data variables.product.prodname_projects_v1 %}. For more information about each experience, see "[About {% data variables.product.prodname_projects_v2 %}](/issues/planning-and-tracking-with-projects/learning-about-projects/about-projects)" and "[About {% data variables.product.prodname_projects_v1 %}](/issues/organizing-your-work-with-project-boards/managing-project-boards/about-project-boards)."
{% else %}For more information, see "[About project boards](/issues/organizing-your-work-with-project-boards/managing-project-boards/about-project-boards)."{% endif %}
## Enforcing a policy for organization-wide projects
Across all organizations owned by your enterprise, you can enable or disable organization-wide project boards, or allow owners to administer the setting on the organization level.
{% data reusables.enterprise-accounts.access-enterprise %}
{% data reusables.enterprise-accounts.policies-tab %}
{% data reusables.enterprise-accounts.projects-tab %}
4. Under "Organization projects", review the information about changing the setting. {% data reusables.enterprise-accounts.view-current-policy-config-orgs %}
5. Under "Organization projects", use the drop-down menu and choose a policy.
![Drop-down menu with organization project board policy options](/assets/images/help/business-accounts/organization-projects-policy-drop-down.png)
{% ifversion project-visibility-policy %}
## Enforcing a policy for visibility changes to projects
Across all organizations owned by your enterprise, you can enable or disable the ability for people with admin access to a project to change the visibility of the project, or you can allow owners to administer the setting on the organization level.
{% data reusables.enterprise-accounts.access-enterprise %}
{% data reusables.enterprise-accounts.policies-tab %}
{% data reusables.enterprise-accounts.projects-tab %}
1. Under "Project visibility change permission", review the information about changing the setting. {% data reusables.enterprise-accounts.view-current-policy-config-orgs %}
1. Select the dropdown menu, then click a policy.
![Screenshot of the dropdown menu for configuring the "Project visibility change permission" policy](/assets/images/help/business-accounts/project-visibility-change-drop-down.png)
{% endif %}
{% ifversion projects-v1 %}
## Enforcing policies for {% data variables.product.prodname_projects_v1 %}
Some policies apply only to {% data variables.product.prodname_projects_v1 %}.
### Enforcing a policy for repository projects
Across all organizations owned by your enterprise, you can enable or disable repository-level projects, or allow owners to administer the setting at the organization level.
{% data reusables.enterprise-accounts.access-enterprise %}
{% data reusables.enterprise-accounts.policies-tab %}
{% data reusables.enterprise-accounts.projects-tab %}
4. Under "Repository projects", review the information about changing the setting. {% data reusables.enterprise-accounts.view-current-policy-config-orgs %}
5. Under "Repository projects", use the drop-down menu and choose a policy.
![Drop-down menu with repository project board policy options](/assets/images/help/business-accounts/repository-projects-policy-drop-down.png)
{% endif %}

View File

@@ -81,6 +81,8 @@ You can also configure allowed IP addresses for an individual organization. For
{% data reusables.identity-and-permissions.about-adding-ip-allow-list-entries %}
{% data reusables.identity-and-permissions.ipv6-allow-lists %}
{% data reusables.enterprise-accounts.access-enterprise %}
{% data reusables.enterprise-accounts.settings-tab %}
{% data reusables.enterprise-accounts.security-tab %}

View File

@@ -1,48 +0,0 @@
---
title: Enforcing project board policies in your enterprise
intro: 'You can enforce policies for projects within your enterprise''s organizations, or allow policies to be set in each organization.'
permissions: Enterprise owners can enforce policies for project boards in an enterprise.
redirect_from:
- /articles/enforcing-project-board-settings-for-organizations-in-your-business-account
- /articles/enforcing-project-board-policies-for-organizations-in-your-enterprise-account
- /articles/enforcing-project-board-policies-in-your-enterprise-account
- /github/setting-up-and-managing-your-enterprise-account/enforcing-project-board-policies-in-your-enterprise-account
- /github/setting-up-and-managing-your-enterprise/enforcing-project-board-policies-in-your-enterprise-account
- /github/setting-up-and-managing-your-enterprise/setting-policies-for-organizations-in-your-enterprise-account/enforcing-project-board-policies-in-your-enterprise-account
versions:
ghec: '*'
ghes: '*'
ghae: '*'
type: how_to
topics:
- Enterprise
- Policies
- Projects
shortTitle: Project board policies
---
## About policies for project boards in your enterprise
You can enforce policies to control how members of your enterprise on {% data variables.product.product_name %} manage project boards. You can also allow organization owners to manage policies for project boards. For more information, see "[About project boards](/issues/organizing-your-work-with-project-boards/managing-project-boards/about-project-boards)."
## Enforcing a policy for organization-wide project boards
Across all organizations owned by your enterprise, you can enable or disable organization-wide project boards, or allow owners to administer the setting on the organization level.
{% data reusables.enterprise-accounts.access-enterprise %}
{% data reusables.enterprise-accounts.policies-tab %}
{% data reusables.enterprise-accounts.projects-tab %}
4. Under "Organization projects", review the information about changing the setting. {% data reusables.enterprise-accounts.view-current-policy-config-orgs %}
5. Under "Organization projects", use the drop-down menu and choose a policy.
![Drop-down menu with organization project board policy options](/assets/images/help/business-accounts/organization-projects-policy-drop-down.png)
## Enforcing a policy for repository project boards
Across all organizations owned by your enterprise, you can enable or disable repository-level project boards, or allow owners to administer the setting on the organization level.
{% data reusables.enterprise-accounts.access-enterprise %}
{% data reusables.enterprise-accounts.policies-tab %}
{% data reusables.enterprise-accounts.projects-tab %}
4. Under "Repository projects", review the information about changing the setting. {% data reusables.enterprise-accounts.view-current-policy-config-orgs %}
5. Under "Repository projects", use the drop-down menu and choose a policy.
![Drop-down menu with repository project board policy options](/assets/images/help/business-accounts/repository-projects-policy-drop-down.png)

View File

@@ -16,7 +16,7 @@ children:
- /about-enterprise-policies
- /enforcing-repository-management-policies-in-your-enterprise
- /enforcing-team-policies-in-your-enterprise
- /enforcing-project-board-policies-in-your-enterprise
- /enforcing-policies-for-projects-in-your-enterprise
- /restricting-email-notifications-for-your-enterprise
- /enforcing-policies-for-security-settings-in-your-enterprise
- /enforcing-policies-for-dependency-insights-in-your-enterprise

View File

@@ -44,7 +44,7 @@ The Migrations API is currently in a preview period, which means that the endpoi
* Your access token for authentication.
* A [list of the repositories](/free-pro-team@latest/rest/repos#list-organization-repositories) you want to migrate:
```shell
curl -H "Authorization: token <em>GITHUB_ACCESS_TOKEN</em>" \
curl -H "Authorization: Bearer <em>GITHUB_ACCESS_TOKEN</em>" \
-X POST \
-H "Accept: application/vnd.github+json" \
-d'{"lock_repositories":true,"repositories":["<em>orgname</em>/<em>reponame</em>", "<em>orgname</em>/<em>reponame</em>"]}' \
@@ -59,7 +59,7 @@ The Migrations API is currently in a preview period, which means that the endpoi
* Your access token for authentication.
* The unique `id` of the migration:
```shell
curl -H "Authorization: token <em>GITHUB_ACCESS_TOKEN</em>" \
curl -H "Authorization: Bearer <em>GITHUB_ACCESS_TOKEN</em>" \
-H "Accept: application/vnd.github+json" \
https://api.github.com/orgs/<em>orgname</em>/migrations/<em>id</em>
```
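Because large exports can take some time, you may want to poll the status endpoint until the `state` field reports `exported`. A minimal sketch, assuming the `jq` CLI is installed:

```shell
# A sketch: checks the migration state every 60 seconds until the
# archive is ready for download.
until [ "$(curl -s -H "Authorization: Bearer <em>GITHUB_ACCESS_TOKEN</em>" \
  -H "Accept: application/vnd.github+json" \
  https://api.github.com/orgs/<em>orgname</em>/migrations/<em>id</em> | jq -r '.state')" = "exported" ]; do
  sleep 60
done
```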
@@ -74,7 +74,7 @@ The Migrations API is currently in a preview period, which means that the endpoi
* Your access token for authentication.
* The unique `id` of the migration:
```shell
curl -H "Authorization: token <em>GITHUB_ACCESS_TOKEN</em>" \
curl -H "Authorization: Bearer <em>GITHUB_ACCESS_TOKEN</em>" \
-H "Accept: application/vnd.github+json" \
-L -o migration_archive.tar.gz \
https://api.github.com/orgs/<em>orgname</em>/migrations/<em>id</em>/archive
@@ -84,7 +84,7 @@ The Migrations API is currently in a preview period, which means that the endpoi
* Your access token for authentication.
* The unique `id` of the migration:
```shell
curl -H "Authorization: token <em>GITHUB_ACCESS_TOKEN</em>" \
curl -H "Authorization: Bearer <em>GITHUB_ACCESS_TOKEN</em>" \
-X DELETE \
-H "Accept: application/vnd.github+json" \
https://api.github.com/orgs/<em>orgname</em>/migrations/<em>id</em>/archive

View File

@@ -133,7 +133,7 @@ To unlock the repositories on a {% data variables.product.prodname_dotcom_the_we
* The unique `id` of the migration
* The name of the repository to unlock
```shell
curl -H "Authorization: token <em>GITHUB_ACCESS_TOKEN</em>" -X DELETE \
curl -H "Authorization: Bearer <em>GITHUB_ACCESS_TOKEN</em>" -X DELETE \
-H "Accept: application/vnd.github.wyandotte-preview+json" \
https://api.github.com/orgs/<em>orgname</em>/migrations/<em>id</em>/repos/<em>repo_name</em>/lock
```
@@ -142,7 +142,7 @@ curl -H "Authorization: token <em>GITHUB_ACCESS_TOKEN</em>" -X DELETE \
After unlocking the {% data variables.product.prodname_dotcom_the_website %} organization's repositories, you should delete every repository you previously migrated using [the repository delete endpoint](/rest/repos/#delete-a-repository). You'll need your access token for authentication:
```shell
curl -H "Authorization: token <em>GITHUB_ACCESS_TOKEN</em>" -X DELETE \
curl -H "Authorization: Bearer <em>GITHUB_ACCESS_TOKEN</em>" -X DELETE \
https://api.github.com/repos/<em>orgname</em>/<em>repo_name</em>
```
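If you migrated several repositories, a small loop avoids repeating the command. A hedged sketch, with placeholder repository names:

```shell
# A sketch: deletes each previously migrated repository in turn.
for repo in <em>reponame1</em> <em>reponame2</em>; do
  curl -H "Authorization: Bearer <em>GITHUB_ACCESS_TOKEN</em>" -X DELETE \
    "https://api.github.com/repos/<em>orgname</em>/$repo"
done
```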

View File

@@ -26,11 +26,12 @@ You can authorize an existing SSH key, or create a new SSH key and then authoriz
{% data reusables.user-settings.access_settings %}
{% data reusables.user-settings.ssh %}
3. Next to the SSH key you'd like to authorize, click **Enable SSO** or **Disable SSO**.
![SSO token authorize button](/assets/images/help/settings/ssh-sso-button.png)
4. Find the organization you'd like to authorize the SSH key for.
5. Click **Authorize**.
![Token authorize button](/assets/images/help/settings/ssh-sso-authorize.png)
1. To the right of the SSH key you'd like to authorize, click **Configure SSO**.
![Screenshot of the SSO token authorize button](/assets/images/help/settings/ssh-sso-button.png)
1. To the right of the organization you'd like to authorize the SSH key for, click **Authorize**.
![Screenshot of the token authorize button](/assets/images/help/settings/ssh-sso-authorize.png)
## Further reading

View File

@@ -28,11 +28,11 @@ topics:
{% data reusables.command_line.open_the_multi_os_terminal %}
3. Generate a GPG key pair. Since there are multiple versions of GPG, you may need to consult the relevant [_man page_](https://en.wikipedia.org/wiki/Man_page) to find the appropriate key generation command. Your key must use RSA.
- If you are on version 2.1.17 or greater, paste the text below to generate a GPG key pair.
```shell
```shell{:copy}
$ gpg --full-generate-key
```
- If you are not on version 2.1.17 or greater, the `gpg --full-generate-key` command doesn't work. Paste the text below and skip to step 6.
```shell
```shell{:copy}
$ gpg --default-new-key-algo rsa4096 --gen-key
```
4. At the prompt, specify the kind of key you want, or press `Enter` to accept the default.
@@ -51,10 +51,10 @@ topics:
{% data reusables.gpg.list-keys-with-note %}
{% data reusables.gpg.copy-gpg-key-id %}
10. Paste the text below, substituting in the GPG key ID you'd like to use. In this example, the GPG key ID is `3AA5C34371567BD2`:
```shell
$ gpg --armor --export <em>3AA5C34371567BD2</em>
# Prints the GPG key ID, in ASCII armor format
```
```shell{:copy}
$ gpg --armor --export 3AA5C34371567BD2
# Prints the GPG key ID, in ASCII armor format
```
11. Copy your GPG key, beginning with `-----BEGIN PGP PUBLIC KEY BLOCK-----` and ending with `-----END PGP PUBLIC KEY BLOCK-----`.
12. [Add the GPG key to your GitHub account](/articles/adding-a-gpg-key-to-your-github-account).
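After adding the key to your account, you may also want to tell Git to sign your commits with it. A brief sketch, reusing the example GPG key ID from above:

```shell
$ git config --global user.signingkey 3AA5C34371567BD2
# Optionally, sign all commits by default
$ git config --global commit.gpgsign true
```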

View File

@@ -1,6 +1,7 @@
---
title: About billing for GitHub Actions
intro: 'If you want to use {% data variables.product.prodname_actions %} beyond the storage or minutes included in your account, you will be billed for additional usage.'
miniTocMaxHeadingLevel: 3
redirect_from:
- /github/setting-up-and-managing-billing-and-payments-on-github/about-billing-for-github-actions
- /github/setting-up-and-managing-billing-and-payments-on-github/managing-billing-for-github-actions/about-billing-for-github-actions
@@ -27,6 +28,14 @@ Minutes reset every month, while storage usage does not.
### Included storage and minutes
{% ifversion actions-hosted-runners %}
{% note %}
**Note**: Entitlement minutes cannot be used for Windows and Ubuntu runners with more than 2 cores. Use of these runners is always charged, including in public repositories. For more information, see "[Per-minute rates for runners](/billing/managing-billing-for-github-actions/about-billing-for-github-actions#per-minute-rates)."
{% endnote %}
{% endif %}
|Product | Storage | Minutes (per month)|
|------- | ------- | ---------|
| {% data variables.product.prodname_free_user %} | 500 MB | 2,000 |
@@ -57,15 +66,15 @@ The storage used by a repository is the total storage used by {% data variables.
### Per-minute rates
| Operating system | Per-minute rate (USD) |
|------- | ---------|
| Linux | $0.008 |
| macOS | $0.08 |
| Windows | $0.016 |
{% data reusables.billing.billing-standard-runners %}
{% ifversion actions-hosted-runners %}{% data reusables.billing.billing-hosted-runners %}{% endif %}
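As a rough illustration of how the per-minute rates combine, the sketch below estimates the cost of overage minutes at the example rates shown above. The minute counts are made up for illustration; this is not a substitute for your billing statement:

```shell
# A sketch: 1,000 Linux, 500 Windows, and 100 macOS overage minutes.
# 1000*0.008 + 500*0.016 + 100*0.08 = 8 + 8 + 8 = 24 (USD)
echo "1000*0.008 + 500*0.016 + 100*0.08" | bc
```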
The number of jobs you can run concurrently across all repositories in your user or organization account depends on your GitHub plan. For more information, see "[Usage limits and billing](/actions/reference/usage-limits-billing-and-administration)" for {% data variables.product.prodname_dotcom %}-hosted runners and "[About self-hosted runners](/actions/hosting-your-own-runners/about-self-hosted-runners/#usage-limits)" for self-hosted runner usage limits.
{% data reusables.user-settings.context_switcher %}
- The number of jobs you can run concurrently across all repositories in your user or organization account depends on your GitHub plan. For more information, see "[Usage limits and billing](/actions/reference/usage-limits-billing-and-administration)" for {% data variables.product.prodname_dotcom %}-hosted runners and "[About self-hosted runners](/actions/hosting-your-own-runners/about-self-hosted-runners/#usage-limits)" for self-hosted runner usage limits.
- {% data reusables.user-settings.context_switcher %}
{% ifversion actions-hosted-runners %}
- For {% data variables.actions.hosted_runner %}s, there is no additional cost for configurations that assign public static IP addresses to a {% data variables.actions.hosted_runner %}. For more information on {% data variables.actions.hosted_runner %}s, see "[Using {% data variables.actions.hosted_runner %}s](/actions/using-github-hosted-runners/using-larger-runners)."
- Entitlement minutes cannot be used for {% data variables.actions.hosted_runner %}s.
{% endif %}
## Calculating minute and storage spending

View File

@@ -359,7 +359,7 @@ In the following example, the `+` symbol ensures that the specified additional {
## Using a custom configuration file
A custom configuration file is an alternative way to specify additional {% ifversion codeql-packs %}packs and {% endif %}queries to run. You can also use the file to disable the default queries and to specify which directories to scan during analysis.
A custom configuration file is an alternative way to specify additional {% ifversion codeql-packs %}packs and {% endif %}queries to run. You can also use the file to disable the default queries{% ifversion code-scanning-exclude-queries-from-analysis %}, exclude or include specific queries,{% endif %} and to specify which directories to scan during analysis.
In the workflow file, use the `config-file` parameter of the `init` action to specify the path to the configuration file you want to use. This example loads the configuration file _./.github/codeql/codeql-config.yml_.
@@ -442,6 +442,41 @@ Optionally, you can give each array element a name, as shown in the example conf
If you only want to run custom queries, you can disable the default security queries by using `disable-default-queries: true`.
{% ifversion code-scanning-exclude-queries-from-analysis %}
### Excluding specific queries from analysis
You can add `exclude` and `include` filters to your custom configuration file, to specify the queries you want to exclude or include in the analysis.
This is useful if you want to exclude, for example:
- Specific queries from the default suites (`security`, `security-extended` and `security-and-quality`).
- Specific queries whose results do not interest you.
- All the queries that generate warnings and recommendations.
You can use `exclude` filters similar to those in the configuration file below to remove queries from the default analysis. In the example configuration file below, both the `js/redundant-assignment` and the `js/useless-assignment-to-local` queries are excluded from analysis.
```yaml
query-filters:
- exclude:
id: js/redundant-assignment
- exclude:
id: js/useless-assignment-to-local
```
To find the ID of a query, you can click the alert in the list of alerts in the Security tab. This opens the alert details page, where the `Rule ID` field contains the query ID. For more information about the alert details page, see "[About {% data variables.product.prodname_code_scanning %} alerts](/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/about-code-scanning-alerts#about-alert-details)."
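If you prefer working from the command line and have the {% data variables.product.prodname_codeql_cli %} installed, you can also list the queries that make up a suite with the `resolve queries` command. A sketch; the suite path is an assumption, so adjust it to wherever your query packs are checked out:

```shell
# A sketch: prints the paths of all queries contained in a suite file.
codeql resolve queries path/to/codeql-suites/javascript-security-extended.qls
```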
{% tip %}
**Tips:**
- The order of the filters is important. The first filter instruction that appears after the instructions about the queries and query packs determines whether the queries are included or excluded by default.
- Subsequent instructions are executed in order and the instructions that appear later in the file take precedence over the earlier instructions.
{% endtip %}
You can find another example illustrating the use of these filters in the "[Example configuration files](#example-configuration-files)" section.
For more information about using `exclude` and `include` filters in your custom configuration file, see "[Creating {% data variables.product.prodname_codeql %} query suites](https://codeql.github.com/docs/codeql-cli/creating-codeql-query-suites/#filtering-the-queries-in-a-query-suite)." For information on the query metadata you can filter on, see "[Metadata for CodeQL queries](https://codeql.github.com/docs/writing-codeql-queries/metadata-for-codeql-queries/)."
{% endif %}
### Specifying directories to scan
For the interpreted languages that {% data variables.product.prodname_codeql %} supports (Python{% ifversion fpt or ghes > 3.3 or ghae-issue-5017 %}, Ruby{% endif %} and JavaScript/TypeScript), you can restrict {% data variables.product.prodname_code_scanning %} to files in specific directories by adding a `paths` array to the configuration file. You can exclude the files in specific directories from analysis by adding a `paths-ignore` array.

View File

@@ -43,8 +43,8 @@ By default, the code scanning alerts page is filtered to show alerts for the def
![Summary of alerts](/assets/images/help/repository/code-scanning-click-alert.png)
{% ifversion fpt or ghec or ghes > 3.4 or ghae-issue-6249 %}
{% data reusables.code-scanning.alert-default-branch %}
![The "Affected branches" section in an alert](/assets/images/help/repository/code-scanning-affected-branches.png){% endif %}
{% data reusables.code-scanning.alert-default-branch %}
![The "Affected branches" section in an alert](/assets/images/help/repository/code-scanning-affected-branches.png){% endif %}
1. Optionally, if the alert highlights a problem with data flow, click **Show paths** to display the path from the data source to the sink where it's used.
{% ifversion fpt or ghec or ghes > 3.4 or ghae-issue-6249 %}
![The "Show paths" link on an alert](/assets/images/help/repository/code-scanning-show-paths.png)

View File

@@ -151,25 +151,29 @@ The names of the {% data variables.product.prodname_code_scanning %} analysis ch
![{% data variables.product.prodname_code_scanning %} pull request checks](/assets/images/help/repository/code-scanning-pr-checks.png)
When the {% data variables.product.prodname_code_scanning %} jobs complete, {% data variables.product.prodname_dotcom %} works out whether any alerts were added by the pull request and adds the "{% data variables.product.prodname_code_scanning_capc %} results / TOOL NAME" entry to the list of checks. After {% data variables.product.prodname_code_scanning %} has been performed at least once, you can click **Details** to view the results of the analysis. If you used a pull request to add {% data variables.product.prodname_code_scanning %} to the repository, you will initially see {% ifversion fpt or ghes > 3.2 or ghae or ghec %}an "Analysis not found"{% else %}a "Missing analysis"{% endif %} message when you click **Details** on the "{% data variables.product.prodname_code_scanning_capc %} results / TOOL NAME" check.
When the {% data variables.product.prodname_code_scanning %} jobs complete, {% data variables.product.prodname_dotcom %} works out whether any alerts were added by the pull request and adds the "{% data variables.product.prodname_code_scanning_capc %} results / TOOL NAME" entry to the list of checks. After {% data variables.product.prodname_code_scanning %} has been performed at least once, you can click **Details** to view the results of the analysis.
{% ifversion fpt or ghes > 3.2 or ghae or ghec %}
![Analysis not found for commit message](/assets/images/help/repository/code-scanning-analysis-not-found.png)
{% ifversion fpt or ghec or ghes > 3.4 or ghae-issue-7095 %}
<!--Troubleshooting section no longer relevant-->
{% elsif ghes < 3.5 or ghae %}
If you used a pull request to add {% data variables.product.prodname_code_scanning %} to the repository, you will initially see {% ifversion ghes > 3.2 or ghae %}an "Analysis not found"{% elsif ghes = 3.2 %}a "Missing analysis"{% endif %} message when you click **Details** on the "{% data variables.product.prodname_code_scanning_capc %} results / TOOL NAME" check.
{% ifversion ghes > 3.2 or ghae %}
![Analysis not found for commit message](/assets/images/enterprise/3.4/repository/code-scanning-analysis-not-found.png)
The table lists one or more categories. Each category relates to specific analyses, for the same tool and commit, performed on a different language or a different part of the code. For each category, the table shows the two analyses that {% data variables.product.prodname_code_scanning %} attempted to compare to determine which alerts were introduced or fixed in the pull request.
For example, in the screenshot above, {% data variables.product.prodname_code_scanning %} found an analysis for the merge commit of the pull request, but no analysis for the head of the main branch.
{% else %}
![Missing analysis for commit message](/assets/images/enterprise/3.2/repository/code-scanning-missing-analysis.png)
{% endif %}
{% ifversion fpt or ghes > 3.2 or ghae or ghec %}
### Reasons for the "Analysis not found" message
{% else %}
{% elsif ghes = 3.2 %}
![Missing analysis for commit message](/assets/images/enterprise/3.2/repository/code-scanning-missing-analysis.png)
### Reasons for the "Missing analysis" message
{% endif %}
After {% data variables.product.prodname_code_scanning %} has analyzed the code in a pull request, it needs to compare the analysis of the topic branch (the branch you used to create the pull request) with the analysis of the base branch (the branch into which you want to merge the pull request). This allows {% data variables.product.prodname_code_scanning %} to compute which alerts are newly introduced by the pull request, which alerts were already present in the base branch, and whether any existing alerts are fixed by the changes in the pull request. Initially, if you use a pull request to add {% data variables.product.prodname_code_scanning %} to a repository, the base branch has not yet been analyzed, so it's not possible to compute these details. In this case, when you click through from the results check on the pull request you will see the {% ifversion fpt or ghes > 3.2 or ghae or ghec %}"Analysis not found"{% else %}"Missing analysis for base commit SHA-HASH"{% endif %} message.
After {% data variables.product.prodname_code_scanning %} has analyzed the code in a pull request, it needs to compare the analysis of the topic branch (the branch you used to create the pull request) with the analysis of the base branch (the branch into which you want to merge the pull request). This allows {% data variables.product.prodname_code_scanning %} to compute which alerts are newly introduced by the pull request, which alerts were already present in the base branch, and whether any existing alerts are fixed by the changes in the pull request. Initially, if you use a pull request to add {% data variables.product.prodname_code_scanning %} to a repository, the base branch has not yet been analyzed, so it's not possible to compute these details. In this case, when you click through from the results check on the pull request you will see the {% ifversion ghes > 3.2 or ghae %}"Analysis not found"{% elsif ghes = 3.2 %}"Missing analysis for base commit SHA-HASH"{% endif %} message.
There are other situations where there may be no analysis for the latest commit to the base branch for a pull request. These include:
@@ -177,7 +181,7 @@ There are other situations where there may be no analysis for the latest commit
To check whether a branch has been scanned, go to the {% data variables.product.prodname_code_scanning_capc %} page, click the **Branch** drop-down and select the relevant branch.
![Choose a branch from the Branch drop-down menu](/assets/images/help/repository/code-scanning-branch-dropdown.png)
![Choose a branch from the Branch drop-down menu](/assets/images/help/repository/code-scanning-branch-dropdown.png)
The solution in this situation is to add the name of the base branch to the `on:push` and `on:pull_request` specification in the {% data variables.product.prodname_code_scanning %} workflow on that branch and then make a change that updates the open pull request that you want to scan.
@@ -189,6 +193,8 @@ There are other situations where there may be no analysis for the latest commit
Merge a trivial change into the base branch to trigger {% data variables.product.prodname_code_scanning %} on this latest commit, then push a change to the pull request to retrigger {% data variables.product.prodname_code_scanning %}.
{% endif %}
## Next steps
After setting up {% data variables.product.prodname_code_scanning %}, and allowing its actions to complete, you can:

View File

@@ -33,8 +33,6 @@ In repositories where {% data variables.product.prodname_code_scanning %} is con
- The **Conversation** tab of the pull request, as part of a pull request review {% endif %}
- The **Files changed** tab of the pull request
{% ifversion code-scanning-pr-conversations-tab %} {% endif %}
If you have write permission for the repository, you can see any existing {% data variables.product.prodname_code_scanning %} alerts on the **Security** tab. For information about repository alerts, see "[Managing {% data variables.product.prodname_code_scanning %} alerts for your repository](/code-security/secure-coding/managing-code-scanning-alerts-for-your-repository)."
{% ifversion fpt or ghes > 3.2 or ghae or ghec %}

View File

@@ -45,9 +45,15 @@ If you upload a second SARIF file for a commit with the same category and from t
If you're new to SARIF and want to learn more, see Microsoft's [`SARIF tutorials`](https://github.com/microsoft/sarif-tutorials) repository.
## Preventing duplicate alerts using fingerprints
## Providing data to track {% data variables.product.prodname_code_scanning %} alerts across runs
Each time the results of a new code scan are uploaded, the results are processed and alerts are added to the repository. To prevent duplicate alerts for the same problem, {% data variables.product.prodname_code_scanning %} uses fingerprints to match results across various runs so they only appear once in the latest run for the selected branch. This makes it possible to match alerts to the right line of code when files are edited.
Each time the results of a new code scan are uploaded, the results are processed and alerts are added to the repository. To prevent duplicate alerts for the same problem, {% data variables.product.prodname_code_scanning %} uses fingerprints to match results across various runs so they only appear once in the latest run for the selected branch. This makes it possible to match alerts to the correct line of code when files are edited. The `ruleID` for a result must also remain the same across analyses.
### Reporting consistent filepaths
The filepath has to be consistent across runs to enable the computation of a stable fingerprint. If the filepaths differ for the same result, each new analysis creates a new alert and closes the old one, leaving multiple alerts for the same result.
### Including data for fingerprint generation
{% data variables.product.prodname_dotcom %} uses the `partialFingerprints` property in the OASIS standard to detect when two results are logically identical. For more information, see the "[partialFingerprints property](https://docs.oasis-open.org/sarif/sarif/v2.1.0/cs01/sarif-v2.1.0-cs01.html#_Toc16012611)" entry in the OASIS documentation.
@@ -65,6 +71,29 @@ SARIF files support both rules and results. The information stored in these elem
When you compare SARIF files generated by analyzing different codebases with the same tool and rules, you should see differences in the results of the analyses but not in the rules.
## Specifying the root for source files
{% data variables.product.prodname_code_scanning_capc %} interprets results that are reported with relative paths as relative to the root of the repository analyzed. If a result contains an absolute URI, the URI is converted to a relative URI. The relative URI can then be matched against a file committed to the repository.
You can provide the source root for conversion from absolute to relative URIs in one of the following ways.
- [`checkout_path`](https://github.com/github/codeql-action/blob/c2c0a2908e95769d01b907f9930050ecb5cf050d/analyze/action.yml#L44-L47) input to the `github/codeql-action/analyze` action
- `checkout_uri` parameter to the SARIF upload API endpoint. For more information, see "[{% data variables.product.prodname_code_scanning_capc %}](/rest/code-scanning#upload-an-analysis-as-sarif-data)" in the REST API documentation
- [`invocation.workingDirectory.uri`](https://docs.oasis-open.org/sarif/sarif/v2.1.0/csprd01/sarif-v2.1.0-csprd01.html#_Toc9244365) property in the SARIF file
If you provide a source root, any location of an artifact specified using an absolute URI must use the same URI scheme. If there is a mismatch between the URI scheme for the source root and one or more of the absolute URIs, the upload is rejected.
For example, a SARIF file is uploaded using a source root of `file:///github/workspace`.
```
# Conversion of absolute URIs to relative URIs for location artifacts
file:///github/workspace/src/main.go -> src/main.go
file:///tmp/go-build/tmp.go -> file:///tmp/go-build/tmp.go
```
The file is successfully uploaded as both absolute URIs use the same URI scheme as the source root.
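For instance, when uploading via the REST API you can pass the source root with the `checkout_uri` parameter. A hedged sketch; the GNU `base64 -w0` flag is assumed, the encoding step may differ on other platforms, and you should confirm the payload fields against the SARIF upload endpoint documentation:

```shell
# A sketch: gzip-compress and Base64-encode the SARIF file, then upload
# it with an explicit source root of file:///github/workspace.
SARIF=$(gzip -c results.sarif | base64 -w0)
curl -H "Authorization: Bearer <em>TOKEN</em>" \
  -X POST \
  -H "Accept: application/vnd.github+json" \
  -d "{\"commit_sha\":\"<em>SHA</em>\",\"ref\":\"refs/heads/main\",\"checkout_uri\":\"file:///github/workspace\",\"sarif\":\"$SARIF\"}" \
  "https://api.github.com/repos/<em>orgname</em>/<em>reponame</em>/code-scanning/sarifs"
```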
## Validating your SARIF file
<!--UI-LINK: When code scanning fails, the error banner shown in the Security > Code scanning alerts view links to this anchor.-->
@@ -77,6 +106,12 @@ You can check a SARIF file is compatible with {% data variables.product.prodname
If you use a code analysis engine other than {% data variables.product.prodname_codeql %}, you can review the supported SARIF properties to optimize how your analysis results will appear on {% data variables.product.prodname_dotcom %}.
{% note %}
**Note:** You must supply an explicit value for any property marked as "required". The empty string is not supported for required properties.
{% endnote %}
Any valid SARIF 2.1.0 output file can be uploaded; however, {% data variables.product.prodname_code_scanning %} will only use the following supported properties.
### `sarifLog` object
@@ -95,6 +130,7 @@ Any valid SARIF 2.1.0 output file can be uploaded, however, {% data variables.pr
|----|----|
| `tool.driver` | **Required.** A `toolComponent` object that describes the analysis tool. For more information, see the [`toolComponent` object](#toolcomponent-object). |
| `tool.extensions[]` | **Optional.** An array of `toolComponent` objects that represent any plugins or extensions used by the tool during analysis. For more information, see the [`toolComponent` object](#toolcomponent-object). |
| `invocation.workingDirectory.uri` | **Optional.** This field is used only when neither `checkout_uri` (SARIF upload API only) nor `checkout_path` ({% data variables.product.prodname_actions %} only) is provided. The value is used to convert absolute URIs used in [`physicalLocation` objects](#physicallocation-object) to relative URIs. For more information, see "[Specifying the root for source files](#specifying-the-root-for-source-files)."|
| `results[]` | **Required.** The results of the analysis tool. {% data variables.product.prodname_code_scanning_capc %} displays the results on {% data variables.product.prodname_dotcom %}. For more information, see the [`result` object](#result-object).
### `toolComponent` object
@@ -138,7 +174,7 @@ Each `result` object contains details for one alert in the codebase. Within the
| `level`| **Optional.** The severity of the result. This level overrides the default severity defined by the rule. {% data variables.product.prodname_code_scanning_capc %} uses the level to filter results by severity on {% data variables.product.prodname_dotcom %}.
| `message.text`| **Required.** A message that describes the result. {% data variables.product.prodname_code_scanning_capc %} displays the message text as the title of the result. Only the first sentence of the message will be displayed when visible space is limited.
| `locations[]`| **Required.** The set of locations where the result was detected up to a maximum of 10. Only one location should be included unless the problem can only be corrected by making a change at every specified location. **Note:** At least one location is required for {% data variables.product.prodname_code_scanning %} to display a result. {% data variables.product.prodname_code_scanning_capc %} will use this property to decide which file to annotate with the result. Only the first value of this array is used. All other values are ignored.
| `partialFingerprints`| **Required.** A set of strings used to track the unique identity of the result. {% data variables.product.prodname_code_scanning_capc %} uses `partialFingerprints` to accurately identify which results are the same across commits and branches. {% data variables.product.prodname_code_scanning_capc %} will attempt to use `partialFingerprints` if they exist. If you are uploading third-party SARIF files with the `upload-action`, the action will create `partialFingerprints` for you when they are not included in the SARIF file. For more information, see "[Preventing duplicate alerts using fingerprints](#preventing-duplicate-alerts-using-fingerprints)." **Note:** {% data variables.product.prodname_code_scanning_capc %} only uses the `primaryLocationLineHash`.
| `partialFingerprints`| **Required.** A set of strings used to track the unique identity of the result. {% data variables.product.prodname_code_scanning_capc %} uses `partialFingerprints` to accurately identify which results are the same across commits and branches. {% data variables.product.prodname_code_scanning_capc %} will attempt to use `partialFingerprints` if they exist. If you are uploading third-party SARIF files with the `upload-action`, the action will create `partialFingerprints` for you when they are not included in the SARIF file. For more information, see "[Providing data to track code scanning alerts across runs](#providing-data-to-track-code-scanning-alerts-across-runs)." **Note:** {% data variables.product.prodname_code_scanning_capc %} only uses the `primaryLocationLineHash`.
| `codeFlows[].threadFlows[].locations[]`| **Optional.** An array of `location` objects for a `threadFlow` object, which describes the progress of a program through a thread of execution. A `codeFlow` object describes a pattern of code execution used to detect a result. If code flows are provided, {% data variables.product.prodname_code_scanning %} will expand code flows on {% data variables.product.prodname_dotcom %} for the relevant result. For more information, see the [`location` object](#location-object).
| `relatedLocations[]`| A set of locations relevant to this result. {% data variables.product.prodname_code_scanning_capc %} will link to related locations when they are embedded in the result message. For more information, see the [`location` object](#location-object).
@@ -156,7 +192,7 @@ A location within a programming artifact, such as a file in the repository or a
| Name | Description |
|----|----|
| `artifactLocation.uri`| **Required.** A URI indicating the location of an artifact, usually a file either in the repository or generated during a build. If the URI is relative, it should be relative to the root of the {% data variables.product.prodname_dotcom %} repository being analyzed. For example, main.js or src/script.js are relative to the root of the repository. If the URI is absolute, {% data variables.product.prodname_code_scanning %} can use the URI to checkout the artifact and match up files in the repository. For example, `https://github.com/ghost/example/blob/00/src/promiseUtils.js`.
| `artifactLocation.uri`| **Required.** A URI indicating the location of an artifact, usually a file either in the repository or generated during a build. For the best results, we recommend a relative path from the root of the GitHub repository being analyzed, for example, `src/main.js`. For more information about artifact URIs, see "[Specifying the root for source files](#specifying-the-root-for-source-files)."|
| `region.startLine` | **Required.** The line number of the first character in the region.
| `region.startColumn` | **Required.** The column number of the first character in the region.
| `region.endLine` | **Required.** The line number of the last character in the region.
@@ -204,7 +240,7 @@ These example SARIF output files show supported properties and example values.
### Example with minimum required properties
This SARIF output file has example values to show the minimum required properties for {% data variables.product.prodname_code_scanning %} results to work as expected. If you remove any properties or don't include values, this data will not be displayed correctly or sync on {% data variables.product.prodname_dotcom %}.
This SARIF output file has example values to show the minimum required properties for {% data variables.product.prodname_code_scanning %} results to work as expected. If you remove any properties, omit values, or use an empty string, this data will not be displayed correctly or sync on {% data variables.product.prodname_dotcom %}.
```json
{

View File

@@ -58,7 +58,7 @@ For more information see the [`upload-sarif` action](https://github.com/github/c
The `upload-sarif` action can be configured to run when the `push` and `scheduled` event occur. For more information about {% data variables.product.prodname_actions %} events, see "[Events that trigger workflows](/actions/reference/events-that-trigger-workflows)."
If your SARIF file doesn't include `partialFingerprints`, the `upload-sarif` action will calculate the `partialFingerprints` field for you and attempt to prevent duplicate alerts. {% data variables.product.prodname_dotcom %} can only create `partialFingerprints` when the repository contains both the SARIF file and the source code used in the static analysis. For more information about preventing duplicate alerts, see "[About SARIF support for code scanning](/code-security/secure-coding/sarif-support-for-code-scanning#preventing-duplicate-alerts-using-fingerprints)."
If your SARIF file doesn't include `partialFingerprints`, the `upload-sarif` action will calculate the `partialFingerprints` field for you and attempt to prevent duplicate alerts. {% data variables.product.prodname_dotcom %} can only create `partialFingerprints` when the repository contains both the SARIF file and the source code used in the static analysis. For more information about preventing duplicate alerts, see "[About SARIF support for code scanning](/code-security/secure-coding/sarif-support-for-code-scanning#providing-data-to-track-code-scanning-alerts-across-runs)."
{% data reusables.code-scanning.upload-sarif-alert-limit %}

View File

@@ -78,7 +78,8 @@ You can display the command-line help for any command using the <nobr>`--help`</
| <nobr>`--command`</nobr> | | Recommended. Use to specify the build command or script that invokes the build process for the codebase. Commands are run from the current folder or, where it is defined, from <nobr>`--source-root`</nobr>. Not needed for Python and JavaScript/TypeScript analysis. |
| <nobr>`--db-cluster`</nobr> | | Optional. Use in multi-language codebases to generate one database for each language specified by <nobr>`--language`</nobr>.
| <nobr>`--no-run-unnecessary-builds`</nobr> | | Recommended. Use to suppress the build command for languages where the {% data variables.product.prodname_codeql_cli %} does not need to monitor the build (for example, Python and JavaScript/TypeScript).
| <nobr>`--source-root`</nobr> | | Optional. Use if you run the CLI outside the checkout root of the repository. By default, the `database create` command assumes that the current directory is the root directory for the source files, use this option to specify a different location. |
| <nobr>`--source-root`</nobr> | | Optional. Use if you run the CLI outside the checkout root of the repository. By default, the `database create` command assumes that the current directory is the root directory for the source files; use this option to specify a different location. |{% ifversion fpt or ghec or ghes > 3.2 or ghae %}
| <nobr>`--codescanning-config`</nobr> | | Optional (Advanced). Use if you have a configuration file that specifies how to create the {% data variables.product.prodname_codeql %} databases and what queries to run in later steps. For more information, see "[Using a custom configuration file](/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-a-custom-configuration-file)" and "[database create](https://codeql.github.com/docs/codeql-cli/manual/database-create/#cmdoption-codeql-database-create-codescanning-config)." |{% endif %}
For more information, see [Creating {% data variables.product.prodname_codeql %} databases](https://codeql.github.com/docs/codeql-cli/creating-codeql-databases/) in the documentation for the {% data variables.product.prodname_codeql_cli %}.

View File

@@ -52,9 +52,9 @@ Generally, we name our supported ecosystems after the software programming langu
- Composer (registry: https://packagist.org/){% ifversion GH-advisory-db-erlang-support %}
- Erlang (registry: https://hex.pm/){% endif %}
- Go (registry: https://pkg.go.dev/)
{% ifversion fpt or ghec or ghes > 3.6 or ghae-issue-7508 %}
{%- ifversion fpt or ghec or ghes > 3.6 or ghae-issue-7508 %}
- GitHub Actions (https://github.com/marketplace?type=actions/) {% endif %}
- Maven (registry: https://repo1.maven.org/maven2/org/)
- Maven (registry: https://repo.maven.apache.org/maven2)
- npm (registry: https://www.npmjs.com/)
- NuGet (registry: https://www.nuget.org/)
- pip (registry: https://pypi.org/)

View File

@@ -417,7 +417,7 @@ updates:
### `open-pull-requests-limit`
By default, {% data variables.product.prodname_dependabot %} opens a maximum of five pull requests for version updates. Once there are five open pull requests, new requests are blocked until you merge or close some of the open requests, after which new pull requests can be opened on subsequent updates. Use `open-pull-requests-limit` to change this limit. This also provides a simple way to temporarily disable version updates for a package manager.
By default, {% data variables.product.prodname_dependabot %} opens a maximum of five pull requests for version updates. Once there are five open pull requests from {% data variables.product.prodname_dependabot %}, {% data variables.product.prodname_dependabot %} will not open any new requests until some of those open requests are merged or closed. Use `open-pull-requests-limit` to change this limit. This also provides a simple way to temporarily disable version updates for a package manager.
This option has no impact on security updates, which have a separate, internal limit of ten open pull requests.
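For example, the following `dependabot.yml` sketch raises the limit for npm and temporarily disables version updates for pip by setting its limit to zero (the ecosystems and directory are illustrative):

```yaml
version: 2
updates:
  - package-ecosystem: "npm"
    directory: "/"
    schedule:
      interval: "daily"
    # Allow up to ten open pull requests for npm dependencies.
    open-pull-requests-limit: 10
  - package-ecosystem: "pip"
    directory: "/"
    schedule:
      interval: "daily"
    # Temporarily disable version updates for pip.
    open-pull-requests-limit: 0
```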

View File

@@ -75,7 +75,7 @@ The following configuration options are available.
{% endtip %}
{% endif %}
This {% data variables.product.prodname_dependency_review_action %} example file illustrates how you can use these configuration options.
This {% data variables.product.prodname_dependency_review_action %} example file illustrates how you can use these configuration options. Notice that the example uses a short version number for the action (`v2`) instead of a semver release number (for example, `v2.0.8`). This ensures that you use the most recent minor version of the action.
```yaml{:copy}
name: 'Dependency Review'

View File

@@ -12,25 +12,25 @@ shortTitle: Private image registry
## About private image registries and {% data variables.product.prodname_github_codespaces %}
A registry is a secure space for storing, managing, and fetching private container images. You may use one to store one or more images. There are many examples of registries, such as {% data variables.product.prodname_dotcom %} Container Registry, Azure Container Registry, or DockerHub.
A registry is a secure space for storing, managing, and fetching private container images. You may use one to store one or more images. There are many examples of registries, such as {% data variables.product.prodname_container_registry %}, {% data variables.product.prodname_npm_registry %}, Azure Container Registry, or DockerHub.
{% data variables.product.prodname_dotcom %} Container Registry can be configured to pull container images seamlessly, without having to provide any authentication credentials to {% data variables.product.prodname_github_codespaces %}. For other image registries, you must create secrets in {% data variables.product.prodname_dotcom %} to store the access details, which will allow {% data variables.product.prodname_codespaces %} to access images stored in that registry.
{% data variables.product.prodname_ghcr_and_npm_registry %} can be configured to allow container images to be pulled seamlessly into {% data variables.product.prodname_github_codespaces %} during codespace creation, without having to provide any authentication credentials. For other image registries, you must create secrets in {% data variables.product.prodname_dotcom %} to store the access details, which will allow {% data variables.product.prodname_codespaces %} to access images stored in that registry.
## Accessing images stored in {% data variables.product.prodname_dotcom %} Container Registry
## Accessing images stored in {% data variables.product.prodname_ghcr_and_npm_registry %}
{% data variables.product.prodname_dotcom %} Container Registry is the easiest way for {% data variables.product.prodname_codespaces %} to consume devcontainer container images.
{% data variables.product.prodname_ghcr_and_npm_registry %} provide the easiest way for {% data variables.product.prodname_codespaces %} to consume dev container images.
For more information, see "[Working with the Container registry](/packages/working-with-a-github-packages-registry/working-with-the-container-registry)".
For more information, see "[Working with the Container registry](/packages/working-with-a-github-packages-registry/working-with-the-container-registry)" and "[Working with the npm registry](/packages/working-with-a-github-packages-registry/working-with-the-npm-registry)".
### Accessing an image published to the same repository as the codespace
If you publish a container image to {% data variables.product.prodname_dotcom %} Container Registry in the same repository that the codespace is being launched in, you will automatically be able to fetch that image on codespace creation. You won't have to provide any additional credentials, unless the **Inherit access from repo** option was unselected when the container image was published.
If you publish a container image to {% data variables.product.prodname_ghcr_or_npm_registry %} in the same repository that the codespace is being launched in, you will automatically be able to fetch that image on codespace creation. You won't have to provide any additional credentials, unless the **Inherit access from repo** option was unselected when the container image was published.
#### Inheriting access from the repository from which an image was published
By default, when you publish a container image to {% data variables.product.prodname_dotcom %} Container Registry, the image inherits the access setting of the repository from which the image was published. For example, if the repository is public, the image is also public. If the repository is private, the image is also private, but is accessible from the repository.
By default, when you publish a container image to {% data variables.product.prodname_ghcr_or_npm_registry %}, the image inherits the access setting of the repository from which the image was published. For example, if the repository is public, the image is also public. If the repository is private, the image is also private, but is accessible from the repository.
This behavior is controlled by the **Inherit access from repo** option. **Inherit access from repo** is selected by default when publishing via {% data variables.product.prodname_actions %}, but not when publishing directly to {% data variables.product.prodname_dotcom %} Container Registry using a Personal Access Token (PAT).
This behavior is controlled by the **Inherit access from repo** option. **Inherit access from repo** is selected by default when publishing via {% data variables.product.prodname_actions %}, but not when publishing directly to {% data variables.product.prodname_ghcr_or_npm_registry %} using a Personal Access Token (PAT).
If the **Inherit access from repo** option was not selected when the image was published, you can manually add the repository to the published container image's access controls. For more information, see "[Configuring a package's access control and visibility](/packages/learn-github-packages/configuring-a-packages-access-control-and-visibility#inheriting-access-for-a-container-image-from-a-repository)."
@@ -46,13 +46,13 @@ If you want to allow a subset of an organization's repositories to access a cont
### Publishing a container image from a codespace
Seamless access from a codespace to {% data variables.product.prodname_dotcom %} Container Registry is limited to pulling container images. If you want to publish a container image from inside a codespace, you must use a personal access token (PAT) with the `write:packages` scope.
Seamless access from a codespace to {% data variables.product.prodname_ghcr_or_npm_registry %} is limited to pulling container images. If you want to publish a container image from inside a codespace, you must use a personal access token (PAT) with the `write:packages` scope.
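For example, assuming your PAT is stored in an environment variable (the variable name, organization, and image name below are placeholders), you could authenticate and push an image from the codespace's terminal:

```shell
# Authenticate to the registry with a PAT that has the write:packages
# scope, then build and push the image.
echo $GHCR_PAT | docker login ghcr.io -u USERNAME --password-stdin
docker build -t ghcr.io/octo-org/example-image:latest .
docker push ghcr.io/octo-org/example-image:latest
```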
We recommend publishing images via {% data variables.product.prodname_actions %}. For more information, see "[Publishing Docker images](/actions/publishing-packages/publishing-docker-images)."
We recommend publishing images via {% data variables.product.prodname_actions %}. For more information, see "[Publishing Docker images](/actions/publishing-packages/publishing-docker-images)" and "[Publishing Node.js packages](/actions/publishing-packages/publishing-nodejs-packages)."
## Accessing images stored in other container registries
If you are accessing a container image from a registry that isn't {% data variables.product.prodname_dotcom %} Container Registry, {% data variables.product.prodname_codespaces %} checks for the presence of three secrets, which define the server name, username, and personal access token (PAT) for a container registry. If these secrets are found, {% data variables.product.prodname_github_codespaces %} will make the registry available inside your codespace.
If you are accessing a container image from a registry that isn't {% data variables.product.prodname_ghcr_or_npm_registry %}, {% data variables.product.prodname_codespaces %} checks for the presence of three secrets, which define the server name, username, and personal access token (PAT) for a container registry. If these secrets are found, {% data variables.product.prodname_github_codespaces %} will make the registry available inside your codespace.
- `<*>_CONTAINER_REGISTRY_SERVER`
- `<*>_CONTAINER_REGISTRY_USER`

View File

@@ -17,7 +17,7 @@ redirect_from:
## About the {% data variables.product.prodname_vscode_command_palette %}
The Command Palette is one of the focal features of {% data variables.product.prodname_vscode %} and is available for you to use in {% data variables.product.prodname_github_codespaces %}. The {% data variables.product.prodname_vscode_command_palette %} allows you to access many commands for {% data variables.product.prodname_codespaces %} and {% data variables.product.prodname_vscode_shortname %}. For more information on using the {% data variables.product.prodname_vscode_command_palette_shortname %}, see "[User Interface](https://code.visualstudio.com/docs/getstarted/userinterface#_command-palette)" in the {% data variables.product.prodname_vscode_shortname %} documentation.
The Command Palette is one of the focal features of {% data variables.product.prodname_vscode %} and is available for you to use in {% data variables.product.prodname_github_codespaces %}. The Command Palette allows you to access many commands for {% data variables.product.prodname_codespaces %} and {% data variables.product.prodname_vscode_shortname %}. For more information on using the {% data variables.product.prodname_vscode_command_palette_shortname %}, see "[User Interface](https://code.visualstudio.com/docs/getstarted/userinterface#_command-palette)" in the {% data variables.product.prodname_vscode_shortname %} documentation.
## Accessing the {% data variables.product.prodname_vscode_command_palette_shortname %}

View File

@@ -149,3 +149,6 @@ Replace `machine-type` with a valid identifier for an available machine type. Id
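For example, assuming the `basicLinux32gb` machine type is available for the repository:

```shell
gh codespace create --repo octo-org/octo-repo --machine basicLinux32gb
```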
For full details of the options for this command, see [the {% data variables.product.prodname_cli %} manual](https://cli.github.com/manual/gh_codespace_create).
{% endcli %}
## Further reading
- "[Adding an 'Open in GitHub Codespaces' badge](/codespaces/setting-up-your-project-for-codespaces/adding-a-codespaces-badge)"

View File

@@ -99,4 +99,4 @@ gh cs delete --repo octo-org/octo-repo --days 7
{% endcli %}
## Further reading
- [Codespaces lifecycle](/codespaces/developing-in-codespaces/codespaces-lifecycle)
- "[Codespaces lifecycle](/codespaces/developing-in-codespaces/codespaces-lifecycle)"

View File

@@ -0,0 +1,120 @@
---
title: Getting started with GitHub Codespaces for machine learning
shortTitle: Machine learning
intro: 'Learn about working on machine learning projects with {% data variables.product.prodname_github_codespaces %} and its out-of-the-box tools.'
product: '{% data reusables.gated-features.codespaces %}'
versions:
fpt: '*'
ghec: '*'
type: tutorial
topics:
- Codespaces
- Developer
---
## Introduction
This guide introduces you to machine learning with {% data variables.product.prodname_github_codespaces %}. You'll build a simple image classifier, learn about some of the tools that come preinstalled in {% data variables.product.prodname_github_codespaces %}, configure your development environment for NVIDIA CUDA, and use {% data variables.product.prodname_cli %} to open your codespace in JupyterLab.
## Prerequisite
You have access to {% data variables.product.prodname_github_codespaces %}. For more information, see "[Creating a codespace](/codespaces/developing-in-codespaces/creating-a-codespace#access-to-github-codespaces)."
## Build a simple image classifier
We'll use a Jupyter notebook to build a simple image classifier.
Jupyter notebooks are sets of cells that you can execute one after another. The notebook we'll use includes a number of cells that build an image classifier using [PyTorch](https://pytorch.org/). Each cell is a different phase of that process: download a dataset, set up a neural network, train a model, and then test that model.
We'll run all of the cells, in sequence, to perform all phases of building the image classifier. When we do this, Jupyter saves the output back into the notebook so that you can examine the results.
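As an illustration, the notebook's phases map onto code along these lines. This is a minimal sketch with an illustrative dataset, architecture, and hyperparameters, not the notebook's exact contents:

```python
import torch
import torch.nn as nn
import torchvision
from torchvision import transforms

# Phase 1: download a dataset.
train_set = torchvision.datasets.CIFAR10(
    root="data", train=True, download=True, transform=transforms.ToTensor()
)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=64, shuffle=True)

# Phase 2: set up a small neural network.
model = nn.Sequential(
    nn.Flatten(),
    nn.Linear(3 * 32 * 32, 128),
    nn.ReLU(),
    nn.Linear(128, 10),
)

# Phase 3: train the model for one epoch.
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
loss_fn = nn.CrossEntropyLoss()
for images, labels in train_loader:
    optimizer.zero_grad()
    loss = loss_fn(model(images), labels)
    loss.backward()
    optimizer.step()

# Phase 4: evaluate the model (a real notebook would use a held-out
# test set rather than a training batch).
with torch.no_grad():
    images, labels = next(iter(train_loader))
    accuracy = (model(images).argmax(dim=1) == labels).float().mean()
    print(f"Accuracy: {accuracy:.2%}")
```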
### Creating a repository and a codespace
1. Go to the [github/codespaces-getting-started-ml](https://github.com/github/codespaces-getting-started-ml) template repository and click **Use this template**.
{% data reusables.codespaces.open-codespace-from-template-repo %}
By default, a codespace for this repository opens in a web-based version of {% data variables.product.prodname_vscode %}.
### Open the image classifier notebook
The default container image that's used by {% data variables.product.prodname_github_codespaces %} includes a set of machine learning libraries that are preinstalled in your codespace. For example, NumPy, pandas, SciPy, Matplotlib, seaborn, scikit-learn, TensorFlow, Keras, PyTorch, Requests, and Plotly. For more information about the default image, see "[Introduction to dev containers](/codespaces/setting-up-your-project-for-codespaces/introduction-to-dev-containers#using-the-default-dev-container-configuration)" and [the `devcontainers/images` repository](https://github.com/devcontainers/images/tree/main/src/codespaces#github-codespaces-default-linux-universal).
1. In the {% data variables.product.prodname_vscode_shortname %} editor, close any "Get Started" tabs that are displayed.
1. Open the `image-classifier.ipynb` notebook file.
1. Click the Python kernel link at the top right of the editor.
![Screenshot of the Python kernel link](/assets/images/help/codespaces/jupyter-python-kernel-link.png)
1. In the drop-down menu, choose the kernel located at `/opt/python/latest/bin/python`.
![Screenshot of the Python kernel drop-down menu](/assets/images/help/codespaces/jupyter-python-kernel-dropdown.png)
### Build the image classifier
The image classifier notebook contains all the code you need to download a dataset, train a neural network, and evaluate its performance.
1. Click **Run All** to execute all of the notebook's cells.
![Screenshot of the Run All button](/assets/images/help/codespaces/jupyter-run-all.png)
1. Scroll down to view the output of each cell.
![Screenshot of Step 3 in the editor](/assets/images/help/codespaces/jupyter-notebook-step3.png)
## Configure NVIDIA CUDA for your codespace
Some software, such as TensorFlow, requires you to install NVIDIA CUDA to use your codespace's GPU. Where this is the case, you can create your own custom configuration by using a `devcontainer.json` file and specify that CUDA should be installed. For more information on creating a custom configuration, see "[Introduction to dev containers](/codespaces/setting-up-your-project-for-codespaces/introduction-to-dev-containers#creating-a-custom-dev-container-configuration)."
{% note %}
**Note**: For full details of the script that's run when you add the `nvidia-cuda` feature, see [the devcontainers/features repository](https://github.com/devcontainers/features/tree/main/src/nvidia-cuda).
{% endnote %}
1. Within a codespace, open the `.devcontainer/devcontainer.json` file in the editor.
1. Add a top-level `features` object with the following contents:
```json{:copy}
"features": {
"ghcr.io/devcontainers/features/nvidia-cuda:1": {
"installCudnn": true
}
}
```
For more information about the `features` object, see the [development containers specification](https://containers.dev/implementors/features/#devcontainer-json-properties).
If you are using the `devcontainer.json` file from the image classifier repository you created for this tutorial, your `devcontainer.json` file will now look like this:
```json
{
"customizations": {
"vscode": {
"extensions": [
"ms-python.python",
"ms-toolsai.jupyter"
]
}
},
"features": {
"ghcr.io/devcontainers/features/nvidia-cuda:1": {
"installCudnn": true
}
}
}
```
1. Save the change.
{% data reusables.codespaces.rebuild-command %}
The codespace container will be rebuilt. This will take several minutes. When the rebuild is complete, the codespace is automatically reopened.
1. Commit the change to the repository so that CUDA will be installed in any new codespaces you create from this repository in the future.
## Open your codespace in JupyterLab
The default container image that's used by {% data variables.product.prodname_github_codespaces %} includes JupyterLab, the web-based Jupyter IDE. You can use {% data variables.product.prodname_cli %} to open your codespace in JupyterLab without having to install anything else on your codespace.
1. In the terminal, enter the {% data variables.product.prodname_cli %} command `gh cs jupyter`.
1. Choose the codespace you want to open.
![Screenshot of opening a codespace from the terminal](/assets/images/help/codespaces/open-codespace-in-jupyter.png)

View File

@@ -18,6 +18,7 @@ children:
- /forwarding-ports-in-your-codespace
- /default-environment-variables-for-your-codespace
- /connecting-to-a-private-network
- /getting-started-with-github-codespaces-for-machine-learning
- /using-github-codespaces-in-visual-studio-code
- /using-github-codespaces-with-github-cli
---

Some files were not shown because too many files have changed in this diff.